Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

Conflicts:
	drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
	drivers/net/wireless/rtlwifi/pci.c
	net/netfilter/ipvs/ip_vs_core.c
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index d3d653a..3dcb26c 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -106,16 +106,6 @@
 	when the number of entries in the pool is very small).
 	Measured in seconds.
 
-inet_peer_gc_mintime - INTEGER
-	Minimum interval between garbage collection passes.  This interval is
-	in effect under high memory pressure on the pool.
-	Measured in seconds.
-
-inet_peer_gc_maxtime - INTEGER
-	Minimum interval between garbage collection passes.  This interval is
-	in effect under low (or absent) memory pressure on the pool.
-	Measured in seconds.
-
 TCP variables:
 
 somaxconn - INTEGER
diff --git a/arch/m68k/emu/nfeth.c b/arch/m68k/emu/nfeth.c
index 8b6e201..3c55312 100644
--- a/arch/m68k/emu/nfeth.c
+++ b/arch/m68k/emu/nfeth.c
@@ -16,6 +16,7 @@
 
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
+#include <linux/interrupt.h>
 #include <linux/module.h>
 #include <asm/natfeat.h>
 #include <asm/virtconvert.h>
diff --git a/drivers/atm/eni.h b/drivers/atm/eni.h
index e4c9525..493a693 100644
--- a/drivers/atm/eni.h
+++ b/drivers/atm/eni.h
@@ -8,6 +8,7 @@
 
 #include <linux/atm.h>
 #include <linux/atmdev.h>
+#include <linux/interrupt.h>
 #include <linux/sonet.h>
 #include <linux/skbuff.h>
 #include <linux/time.h>
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index ef7a658..7c7b571 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -44,6 +44,7 @@
 #include <linux/ioport.h> /* for request_region */
 #include <linux/uio.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/capability.h>
 #include <linux/bitops.h>
 #include <linux/slab.h>
diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
index d58e3fc..2875061 100644
--- a/drivers/atm/horizon.c
+++ b/drivers/atm/horizon.c
@@ -38,6 +38,7 @@
 #include <linux/delay.h>
 #include <linux/uio.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/ioport.h>
 #include <linux/wait.h>
 #include <linux/slab.h>
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 1f8d724..be0dbfe 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -37,6 +37,7 @@
 #include <linux/atm.h>
 #include <linux/delay.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/bitops.h>
 #include <linux/wait.h>
 #include <linux/jiffies.h>
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index dee4f01..957106f 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -53,6 +53,7 @@
 #include <linux/delay.h>  
 #include <linux/uio.h>  
 #include <linux/init.h>  
+#include <linux/interrupt.h>
 #include <linux/wait.h>
 #include <linux/slab.h>
 #include <asm/system.h>  
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index 6249179..7f8c513 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -16,6 +16,7 @@
 #include <linux/delay.h>
 #include <linux/uio.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/dma-mapping.h>
 #include <linux/atm_zatm.h>
 #include <linux/capability.h>
diff --git a/drivers/bcma/Kconfig b/drivers/bcma/Kconfig
index 353781b..83e9adf 100644
--- a/drivers/bcma/Kconfig
+++ b/drivers/bcma/Kconfig
@@ -13,6 +13,11 @@
 	  Bus driver for Broadcom specific Advanced Microcontroller Bus
 	  Architecture.
 
+# Support for Block-I/O. SELECT this from the driver that needs it.
+config BCMA_BLOCKIO
+	bool
+	depends on BCMA
+
 config BCMA_HOST_PCI_POSSIBLE
 	bool
 	depends on BCMA && PCI = y
diff --git a/drivers/bcma/Makefile b/drivers/bcma/Makefile
index 0d56245..cde0182 100644
--- a/drivers/bcma/Makefile
+++ b/drivers/bcma/Makefile
@@ -1,4 +1,4 @@
-bcma-y					+= main.o scan.o core.o
+bcma-y					+= main.o scan.o core.o sprom.o
 bcma-y					+= driver_chipcommon.o driver_chipcommon_pmu.o
 bcma-y					+= driver_pci.o
 bcma-$(CONFIG_BCMA_HOST_PCI)		+= host_pci.o
diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h
index 2f72e9c..12a75ab 100644
--- a/drivers/bcma/bcma_private.h
+++ b/drivers/bcma/bcma_private.h
@@ -19,6 +19,9 @@
 /* scan.c */
 int bcma_bus_scan(struct bcma_bus *bus);
 
+/* sprom.c */
+int bcma_sprom_get(struct bcma_bus *bus);
+
 #ifdef CONFIG_BCMA_HOST_PCI
 /* host_pci.c */
 extern int __init bcma_host_pci_init(void);
diff --git a/drivers/bcma/driver_pci.c b/drivers/bcma/driver_pci.c
index e757e4e..789d68b 100644
--- a/drivers/bcma/driver_pci.c
+++ b/drivers/bcma/driver_pci.c
@@ -161,3 +161,26 @@
 {
 	bcma_pcicore_serdes_workaround(pc);
 }
+
+int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, struct bcma_device *core,
+			  bool enable)
+{
+	struct pci_dev *pdev = pc->core->bus->host_pci;
+	u32 coremask, tmp;
+	int err;
+
+	err = pci_read_config_dword(pdev, BCMA_PCI_IRQMASK, &tmp);
+	if (err)
+		goto out;
+
+	coremask = BIT(core->core_index) << 8;
+	if (enable)
+		tmp |= coremask;
+	else
+		tmp &= ~coremask;
+
+	err = pci_write_config_dword(pdev, BCMA_PCI_IRQMASK, tmp);
+
+out:
+	return err;
+}
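
A minimal usage sketch for the new helper (hypothetical caller, not part of this patch): the coremask places BIT(core->core_index) into the per-core enable field of the BCMA_PCI_IRQMASK config word starting at bit 8, so each core owns one bit. This assumes a PCI-hosted bus, so bus->drv_pci is valid:

	static int example_route_core_irq(struct bcma_device *core)
	{
		/* forward this core's interrupt line through the PCI core */
		return bcma_core_pci_irq_ctl(&core->bus->drv_pci, core, true);
	}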
diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
index 471a040..2a526bc 100644
--- a/drivers/bcma/host_pci.c
+++ b/drivers/bcma/host_pci.c
@@ -65,6 +65,54 @@
 	iowrite32(value, core->bus->mmio + offset);
 }
 
+#ifdef CONFIG_BCMA_BLOCKIO
+void bcma_host_pci_block_read(struct bcma_device *core, void *buffer,
+			      size_t count, u16 offset, u8 reg_width)
+{
+	void __iomem *addr = core->bus->mmio + offset;
+	if (core->bus->mapped_core != core)
+		bcma_host_pci_switch_core(core);
+	switch (reg_width) {
+	case sizeof(u8):
+		ioread8_rep(addr, buffer, count);
+		break;
+	case sizeof(u16):
+		WARN_ON(count & 1);
+		ioread16_rep(addr, buffer, count >> 1);
+		break;
+	case sizeof(u32):
+		WARN_ON(count & 3);
+		ioread32_rep(addr, buffer, count >> 2);
+		break;
+	default:
+		WARN_ON(1);
+	}
+}
+
+void bcma_host_pci_block_write(struct bcma_device *core, const void *buffer,
+			       size_t count, u16 offset, u8 reg_width)
+{
+	void __iomem *addr = core->bus->mmio + offset;
+	if (core->bus->mapped_core != core)
+		bcma_host_pci_switch_core(core);
+	switch (reg_width) {
+	case sizeof(u8):
+		iowrite8_rep(addr, buffer, count);
+		break;
+	case sizeof(u16):
+		WARN_ON(count & 1);
+		iowrite16_rep(addr, buffer, count >> 1);
+		break;
+	case sizeof(u32):
+		WARN_ON(count & 3);
+		iowrite32_rep(addr, buffer, count >> 2);
+		break;
+	default:
+		WARN_ON(1);
+	}
+}
+#endif
+
 static u32 bcma_host_pci_aread32(struct bcma_device *core, u16 offset)
 {
 	if (core->bus->mapped_core != core)
@@ -87,6 +135,10 @@
 	.write8		= bcma_host_pci_write8,
 	.write16	= bcma_host_pci_write16,
 	.write32	= bcma_host_pci_write32,
+#ifdef CONFIG_BCMA_BLOCKIO
+	.block_read	= bcma_host_pci_block_read,
+	.block_write	= bcma_host_pci_block_write,
+#endif
 	.aread32	= bcma_host_pci_aread32,
 	.awrite32	= bcma_host_pci_awrite32,
 };
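
Note on the block ops registered above: count is in bytes for every width; the u16/u32 paths divide it for the rep accessors and WARN on misaligned lengths, after switching the core window if needed. An illustrative caller sketch (the 0x100 offset is made up):

	static void example_burst_read(struct bcma_device *core, u32 *buf)
	{
		/* 256 bytes from the core window as 64 32-bit reads */
		core->bus->ops->block_read(core, buf, 256, 0x100, sizeof(u32));
	}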
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index be52344..11e96dc 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -89,6 +89,8 @@
 		switch (bus->hosttype) {
 		case BCMA_HOSTTYPE_PCI:
 			core->dev.parent = &bus->host_pci->dev;
+			core->dma_dev = &bus->host_pci->dev;
+			core->irq = bus->host_pci->irq;
 			break;
 		case BCMA_HOSTTYPE_NONE:
 		case BCMA_HOSTTYPE_SDIO:
@@ -144,6 +146,13 @@
 		bcma_core_pci_init(&bus->drv_pci);
 	}
 
+	/* Try to get SPROM */
+	err = bcma_sprom_get(bus);
+	if (err) {
+		pr_err("Failed to get SPROM: %d\n", err);
+		return -ENOENT;
+	}
+
 	/* Register found cores */
 	bcma_register_cores(bus);
 
diff --git a/drivers/bcma/sprom.c b/drivers/bcma/sprom.c
new file mode 100644
index 0000000..ffbb0e3
--- /dev/null
+++ b/drivers/bcma/sprom.c
@@ -0,0 +1,162 @@
+/*
+ * Broadcom specific AMBA
+ * SPROM reading
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+
+#include "bcma_private.h"
+
+#include <linux/bcma/bcma.h>
+#include <linux/bcma/bcma_regs.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+
+#define SPOFF(offset)	((offset) / sizeof(u16))
+
+/**************************************************
+ * R/W ops.
+ **************************************************/
+
+static void bcma_sprom_read(struct bcma_bus *bus, u16 *sprom)
+{
+	int i;
+	for (i = 0; i < SSB_SPROMSIZE_WORDS_R4; i++)
+		sprom[i] = bcma_read16(bus->drv_cc.core,
+				       BCMA_CC_SPROM + (i * 2));
+}
+
+/**************************************************
+ * Validation.
+ **************************************************/
+
+static inline u8 bcma_crc8(u8 crc, u8 data)
+{
+	/* Polynomial:   x^8 + x^7 + x^6 + x^4 + x^2 + 1   */
+	static const u8 t[] = {
+		0x00, 0xF7, 0xB9, 0x4E, 0x25, 0xD2, 0x9C, 0x6B,
+		0x4A, 0xBD, 0xF3, 0x04, 0x6F, 0x98, 0xD6, 0x21,
+		0x94, 0x63, 0x2D, 0xDA, 0xB1, 0x46, 0x08, 0xFF,
+		0xDE, 0x29, 0x67, 0x90, 0xFB, 0x0C, 0x42, 0xB5,
+		0x7F, 0x88, 0xC6, 0x31, 0x5A, 0xAD, 0xE3, 0x14,
+		0x35, 0xC2, 0x8C, 0x7B, 0x10, 0xE7, 0xA9, 0x5E,
+		0xEB, 0x1C, 0x52, 0xA5, 0xCE, 0x39, 0x77, 0x80,
+		0xA1, 0x56, 0x18, 0xEF, 0x84, 0x73, 0x3D, 0xCA,
+		0xFE, 0x09, 0x47, 0xB0, 0xDB, 0x2C, 0x62, 0x95,
+		0xB4, 0x43, 0x0D, 0xFA, 0x91, 0x66, 0x28, 0xDF,
+		0x6A, 0x9D, 0xD3, 0x24, 0x4F, 0xB8, 0xF6, 0x01,
+		0x20, 0xD7, 0x99, 0x6E, 0x05, 0xF2, 0xBC, 0x4B,
+		0x81, 0x76, 0x38, 0xCF, 0xA4, 0x53, 0x1D, 0xEA,
+		0xCB, 0x3C, 0x72, 0x85, 0xEE, 0x19, 0x57, 0xA0,
+		0x15, 0xE2, 0xAC, 0x5B, 0x30, 0xC7, 0x89, 0x7E,
+		0x5F, 0xA8, 0xE6, 0x11, 0x7A, 0x8D, 0xC3, 0x34,
+		0xAB, 0x5C, 0x12, 0xE5, 0x8E, 0x79, 0x37, 0xC0,
+		0xE1, 0x16, 0x58, 0xAF, 0xC4, 0x33, 0x7D, 0x8A,
+		0x3F, 0xC8, 0x86, 0x71, 0x1A, 0xED, 0xA3, 0x54,
+		0x75, 0x82, 0xCC, 0x3B, 0x50, 0xA7, 0xE9, 0x1E,
+		0xD4, 0x23, 0x6D, 0x9A, 0xF1, 0x06, 0x48, 0xBF,
+		0x9E, 0x69, 0x27, 0xD0, 0xBB, 0x4C, 0x02, 0xF5,
+		0x40, 0xB7, 0xF9, 0x0E, 0x65, 0x92, 0xDC, 0x2B,
+		0x0A, 0xFD, 0xB3, 0x44, 0x2F, 0xD8, 0x96, 0x61,
+		0x55, 0xA2, 0xEC, 0x1B, 0x70, 0x87, 0xC9, 0x3E,
+		0x1F, 0xE8, 0xA6, 0x51, 0x3A, 0xCD, 0x83, 0x74,
+		0xC1, 0x36, 0x78, 0x8F, 0xE4, 0x13, 0x5D, 0xAA,
+		0x8B, 0x7C, 0x32, 0xC5, 0xAE, 0x59, 0x17, 0xE0,
+		0x2A, 0xDD, 0x93, 0x64, 0x0F, 0xF8, 0xB6, 0x41,
+		0x60, 0x97, 0xD9, 0x2E, 0x45, 0xB2, 0xFC, 0x0B,
+		0xBE, 0x49, 0x07, 0xF0, 0x9B, 0x6C, 0x22, 0xD5,
+		0xF4, 0x03, 0x4D, 0xBA, 0xD1, 0x26, 0x68, 0x9F,
+	};
+	return t[crc ^ data];
+}
+
+static u8 bcma_sprom_crc(const u16 *sprom)
+{
+	int word;
+	u8 crc = 0xFF;
+
+	for (word = 0; word < SSB_SPROMSIZE_WORDS_R4 - 1; word++) {
+		crc = bcma_crc8(crc, sprom[word] & 0x00FF);
+		crc = bcma_crc8(crc, (sprom[word] & 0xFF00) >> 8);
+	}
+	crc = bcma_crc8(crc, sprom[SSB_SPROMSIZE_WORDS_R4 - 1] & 0x00FF);
+	crc ^= 0xFF;
+
+	return crc;
+}
+
+static int bcma_sprom_check_crc(const u16 *sprom)
+{
+	u8 crc;
+	u8 expected_crc;
+	u16 tmp;
+
+	crc = bcma_sprom_crc(sprom);
+	tmp = sprom[SSB_SPROMSIZE_WORDS_R4 - 1] & SSB_SPROM_REVISION_CRC;
+	expected_crc = tmp >> SSB_SPROM_REVISION_CRC_SHIFT;
+	if (crc != expected_crc)
+		return -EPROTO;
+
+	return 0;
+}
+
+static int bcma_sprom_valid(const u16 *sprom)
+{
+	u16 revision;
+	int err;
+
+	err = bcma_sprom_check_crc(sprom);
+	if (err)
+		return err;
+
+	revision = sprom[SSB_SPROMSIZE_WORDS_R4 - 1] & SSB_SPROM_REVISION_REV;
+	if (revision != 8) {
+		pr_err("Unsupported SPROM revision: %d\n", revision);
+		return -ENOENT;
+	}
+
+	return 0;
+}
+
+/**************************************************
+ * SPROM extraction.
+ **************************************************/
+
+static void bcma_sprom_extract_r8(struct bcma_bus *bus, const u16 *sprom)
+{
+	u16 v;
+	int i;
+
+	for (i = 0; i < 3; i++) {
+		v = sprom[SPOFF(SSB_SPROM8_IL0MAC) + i];
+		*(((__be16 *)bus->sprom.il0mac) + i) = cpu_to_be16(v);
+	}
+}
+
+int bcma_sprom_get(struct bcma_bus *bus)
+{
+	u16 *sprom;
+	int err = 0;
+
+	if (!bus->drv_cc.core)
+		return -EOPNOTSUPP;
+
+	sprom = kcalloc(SSB_SPROMSIZE_WORDS_R4, sizeof(u16),
+			GFP_KERNEL);
+	if (!sprom)
+		return -ENOMEM;
+
+	bcma_sprom_read(bus, sprom);
+
+	err = bcma_sprom_valid(sprom);
+	if (err)
+		goto out;
+
+	bcma_sprom_extract_r8(bus, sprom);
+
+out:
+	kfree(sprom);
+	return err;
+}
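
The 256-entry table above is a reflected CRC-8 of the polynomial named in its comment: dropping the implicit x^8 term leaves coefficients 0b11010101 (0xD5), whose bit reversal is 0xAB, and eight LSB-first shift-and-XOR steps with 0xAB reproduce every entry (e.g. t[0x01] works out to 0xF7). A standalone equivalence sketch, not driver code:

	static u8 bcma_crc8_bitwise(u8 crc, u8 data)
	{
		int i;

		/* identical result to the table lookup t[crc ^ data] */
		crc ^= data;
		for (i = 0; i < 8; i++)
			crc = (crc & 1) ? (crc >> 1) ^ 0xAB : crc >> 1;
		return crc;
	}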
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
index 4a5abaf..9227f4a 100644
--- a/drivers/infiniband/core/netlink.c
+++ b/drivers/infiniband/core/netlink.c
@@ -148,7 +148,7 @@
 				return -EINVAL;
 			return netlink_dump_start(nls, skb, nlh,
 						  client->cb_table[op].dump,
-						  NULL);
+						  NULL, 0);
 		}
 	}
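
Context for the added ", 0": netlink_dump_start() gained a trailing argument in this merge window. Assumed prototype (the final u16 is a minimum skb allocation per dump pass; 0 keeps the previous behaviour):

	/*
	 * int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
	 *			  const struct nlmsghdr *nlh,
	 *			  int (*dump)(struct sk_buff *,
	 *				      struct netlink_callback *),
	 *			  int (*done)(struct netlink_callback *),
	 *			  u16 min_dump_alloc);
	 */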
 
diff --git a/drivers/infiniband/hw/amso1100/c2.c b/drivers/infiniband/hw/amso1100/c2.c
index 0cfc455..444470a 100644
--- a/drivers/infiniband/hw/amso1100/c2.c
+++ b/drivers/infiniband/hw/amso1100/c2.c
@@ -36,6 +36,7 @@
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/inetdevice.h>
+#include <linux/interrupt.h>
 #include <linux/delay.h>
 #include <linux/ethtool.h>
 #include <linux/mii.h>
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 2f02ab0..342cbc1 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -45,6 +45,7 @@
 #include <scsi/libiscsi.h>
 #include <scsi/scsi_transport_iscsi.h>
 
+#include <linux/interrupt.h>
 #include <linux/wait.h>
 #include <linux/sched.h>
 #include <linux/list.h>
diff --git a/drivers/isdn/hardware/mISDN/avmfritz.c b/drivers/isdn/hardware/mISDN/avmfritz.c
index 472a2af..861b651 100644
--- a/drivers/isdn/hardware/mISDN/avmfritz.c
+++ b/drivers/isdn/hardware/mISDN/avmfritz.c
@@ -20,6 +20,7 @@
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  *
  */
+#include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/delay.h>
diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c
index f6f3c87..a440d7f 100644
--- a/drivers/isdn/hardware/mISDN/hfcmulti.c
+++ b/drivers/isdn/hardware/mISDN/hfcmulti.c
@@ -152,6 +152,7 @@
 
 #define HFC_MULTI_VERSION	"2.03"
 
+#include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/pci.h>
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index b01a7be..3261de1 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -44,6 +44,7 @@
  *
  */
 
+#include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/delay.h>
diff --git a/drivers/isdn/hardware/mISDN/mISDNinfineon.c b/drivers/isdn/hardware/mISDN/mISDNinfineon.c
index bc0529a..6218775 100644
--- a/drivers/isdn/hardware/mISDN/mISDNinfineon.c
+++ b/drivers/isdn/hardware/mISDN/mISDNinfineon.c
@@ -38,6 +38,7 @@
  *
  */
 
+#include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/delay.h>
diff --git a/drivers/isdn/hardware/mISDN/mISDNipac.c b/drivers/isdn/hardware/mISDN/mISDNipac.c
index 64ecc6f..d2ffb1d 100644
--- a/drivers/isdn/hardware/mISDN/mISDNipac.c
+++ b/drivers/isdn/hardware/mISDN/mISDNipac.c
@@ -20,6 +20,7 @@
  *
  */
 
+#include <linux/irqreturn.h>
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/mISDNhw.h>
diff --git a/drivers/isdn/hardware/mISDN/netjet.c b/drivers/isdn/hardware/mISDN/netjet.c
index db25b6b..5ef9f11 100644
--- a/drivers/isdn/hardware/mISDN/netjet.c
+++ b/drivers/isdn/hardware/mISDN/netjet.c
@@ -20,6 +20,7 @@
  *
  */
 
+#include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/delay.h>
diff --git a/drivers/isdn/hardware/mISDN/speedfax.c b/drivers/isdn/hardware/mISDN/speedfax.c
index 9e07246..4d0d41e 100644
--- a/drivers/isdn/hardware/mISDN/speedfax.c
+++ b/drivers/isdn/hardware/mISDN/speedfax.c
@@ -22,6 +22,7 @@
  *
  */
 
+#include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/pci.h>
diff --git a/drivers/isdn/hardware/mISDN/w6692.c b/drivers/isdn/hardware/mISDN/w6692.c
index 9e84870..e10e028 100644
--- a/drivers/isdn/hardware/mISDN/w6692.c
+++ b/drivers/isdn/hardware/mISDN/w6692.c
@@ -21,6 +21,7 @@
  *
  */
 
+#include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/delay.h>
diff --git a/drivers/isdn/hisax/hisax.h b/drivers/isdn/hisax/hisax.h
index de1c669..0a5c42a 100644
--- a/drivers/isdn/hisax/hisax.h
+++ b/drivers/isdn/hisax/hisax.h
@@ -16,6 +16,7 @@
 #include <linux/slab.h>
 #include <linux/mm.h>
 #include <linux/mman.h>
+#include <linux/interrupt.h>
 #include <linux/ioport.h>
 #include <linux/timer.h>
 #include <linux/wait.h>
diff --git a/drivers/isdn/hisax/hisax_fcpcipnp.c b/drivers/isdn/hisax/hisax_fcpcipnp.c
index 8b0a7d8..478ebab 100644
--- a/drivers/isdn/hisax/hisax_fcpcipnp.c
+++ b/drivers/isdn/hisax/hisax_fcpcipnp.c
@@ -25,6 +25,7 @@
 
 #include <linux/module.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/pci.h>
 #include <linux/isapnp.h>
 #include <linux/kmod.h>
diff --git a/drivers/media/dvb/b2c2/flexcop-common.h b/drivers/media/dvb/b2c2/flexcop-common.h
index 9e2148a..437912e 100644
--- a/drivers/media/dvb/b2c2/flexcop-common.h
+++ b/drivers/media/dvb/b2c2/flexcop-common.h
@@ -6,6 +6,7 @@
 #ifndef __FLEXCOP_COMMON_H__
 #define __FLEXCOP_COMMON_H__
 
+#include <linux/interrupt.h>
 #include <linux/pci.h>
 #include <linux/mutex.h>
 
diff --git a/drivers/media/dvb/dm1105/dm1105.c b/drivers/media/dvb/dm1105/dm1105.c
index b2b0c45..55e6533 100644
--- a/drivers/media/dvb/dm1105/dm1105.c
+++ b/drivers/media/dvb/dm1105/dm1105.c
@@ -22,6 +22,7 @@
 #include <linux/i2c.h>
 #include <linux/i2c-algo-bit.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/proc_fs.h>
diff --git a/drivers/media/dvb/mantis/mantis_common.h b/drivers/media/dvb/mantis/mantis_common.h
index bd400d2..49dbca1 100644
--- a/drivers/media/dvb/mantis/mantis_common.h
+++ b/drivers/media/dvb/mantis/mantis_common.h
@@ -21,6 +21,7 @@
 #ifndef __MANTIS_COMMON_H
 #define __MANTIS_COMMON_H
 
+#include <linux/interrupt.h>
 #include <linux/mutex.h>
 #include <linux/workqueue.h>
 
diff --git a/drivers/media/dvb/pluto2/pluto2.c b/drivers/media/dvb/pluto2/pluto2.c
index 7cb79ec..80fb510 100644
--- a/drivers/media/dvb/pluto2/pluto2.c
+++ b/drivers/media/dvb/pluto2/pluto2.c
@@ -26,6 +26,7 @@
 #include <linux/i2c.h>
 #include <linux/i2c-algo-bit.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
diff --git a/drivers/net/3c503.c b/drivers/net/3c503.c
index 5b73298..84e68f1 100644
--- a/drivers/net/3c503.c
+++ b/drivers/net/3c503.c
@@ -49,6 +49,7 @@
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/ethtool.h>
 
 #include <asm/uaccess.h>
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 10c4505..73b10b0 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -60,6 +60,7 @@
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/pci.h>
 #include <linux/dma-mapping.h>
 #include <linux/delay.h>
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index 98517a3..ed6355c 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -100,6 +100,7 @@
 #include <linux/compiler.h>
 #include <linux/pci.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/rtnetlink.h>
diff --git a/drivers/net/8390.h b/drivers/net/8390.h
index 3d9e8fb..58a12e4 100644
--- a/drivers/net/8390.h
+++ b/drivers/net/8390.h
@@ -9,6 +9,7 @@
 
 #include <linux/if_ether.h>
 #include <linux/ioport.h>
+#include <linux/irqreturn.h>
 #include <linux/skbuff.h>
 
 #define TX_PAGES 12	/* Two Tx slots */
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 19f04a3..f5919c2 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2115,7 +2115,6 @@
 
 config E1000E
 	tristate "Intel(R) PRO/1000 PCI-Express Gigabit Ethernet support"
-	select CRC32
 	depends on PCI && (!SPARC32 || BROKEN)
 	select CRC32
 	---help---
@@ -2197,15 +2196,6 @@
 
 source "drivers/net/ixp2000/Kconfig"
 
-config MYRI_SBUS
-	tristate "MyriCOM Gigabit Ethernet support"
-	depends on SBUS
-	help
-	  This driver supports MyriCOM Sbus gigabit Ethernet cards.
-
-	  To compile this driver as a module, choose M here: the module
-	  will be called myri_sbus.  This is recommended.
-
 config NS83820
 	tristate "National Semiconductor DP83820 support"
 	depends on PCI
@@ -2561,6 +2551,15 @@
 	  ML7223 is companion chip for Intel Atom E6xx series.
 	  ML7223 is completely compatible for Intel EG20T PCH.
 
+config FTGMAC100
+	tristate "Faraday FTGMAC100 Gigabit Ethernet support"
+	depends on ARM
+	select PHYLIB
+	help
+	  This driver supports the FTGMAC100 Gigabit Ethernet controller
+	  from Faraday. It is used on Faraday A369, Andes AG102 and some
+	  other ARM/NDS32 SoCs.
+
 endif # NETDEV_1000
 
 #
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 776a478..86f6c8d 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -59,7 +59,6 @@
 obj-$(CONFIG_SUNLANCE) += sunlance.o
 obj-$(CONFIG_SUNQE) += sunqe.o
 obj-$(CONFIG_SUNBMAC) += sunbmac.o
-obj-$(CONFIG_MYRI_SBUS) += myri_sbus.o
 obj-$(CONFIG_SUNGEM) += sungem.o sungem_phy.o
 obj-$(CONFIG_CASSINI) += cassini.o
 obj-$(CONFIG_SUNVNET) += sunvnet.o
@@ -148,6 +147,7 @@
 obj-$(CONFIG_NE_H8300) += ne-h8300.o
 obj-$(CONFIG_AX88796) += ax88796.o
 obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o
+obj-$(CONFIG_FTGMAC100) += ftgmac100.o
 obj-$(CONFIG_FTMAC100) += ftmac100.o
 
 obj-$(CONFIG_TSI108_ETH) += tsi108_eth.o
diff --git a/drivers/net/ac3200.c b/drivers/net/ac3200.c
index 5181e93..f07b2e9 100644
--- a/drivers/net/ac3200.c
+++ b/drivers/net/ac3200.c
@@ -32,6 +32,7 @@
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 
 #include <asm/system.h>
 #include <asm/io.h>
diff --git a/drivers/net/acenic.h b/drivers/net/acenic.h
index 0681da7..fd25a3b 100644
--- a/drivers/net/acenic.h
+++ b/drivers/net/acenic.h
@@ -1,5 +1,6 @@
 #ifndef _ACENIC_H_
 #define _ACENIC_H_
+#include <linux/interrupt.h>
 
 
 /*
diff --git a/drivers/net/amd8111e.c b/drivers/net/amd8111e.c
index 241b185..db6d2da 100644
--- a/drivers/net/amd8111e.c
+++ b/drivers/net/amd8111e.c
@@ -75,6 +75,7 @@
 #include <linux/compiler.h>
 #include <linux/delay.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/ioport.h>
 #include <linux/pci.h>
 #include <linux/netdevice.h>
@@ -1958,7 +1959,7 @@
 						 IPG_CONVERGE_JIFFIES;
 		lp->ipg_data.ipg = DEFAULT_IPG;
 		lp->ipg_data.ipg_state = CSTATE;
-	};
+	}
 
 	/*  display driver and device information */
 
diff --git a/drivers/net/apne.c b/drivers/net/apne.c
index 2fe60f1..5477373 100644
--- a/drivers/net/apne.c
+++ b/drivers/net/apne.c
@@ -36,6 +36,7 @@
 #include <linux/delay.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
+#include <linux/interrupt.h>
 #include <linux/jiffies.h>
 
 #include <asm/system.h>
diff --git a/drivers/net/arcnet/arc-rimi.c b/drivers/net/arcnet/arc-rimi.c
index 9efbbba..25197b6 100644
--- a/drivers/net/arcnet/arc-rimi.c
+++ b/drivers/net/arcnet/arc-rimi.c
@@ -32,6 +32,7 @@
 #include <linux/netdevice.h>
 #include <linux/bootmem.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <asm/io.h>
 #include <linux/arcdevice.h>
 
diff --git a/drivers/net/arcnet/com20020-isa.c b/drivers/net/arcnet/com20020-isa.c
index 3727282..45c61a2 100644
--- a/drivers/net/arcnet/com20020-isa.c
+++ b/drivers/net/arcnet/com20020-isa.c
@@ -34,6 +34,7 @@
 #include <linux/delay.h>
 #include <linux/netdevice.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/bootmem.h>
 #include <linux/arcdevice.h>
 #include <linux/com20020.h>
diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
index 48a1dbf..d427493 100644
--- a/drivers/net/arcnet/com20020-pci.c
+++ b/drivers/net/arcnet/com20020-pci.c
@@ -34,6 +34,7 @@
 #include <linux/errno.h>
 #include <linux/netdevice.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/pci.h>
 #include <linux/arcdevice.h>
 #include <linux/com20020.h>
diff --git a/drivers/net/arcnet/com20020.c b/drivers/net/arcnet/com20020.c
index c9e4594..7bfb91f 100644
--- a/drivers/net/arcnet/com20020.c
+++ b/drivers/net/arcnet/com20020.c
@@ -33,6 +33,7 @@
 #include <linux/delay.h>
 #include <linux/netdevice.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/arcdevice.h>
 #include <linux/com20020.h>
 
diff --git a/drivers/net/arcnet/com90io.c b/drivers/net/arcnet/com90io.c
index eb27976..487d780 100644
--- a/drivers/net/arcnet/com90io.c
+++ b/drivers/net/arcnet/com90io.c
@@ -33,6 +33,7 @@
 #include <linux/netdevice.h>
 #include <linux/bootmem.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <asm/io.h>
 #include <linux/arcdevice.h>
 
diff --git a/drivers/net/arcnet/com90xx.c b/drivers/net/arcnet/com90xx.c
index f3b46f7..b80fbe4 100644
--- a/drivers/net/arcnet/com90xx.c
+++ b/drivers/net/arcnet/com90xx.c
@@ -27,6 +27,7 @@
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/ioport.h>
 #include <linux/delay.h>
 #include <linux/netdevice.h>
diff --git a/drivers/net/arm/at91_ether.c b/drivers/net/arm/at91_ether.c
index e07b314..29dc435 100644
--- a/drivers/net/arm/at91_ether.c
+++ b/drivers/net/arm/at91_ether.c
@@ -19,6 +19,7 @@
 
 #include <linux/module.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/mii.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index 0b46b8e..4317af8 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -19,6 +19,7 @@
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/moduleparam.h>
 #include <linux/platform_device.h>
 #include <linux/delay.h>
diff --git a/drivers/net/arm/ks8695net.c b/drivers/net/arm/ks8695net.c
index a7b0caa..bb62b3f 100644
--- a/drivers/net/arm/ks8695net.c
+++ b/drivers/net/arm/ks8695net.c
@@ -21,6 +21,7 @@
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/skbuff.h>
 #include <linux/spinlock.h>
 #include <linux/crc32.h>
diff --git a/drivers/net/atl1c/atl1c.h b/drivers/net/atl1c/atl1c.h
index 925929d..dfe4370 100644
--- a/drivers/net/atl1c/atl1c.h
+++ b/drivers/net/atl1c/atl1c.h
@@ -24,6 +24,7 @@
 
 #include <linux/version.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/types.h>
 #include <linux/errno.h>
 #include <linux/module.h>
diff --git a/drivers/net/atl1e/atl1e.h b/drivers/net/atl1e/atl1e.h
index 490d3b3..9ac37e3 100644
--- a/drivers/net/atl1e/atl1e.h
+++ b/drivers/net/atl1e/atl1e.h
@@ -25,6 +25,7 @@
 
 #include <linux/version.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/types.h>
 #include <linux/errno.h>
 #include <linux/module.h>
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index a69331e..085560e 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -25,6 +25,7 @@
 #include <linux/pci.h>
 #include <linux/delay.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/dma-mapping.h>
 #include <linux/ssb/ssb.h>
 #include <linux/slab.h>
diff --git a/drivers/net/bcm63xx_enet.c b/drivers/net/bcm63xx_enet.c
index f1573d4..4753bb9 100644
--- a/drivers/net/bcm63xx_enet.c
+++ b/drivers/net/bcm63xx_enet.c
@@ -18,6 +18,7 @@
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/clk.h>
 #include <linux/etherdevice.h>
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index a7db870..a36f5a6 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -87,6 +87,7 @@
 
 #define MAX_RSS_QS		4	/* BE limit is 4 queues/port */
 #define MAX_RX_QS		(MAX_RSS_QS + 1) /* RSS qs + 1 def Rx */
+#define MAX_TX_QS		8
 #define BE_MAX_MSIX_VECTORS	(MAX_RX_QS + 1)/* RX + TX */
 #define BE_NAPI_WEIGHT		64
 #define MAX_RX_POST 		BE_NAPI_WEIGHT /* Frags posted at a time */
@@ -170,7 +171,6 @@
 	u32 be_tx_reqs;		/* number of TX requests initiated */
 	u32 be_tx_stops;	/* number of times TX Q was stopped */
 	u32 be_tx_wrbs;		/* number of tx WRBs used */
-	u32 be_tx_events;	/* number of tx completion events  */
 	u32 be_tx_compl;	/* number of tx completion entries processed */
 	ulong be_tx_jiffies;
 	u64 be_tx_bytes;
@@ -184,6 +184,7 @@
 	struct be_queue_info cq;
 	/* Remember the skbs that were transmitted */
 	struct sk_buff *sent_skb_list[TX_Q_LEN];
+	struct be_tx_stats stats;
 };
 
 /* Struct to remember the pages posted for rx frags */
@@ -319,8 +320,8 @@
 
 	/* TX Rings */
 	struct be_eq_obj tx_eq;
-	struct be_tx_obj tx_obj;
-	struct be_tx_stats tx_stats;
+	struct be_tx_obj tx_obj[MAX_TX_QS];
+	u8 num_tx_qs;
 
 	u32 cache_line_break[8];
 
@@ -391,7 +392,7 @@
 extern const struct ethtool_ops be_ethtool_ops;
 
 #define msix_enabled(adapter)		(adapter->num_msix_vec > 0)
-#define tx_stats(adapter)		(&adapter->tx_stats)
+#define tx_stats(txo)			(&txo->stats)
 #define rx_stats(rxo)			(&rxo->stats)
 
 #define BE_SET_NETDEV_OPS(netdev, ops)	(netdev->netdev_ops = ops)
@@ -405,6 +406,10 @@
 	for (i = 0, rxo = &adapter->rx_obj[i+1]; i < (adapter->num_rx_qs - 1);\
 		i++, rxo++)
 
+#define for_all_tx_queues(adapter, txo, i)				\
+	for (i = 0, txo = &adapter->tx_obj[i]; i < adapter->num_tx_qs;	\
+		i++, txo++)
+
 #define PAGE_SHIFT_4K		12
 #define PAGE_SIZE_4K		(1 << PAGE_SHIFT_4K)
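
The new for_all_tx_queues iterator mirrors for_all_rx_queues just above it. A toy walker (hypothetical helper, assuming the usual driver includes):

	static void example_reset_tx_stats(struct be_adapter *adapter)
	{
		struct be_tx_obj *txo;
		int i;

		/* per-queue TX stats now live inside each be_tx_obj */
		for_all_tx_queues(adapter, txo, i)
			memset(&txo->stats, 0, sizeof(txo->stats));
	}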
 
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 81654ae..0c12c2d 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -799,12 +799,12 @@
 	return len_encoded;
 }
 
-int be_cmd_mccq_create(struct be_adapter *adapter,
+int be_cmd_mccq_ext_create(struct be_adapter *adapter,
 			struct be_queue_info *mccq,
 			struct be_queue_info *cq)
 {
 	struct be_mcc_wrb *wrb;
-	struct be_cmd_req_mcc_create *req;
+	struct be_cmd_req_mcc_ext_create *req;
 	struct be_dma_mem *q_mem = &mccq->dma_mem;
 	void *ctxt;
 	int status;
@@ -859,6 +859,67 @@
 	return status;
 }
 
+int be_cmd_mccq_org_create(struct be_adapter *adapter,
+			struct be_queue_info *mccq,
+			struct be_queue_info *cq)
+{
+	struct be_mcc_wrb *wrb;
+	struct be_cmd_req_mcc_create *req;
+	struct be_dma_mem *q_mem = &mccq->dma_mem;
+	void *ctxt;
+	int status;
+
+	if (mutex_lock_interruptible(&adapter->mbox_lock))
+		return -1;
+
+	wrb = wrb_from_mbox(adapter);
+	req = embedded_payload(wrb);
+	ctxt = &req->context;
+
+	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+			OPCODE_COMMON_MCC_CREATE);
+
+	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+			OPCODE_COMMON_MCC_CREATE, sizeof(*req));
+
+	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
+
+	AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
+	AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
+			be_encoded_q_len(mccq->len));
+	AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
+
+	be_dws_cpu_to_le(ctxt, sizeof(req->context));
+
+	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
+
+	status = be_mbox_notify_wait(adapter);
+	if (!status) {
+		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
+		mccq->id = le16_to_cpu(resp->id);
+		mccq->created = true;
+	}
+
+	mutex_unlock(&adapter->mbox_lock);
+	return status;
+}
+
+int be_cmd_mccq_create(struct be_adapter *adapter,
+			struct be_queue_info *mccq,
+			struct be_queue_info *cq)
+{
+	int status;
+
+	status = be_cmd_mccq_ext_create(adapter, mccq, cq);
+	if (status && !lancer_chip(adapter)) {
+		dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
+			"or newer to avoid conflicting priorities between NIC "
+			"and FCoE traffic");
+		status = be_cmd_mccq_org_create(adapter, mccq, cq);
+	}
+	return status;
+}
+
 int be_cmd_txq_create(struct be_adapter *adapter,
 			struct be_queue_info *txq,
 			struct be_queue_info *cq)
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index 8148cc6..d08289e 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -434,6 +434,14 @@
 	struct be_cmd_req_hdr hdr;
 	u16 num_pages;
 	u16 cq_id;
+	u8 context[sizeof(struct amap_mcc_context_be) / 8];
+	struct phys_addr pages[8];
+} __packed;
+
+struct be_cmd_req_mcc_ext_create {
+	struct be_cmd_req_hdr hdr;
+	u16 num_pages;
+	u16 cq_id;
 	u32 async_event_bitmap[1];
 	u8 context[sizeof(struct amap_mcc_context_be) / 8];
 	struct phys_addr pages[8];
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index facfe3c..84e03a7 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -52,12 +52,7 @@
 	{NETSTAT_INFO(tx_errors)},
 	{NETSTAT_INFO(rx_dropped)},
 	{NETSTAT_INFO(tx_dropped)},
-	{DRVSTAT_TX_INFO(be_tx_rate)},
-	{DRVSTAT_TX_INFO(be_tx_reqs)},
-	{DRVSTAT_TX_INFO(be_tx_wrbs)},
-	{DRVSTAT_TX_INFO(be_tx_stops)},
-	{DRVSTAT_TX_INFO(be_tx_events)},
-	{DRVSTAT_TX_INFO(be_tx_compl)},
+	{DRVSTAT_INFO(be_tx_events)},
 	{DRVSTAT_INFO(rx_crc_errors)},
 	{DRVSTAT_INFO(rx_alignment_symbol_errors)},
 	{DRVSTAT_INFO(rx_pause_frames)},
@@ -111,6 +106,16 @@
 };
 #define ETHTOOL_RXSTATS_NUM (ARRAY_SIZE(et_rx_stats))
 
+/* Stats related to multi TX queues */
+static const struct be_ethtool_stat et_tx_stats[] = {
+	{DRVSTAT_TX_INFO(be_tx_rate)},
+	{DRVSTAT_TX_INFO(be_tx_reqs)},
+	{DRVSTAT_TX_INFO(be_tx_wrbs)},
+	{DRVSTAT_TX_INFO(be_tx_stops)},
+	{DRVSTAT_TX_INFO(be_tx_compl)}
+};
+#define ETHTOOL_TXSTATS_NUM (ARRAY_SIZE(et_tx_stats))
+
 static const char et_self_tests[][ETH_GSTRING_LEN] = {
 	"MAC Loopback test",
 	"PHY Loopback test",
@@ -253,17 +258,15 @@
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
 	struct be_rx_obj *rxo;
+	struct be_tx_obj *txo;
 	void *p = NULL;
-	int i, j;
+	int i, j, base;
 
 	for (i = 0; i < ETHTOOL_STATS_NUM; i++) {
 		switch (et_stats[i].type) {
 		case NETSTAT:
 			p = &netdev->stats;
 			break;
-		case DRVSTAT_TX:
-			p = &adapter->tx_stats;
-			break;
 		case DRVSTAT:
 			p = &adapter->drv_stats;
 			break;
@@ -274,6 +277,7 @@
 				*(u64 *)p: *(u32 *)p;
 	}
 
+	base = ETHTOOL_STATS_NUM;
 	for_all_rx_queues(adapter, rxo, j) {
 		for (i = 0; i < ETHTOOL_RXSTATS_NUM; i++) {
 			switch (et_rx_stats[i].type) {
@@ -285,11 +289,21 @@
 								rxo->q.id;
 				break;
 			}
-			data[ETHTOOL_STATS_NUM + j * ETHTOOL_RXSTATS_NUM + i] =
+			data[base + j * ETHTOOL_RXSTATS_NUM + i] =
 				(et_rx_stats[i].size == sizeof(u64)) ?
 					*(u64 *)p: *(u32 *)p;
 		}
 	}
+
+	base = ETHTOOL_STATS_NUM + adapter->num_rx_qs * ETHTOOL_RXSTATS_NUM;
+	for_all_tx_queues(adapter, txo, j) {
+		for (i = 0; i < ETHTOOL_TXSTATS_NUM; i++) {
+			p = (u8 *)&txo->stats + et_tx_stats[i].offset;
+			data[base + j * ETHTOOL_TXSTATS_NUM + i] =
+				(et_tx_stats[i].size == sizeof(u64)) ?
+					*(u64 *)p: *(u32 *)p;
+		}
+	}
 }
 
 static void
@@ -312,6 +326,13 @@
 				data += ETH_GSTRING_LEN;
 			}
 		}
+		for (i = 0; i < adapter->num_tx_qs; i++) {
+			for (j = 0; j < ETHTOOL_TXSTATS_NUM; j++) {
+				sprintf(data, "txq%d: %s", i,
+					et_tx_stats[j].desc);
+				data += ETH_GSTRING_LEN;
+			}
+		}
 		break;
 	case ETH_SS_TEST:
 		for (i = 0; i < ETHTOOL_TESTS_NUM; i++) {
@@ -331,7 +352,8 @@
 		return ETHTOOL_TESTS_NUM;
 	case ETH_SS_STATS:
 		return ETHTOOL_STATS_NUM +
-			adapter->num_rx_qs * ETHTOOL_RXSTATS_NUM;
+			adapter->num_rx_qs * ETHTOOL_RXSTATS_NUM +
+			adapter->num_tx_qs * ETHTOOL_TXSTATS_NUM;
 	default:
 		return -EINVAL;
 	}
@@ -457,10 +479,10 @@
 	struct be_adapter *adapter = netdev_priv(netdev);
 
 	ring->rx_max_pending = adapter->rx_obj[0].q.len;
-	ring->tx_max_pending = adapter->tx_obj.q.len;
+	ring->tx_max_pending = adapter->tx_obj[0].q.len;
 
 	ring->rx_pending = atomic_read(&adapter->rx_obj[0].q.used);
-	ring->tx_pending = atomic_read(&adapter->tx_obj.q.used);
+	ring->tx_pending = atomic_read(&adapter->tx_obj[0].q.used);
 }
 
 static void
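
The flattened ethtool stats layout produced by the two loops above, written out as index arithmetic (derived from the code, not new driver logic):

	/*
	 * idx(global g)      = g
	 * idx(rxq j, stat i) = ETHTOOL_STATS_NUM
	 *                    + j * ETHTOOL_RXSTATS_NUM + i
	 * idx(txq j, stat i) = ETHTOOL_STATS_NUM
	 *                    + num_rx_qs * ETHTOOL_RXSTATS_NUM
	 *                    + j * ETHTOOL_TXSTATS_NUM + i
	 */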
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index a485f7f..c4f564c 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -362,8 +362,8 @@
 	drvs->rx_priority_pause_frames = 0;
 	drvs->pmem_fifo_overflow_drop = 0;
 	drvs->rx_pause_frames =
-		make_64bit_val(pport_stats->rx_pause_frames_lo,
-				 pport_stats->rx_pause_frames_hi);
+		make_64bit_val(pport_stats->rx_pause_frames_hi,
+				 pport_stats->rx_pause_frames_lo);
 	drvs->rx_crc_errors = make_64bit_val(pport_stats->rx_crc_errors_hi,
 						pport_stats->rx_crc_errors_lo);
 	drvs->rx_control_frames =
@@ -427,6 +427,7 @@
 	struct be_drv_stats *drvs = &adapter->drv_stats;
 	struct net_device_stats *dev_stats = &adapter->netdev->stats;
 	struct be_rx_obj *rxo;
+	struct be_tx_obj *txo;
 	int i;
 
 	memset(dev_stats, 0, sizeof(*dev_stats));
@@ -450,8 +451,10 @@
 		}
 	}
 
-	dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
-	dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;
+	for_all_tx_queues(adapter, txo, i) {
+		dev_stats->tx_packets += tx_stats(txo)->be_tx_pkts;
+		dev_stats->tx_bytes += tx_stats(txo)->be_tx_bytes;
+	}
 
 	/* bad pkts received */
 	dev_stats->rx_errors = drvs->rx_crc_errors +
@@ -554,9 +557,9 @@
 	return rate;
 }
 
-static void be_tx_rate_update(struct be_adapter *adapter)
+static void be_tx_rate_update(struct be_tx_obj *txo)
 {
-	struct be_tx_stats *stats = tx_stats(adapter);
+	struct be_tx_stats *stats = tx_stats(txo);
 	ulong now = jiffies;
 
 	/* Wrapped around? */
@@ -575,10 +578,11 @@
 	}
 }
 
-static void be_tx_stats_update(struct be_adapter *adapter,
+static void be_tx_stats_update(struct be_tx_obj *txo,
 			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
 {
-	struct be_tx_stats *stats = tx_stats(adapter);
+	struct be_tx_stats *stats = tx_stats(txo);
+
 	stats->be_tx_reqs++;
 	stats->be_tx_wrbs += wrb_cnt;
 	stats->be_tx_bytes += copied;
@@ -682,14 +686,13 @@
 	}
 }
 
-static int make_tx_wrbs(struct be_adapter *adapter,
+static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
 		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
 {
 	dma_addr_t busaddr;
 	int i, copied = 0;
 	struct device *dev = &adapter->pdev->dev;
 	struct sk_buff *first_skb = skb;
-	struct be_queue_info *txq = &adapter->tx_obj.q;
 	struct be_eth_wrb *wrb;
 	struct be_eth_hdr_wrb *hdr;
 	bool map_single = false;
@@ -753,19 +756,19 @@
 			struct net_device *netdev)
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
-	struct be_tx_obj *tx_obj = &adapter->tx_obj;
-	struct be_queue_info *txq = &tx_obj->q;
+	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
+	struct be_queue_info *txq = &txo->q;
 	u32 wrb_cnt = 0, copied = 0;
 	u32 start = txq->head;
 	bool dummy_wrb, stopped = false;
 
 	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
 
-	copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
+	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
 	if (copied) {
 		/* record the sent skb in the sent_skb table */
-		BUG_ON(tx_obj->sent_skb_list[start]);
-		tx_obj->sent_skb_list[start] = skb;
+		BUG_ON(txo->sent_skb_list[start]);
+		txo->sent_skb_list[start] = skb;
 
 		/* Ensure txq has space for the next skb; Else stop the queue
 		 * *BEFORE* ringing the tx doorbell, so that we serialize the
@@ -774,13 +777,13 @@
 		atomic_add(wrb_cnt, &txq->used);
 		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
 								txq->len) {
-			netif_stop_queue(netdev);
+			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
 			stopped = true;
 		}
 
 		be_txq_notify(adapter, txq->id, wrb_cnt);
 
-		be_tx_stats_update(adapter, wrb_cnt, copied,
+		be_tx_stats_update(txo, wrb_cnt, copied,
 				skb_shinfo(skb)->gso_segs, stopped);
 	} else {
 		txq->head = start;
@@ -1459,11 +1462,12 @@
 	return txcp;
 }
 
-static u16 be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
+static u16 be_tx_compl_process(struct be_adapter *adapter,
+		struct be_tx_obj *txo, u16 last_index)
 {
-	struct be_queue_info *txq = &adapter->tx_obj.q;
+	struct be_queue_info *txq = &txo->q;
 	struct be_eth_wrb *wrb;
-	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
+	struct sk_buff **sent_skbs = txo->sent_skb_list;
 	struct sk_buff *sent_skb;
 	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
 	bool unmap_skb_hdr = true;
@@ -1504,7 +1508,8 @@
 }
 
 static int event_handle(struct be_adapter *adapter,
-			struct be_eq_obj *eq_obj)
+			struct be_eq_obj *eq_obj,
+			bool rearm)
 {
 	struct be_eq_entry *eqe;
 	u16 num = 0;
@@ -1517,7 +1522,10 @@
 	/* Deal with any spurious interrupts that come
 	 * without events
 	 */
-	be_eq_notify(adapter, eq_obj->q.id, true, true, num);
+	if (!num)
+		rearm = true;
+
+	be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
 	if (num)
 		napi_schedule(&eq_obj->napi);
 
@@ -1565,13 +1573,14 @@
 	BUG_ON(atomic_read(&rxq->used));
 }
 
-static void be_tx_compl_clean(struct be_adapter *adapter)
+static void be_tx_compl_clean(struct be_adapter *adapter,
+				struct be_tx_obj *txo)
 {
-	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
-	struct be_queue_info *txq = &adapter->tx_obj.q;
+	struct be_queue_info *tx_cq = &txo->cq;
+	struct be_queue_info *txq = &txo->q;
 	struct be_eth_tx_compl *txcp;
 	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
-	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
+	struct sk_buff **sent_skbs = txo->sent_skb_list;
 	struct sk_buff *sent_skb;
 	bool dummy_wrb;
 
@@ -1580,7 +1589,7 @@
 		while ((txcp = be_tx_compl_get(tx_cq))) {
 			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
 					wrb_index, txcp);
-			num_wrbs += be_tx_compl_process(adapter, end_idx);
+			num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
 			cmpl++;
 		}
 		if (cmpl) {
@@ -1607,7 +1616,7 @@
 		index_adv(&end_idx,
 			wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
 			txq->len);
-		num_wrbs = be_tx_compl_process(adapter, end_idx);
+		num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
 		atomic_sub(num_wrbs, &txq->used);
 	}
 }
@@ -1666,16 +1675,20 @@
 static void be_tx_queues_destroy(struct be_adapter *adapter)
 {
 	struct be_queue_info *q;
+	struct be_tx_obj *txo;
+	u8 i;
 
-	q = &adapter->tx_obj.q;
-	if (q->created)
-		be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
-	be_queue_free(adapter, q);
+	for_all_tx_queues(adapter, txo, i) {
+		q = &txo->q;
+		if (q->created)
+			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
+		be_queue_free(adapter, q);
 
-	q = &adapter->tx_obj.cq;
-	if (q->created)
-		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
-	be_queue_free(adapter, q);
+		q = &txo->cq;
+		if (q->created)
+			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
+		be_queue_free(adapter, q);
+	}
 
 	/* Clear any residual events */
 	be_eq_clean(adapter, &adapter->tx_eq);
@@ -1686,56 +1699,48 @@
 	be_queue_free(adapter, q);
 }
 
+/* One TX event queue is shared by all TX compl qs */
 static int be_tx_queues_create(struct be_adapter *adapter)
 {
 	struct be_queue_info *eq, *q, *cq;
+	struct be_tx_obj *txo;
+	u8 i;
 
 	adapter->tx_eq.max_eqd = 0;
 	adapter->tx_eq.min_eqd = 0;
 	adapter->tx_eq.cur_eqd = 96;
 	adapter->tx_eq.enable_aic = false;
-	/* Alloc Tx Event queue */
+
 	eq = &adapter->tx_eq.q;
-	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
+	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
+		sizeof(struct be_eq_entry)))
 		return -1;
 
-	/* Ask BE to create Tx Event queue */
 	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
-		goto tx_eq_free;
-
+		goto err;
 	adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
 
-
-	/* Alloc TX eth compl queue */
-	cq = &adapter->tx_obj.cq;
-	if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
+	for_all_tx_queues(adapter, txo, i) {
+		cq = &txo->cq;
+		if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
 			sizeof(struct be_eth_tx_compl)))
-		goto tx_eq_destroy;
+			goto err;
 
-	/* Ask BE to create Tx eth compl queue */
-	if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
-		goto tx_cq_free;
+		if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
+			goto err;
 
-	/* Alloc TX eth queue */
-	q = &adapter->tx_obj.q;
-	if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
-		goto tx_cq_destroy;
+		q = &txo->q;
+		if (be_queue_alloc(adapter, q, TX_Q_LEN,
+			sizeof(struct be_eth_wrb)))
+			goto err;
 
-	/* Ask BE to create Tx eth queue */
-	if (be_cmd_txq_create(adapter, q, cq))
-		goto tx_q_free;
+		if (be_cmd_txq_create(adapter, q, cq))
+			goto err;
+	}
 	return 0;
 
-tx_q_free:
-	be_queue_free(adapter, q);
-tx_cq_destroy:
-	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
-tx_cq_free:
-	be_queue_free(adapter, cq);
-tx_eq_destroy:
-	be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
-tx_eq_free:
-	be_queue_free(adapter, eq);
+err:
+	be_tx_queues_destroy(adapter);
 	return -1;
 }
 
@@ -1876,10 +1881,10 @@
 
 	if (lancer_chip(adapter)) {
 		if (event_peek(&adapter->tx_eq))
-			tx = event_handle(adapter, &adapter->tx_eq);
+			tx = event_handle(adapter, &adapter->tx_eq, false);
 		for_all_rx_queues(adapter, rxo, i) {
 			if (event_peek(&rxo->rx_eq))
-				rx |= event_handle(adapter, &rxo->rx_eq);
+				rx |= event_handle(adapter, &rxo->rx_eq, true);
 		}
 
 		if (!(tx || rx))
@@ -1892,11 +1897,11 @@
 			return IRQ_NONE;
 
 		if ((1 << adapter->tx_eq.eq_idx & isr))
-			event_handle(adapter, &adapter->tx_eq);
+			event_handle(adapter, &adapter->tx_eq, false);
 
 		for_all_rx_queues(adapter, rxo, i) {
 			if ((1 << rxo->rx_eq.eq_idx & isr))
-				event_handle(adapter, &rxo->rx_eq);
+				event_handle(adapter, &rxo->rx_eq, true);
 		}
 	}
 
@@ -1908,7 +1913,7 @@
 	struct be_rx_obj *rxo = dev;
 	struct be_adapter *adapter = rxo->adapter;
 
-	event_handle(adapter, &rxo->rx_eq);
+	event_handle(adapter, &rxo->rx_eq, true);
 
 	return IRQ_HANDLED;
 }
@@ -1917,7 +1922,7 @@
 {
 	struct be_adapter *adapter = dev;
 
-	event_handle(adapter, &adapter->tx_eq);
+	event_handle(adapter, &adapter->tx_eq, false);
 
 	return IRQ_HANDLED;
 }
@@ -1978,45 +1983,48 @@
 	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
 	struct be_adapter *adapter =
 		container_of(tx_eq, struct be_adapter, tx_eq);
-	struct be_queue_info *txq = &adapter->tx_obj.q;
-	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
+	struct be_tx_obj *txo;
 	struct be_eth_tx_compl *txcp;
-	int tx_compl = 0, mcc_compl, status = 0;
-	u16 end_idx, num_wrbs = 0;
+	int tx_compl, mcc_compl, status = 0;
+	u8 i;
+	u16 num_wrbs;
 
-	while ((txcp = be_tx_compl_get(tx_cq))) {
-		end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
-				wrb_index, txcp);
-		num_wrbs += be_tx_compl_process(adapter, end_idx);
-		tx_compl++;
+	for_all_tx_queues(adapter, txo, i) {
+		tx_compl = 0;
+		num_wrbs = 0;
+		while ((txcp = be_tx_compl_get(&txo->cq))) {
+			num_wrbs += be_tx_compl_process(adapter, txo,
+				AMAP_GET_BITS(struct amap_eth_tx_compl,
+					wrb_index, txcp));
+			tx_compl++;
+		}
+		if (tx_compl) {
+			be_cq_notify(adapter, txo->cq.id, true, tx_compl);
+
+			atomic_sub(num_wrbs, &txo->q.used);
+
+			/* As Tx wrbs have been freed up, wake up netdev queue
+			 * if it was stopped due to lack of tx wrbs.  */
+			if (__netif_subqueue_stopped(adapter->netdev, i) &&
+				atomic_read(&txo->q.used) < txo->q.len / 2) {
+				netif_wake_subqueue(adapter->netdev, i);
+			}
+
+			adapter->drv_stats.be_tx_events++;
+			txo->stats.be_tx_compl += tx_compl;
+		}
 	}
 
 	mcc_compl = be_process_mcc(adapter, &status);
 
-	napi_complete(napi);
-
 	if (mcc_compl) {
 		struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
 		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
 	}
 
-	if (tx_compl) {
-		be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);
+	napi_complete(napi);
 
-		atomic_sub(num_wrbs, &txq->used);
-
-		/* As Tx wrbs have been freed up, wake up netdev queue if
-		 * it was stopped due to lack of tx wrbs.
-		 */
-		if (netif_queue_stopped(adapter->netdev) &&
-			atomic_read(&txq->used) < txq->len / 2) {
-			netif_wake_queue(adapter->netdev);
-		}
-
-		tx_stats(adapter)->be_tx_events++;
-		tx_stats(adapter)->be_tx_compl += tx_compl;
-	}
-
+	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
 	return 1;
 }
 
@@ -2065,6 +2073,7 @@
 	struct be_adapter *adapter =
 		container_of(work, struct be_adapter, work.work);
 	struct be_rx_obj *rxo;
+	struct be_tx_obj *txo;
 	int i;
 
 	if (!adapter->ue_detected && !lancer_chip(adapter))
@@ -2092,7 +2101,9 @@
 		else
 			be_cmd_get_stats(adapter, &adapter->stats_cmd);
 	}
-	be_tx_rate_update(adapter);
+
+	for_all_tx_queues(adapter, txo, i)
+		be_tx_rate_update(txo);
 
 	for_all_rx_queues(adapter, rxo, i) {
 		be_rx_rate_update(rxo);
@@ -2294,6 +2305,7 @@
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
 	struct be_rx_obj *rxo;
+	struct be_tx_obj *txo;
 	struct be_eq_obj *tx_eq = &adapter->tx_eq;
 	int vec, i;
 
@@ -2311,10 +2323,11 @@
 	napi_disable(&tx_eq->napi);
 
 	if (lancer_chip(adapter)) {
-		be_cq_notify(adapter, adapter->tx_obj.cq.id, false, 0);
 		be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
 		for_all_rx_queues(adapter, rxo, i)
 			 be_cq_notify(adapter, rxo->cq.id, false, 0);
+		for_all_tx_queues(adapter, txo, i)
+			 be_cq_notify(adapter, txo->cq.id, false, 0);
 	}
 
 	if (msix_enabled(adapter)) {
@@ -2333,7 +2346,8 @@
 	/* Wait for all pending tx completions to arrive so that
 	 * all tx skbs are freed.
 	 */
-	be_tx_compl_clean(adapter);
+	for_all_tx_queues(adapter, txo, i)
+		be_tx_compl_clean(adapter, txo);
 
 	return 0;
 }
@@ -2925,12 +2939,9 @@
 	netdev->features |= netdev->hw_features |
 		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
 
-	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
+	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
 		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
 
-	if (lancer_chip(adapter))
-		netdev->vlan_features |= NETIF_F_TSO6;
-
 	netdev->flags |= IFF_MULTICAST;
 
 	/* Default settings for Rx and Tx flow control */
@@ -3186,6 +3197,17 @@
 		return status;
 
 	be_cmd_check_native_mode(adapter);
+
+	if ((num_vfs && adapter->sriov_enabled) ||
+		(adapter->function_mode & 0x400) ||
+		lancer_chip(adapter) || !be_physfn(adapter)) {
+		adapter->num_tx_qs = 1;
+		netif_set_real_num_tx_queues(adapter->netdev,
+			adapter->num_tx_qs);
+	} else {
+		adapter->num_tx_qs = MAX_TX_QS;
+	}
+
 	return 0;
 }
 
@@ -3288,7 +3310,7 @@
 		goto disable_dev;
 	pci_set_master(pdev);
 
-	netdev = alloc_etherdev(sizeof(struct be_adapter));
+	netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
 	if (netdev == NULL) {
 		status = -ENOMEM;
 		goto rel_reg;
@@ -3396,6 +3418,11 @@
 	}
 
 	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
+	/* By default all priorities are enabled.
+	 * Needed in case of no GRP5 evt support
+	 */
+	adapter->vlan_prio_bmap = 0xff;
+
 	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
 	return 0;
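
The bare 0x400 tested against function_mode above is the one magic number in this hunk; the assumption is that it matches the multi-channel flag be2net later names explicitly, i.e. multiple TX queues are kept only for a bare-metal PF on non-Lancer chips without SR-IOV enabled:

	/* assumed named form of the 0x400 flag tested above */
	#define FLEX10_MODE		0x400	/* multi-channel function mode */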
 
diff --git a/drivers/net/bmac.c b/drivers/net/bmac.c
index a1b8c8b..d2e58e2 100644
--- a/drivers/net/bmac.c
+++ b/drivers/net/bmac.c
@@ -7,6 +7,7 @@
  * May 1999, Al Viro: proper release of /proc/net/bmac entry, switched to
  * dynamic procfs inode.
  */
+#include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/netdevice.h>
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 57d3293..74580bb 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -416,6 +416,9 @@
 	struct bnx2 *bp = netdev_priv(dev);
 	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
 
+	if (!cp->max_iscsi_conn)
+		return NULL;
+
 	cp->drv_owner = THIS_MODULE;
 	cp->chip_id = bp->chip_id;
 	cp->pdev = bp->pdev;
@@ -8177,6 +8180,10 @@
 	bp->timer.data = (unsigned long) bp;
 	bp->timer.function = bnx2_timer;
 
+#ifdef BCM_CNIC
+	bp->cnic_eth_dev.max_iscsi_conn =
+		bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN);
+#endif
 	pci_save_state(pdev);
 
 	return 0;
diff --git a/drivers/net/bnx2x/Makefile b/drivers/net/bnx2x/Makefile
index bb83a29..48fbdd4 100644
--- a/drivers/net/bnx2x/Makefile
+++ b/drivers/net/bnx2x/Makefile
@@ -4,4 +4,4 @@
 
 obj-$(CONFIG_BNX2X) += bnx2x.o
 
-bnx2x-objs := bnx2x_main.o bnx2x_link.o bnx2x_cmn.o bnx2x_ethtool.o bnx2x_stats.o bnx2x_dcb.o
+bnx2x-objs := bnx2x_main.o bnx2x_link.o bnx2x_cmn.o bnx2x_ethtool.o bnx2x_stats.o bnx2x_dcb.o bnx2x_sp.o
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 668a578..69fc728 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -22,14 +22,10 @@
  * (you will need to reboot afterwards) */
 /* #define BNX2X_STOP_ON_ERROR */
 
-#define DRV_MODULE_VERSION      "1.62.12-0"
-#define DRV_MODULE_RELDATE      "2011/03/20"
+#define DRV_MODULE_VERSION      "1.70.00-0"
+#define DRV_MODULE_RELDATE      "2011/06/13"
 #define BNX2X_BC_VER            0x040200
 
-#define BNX2X_MULTI_QUEUE
-
-#define BNX2X_NEW_NAPI
-
 #if defined(CONFIG_DCB)
 #define BCM_DCBNL
 #endif
@@ -47,11 +43,12 @@
 #endif
 
 #include <linux/mdio.h>
-#include <linux/pci.h>
+
 #include "bnx2x_reg.h"
 #include "bnx2x_fw_defs.h"
 #include "bnx2x_hsi.h"
 #include "bnx2x_link.h"
+#include "bnx2x_sp.h"
 #include "bnx2x_dcb.h"
 #include "bnx2x_stats.h"
 
@@ -80,6 +77,12 @@
 		       ##__args);				\
 } while (0)
 
+#define DP_CONT(__mask, __fmt, __args...)			\
+do {								\
+	if (bp->msg_enable & (__mask))				\
+		pr_cont(__fmt, ##__args);			\
+} while (0)
+
 /* errors debug print */
 #define BNX2X_DBG_ERR(__fmt, __args...)				\
 do {								\
@@ -111,7 +114,9 @@
 		dev_info(&bp->pdev->dev, __fmt, ##__args);	 \
 } while (0)
 
-void bnx2x_panic_dump(struct bnx2x *bp);
+#define BNX2X_MAC_FMT		"%pM"
+#define BNX2X_MAC_PRN_LIST(mac)	(mac)
+
 
 #ifdef BNX2X_STOP_ON_ERROR
 #define bnx2x_panic() do { \
@@ -233,11 +238,11 @@
  *
  */
 /* iSCSI L2 */
-#define BNX2X_ISCSI_ETH_CL_ID		17
+#define BNX2X_ISCSI_ETH_CL_ID_IDX	1
 #define BNX2X_ISCSI_ETH_CID		17
 
 /* FCoE L2 */
-#define BNX2X_FCOE_ETH_CL_ID		18
+#define BNX2X_FCOE_ETH_CL_ID_IDX	2
 #define BNX2X_FCOE_ETH_CID		18
 
 /** Additional rings budgeting */
@@ -283,44 +288,73 @@
 
 
 /* MC hsi */
-#define BCM_PAGE_SHIFT			12
-#define BCM_PAGE_SIZE			(1 << BCM_PAGE_SHIFT)
-#define BCM_PAGE_MASK			(~(BCM_PAGE_SIZE - 1))
+#define BCM_PAGE_SHIFT		12
+#define BCM_PAGE_SIZE		(1 << BCM_PAGE_SHIFT)
+#define BCM_PAGE_MASK		(~(BCM_PAGE_SIZE - 1))
 #define BCM_PAGE_ALIGN(addr)	(((addr) + BCM_PAGE_SIZE - 1) & BCM_PAGE_MASK)
 
-#define PAGES_PER_SGE_SHIFT		0
-#define PAGES_PER_SGE			(1 << PAGES_PER_SGE_SHIFT)
-#define SGE_PAGE_SIZE			PAGE_SIZE
-#define SGE_PAGE_SHIFT			PAGE_SHIFT
-#define SGE_PAGE_ALIGN(addr)		PAGE_ALIGN((typeof(PAGE_SIZE))(addr))
+#define PAGES_PER_SGE_SHIFT	0
+#define PAGES_PER_SGE		(1 << PAGES_PER_SGE_SHIFT)
+#define SGE_PAGE_SIZE		PAGE_SIZE
+#define SGE_PAGE_SHIFT		PAGE_SHIFT
+#define SGE_PAGE_ALIGN(addr)	PAGE_ALIGN((typeof(PAGE_SIZE))(addr))
 
 /* SGE ring related macros */
-#define NUM_RX_SGE_PAGES		2
+#define NUM_RX_SGE_PAGES	2
 #define RX_SGE_CNT		(BCM_PAGE_SIZE / sizeof(struct eth_rx_sge))
-#define MAX_RX_SGE_CNT			(RX_SGE_CNT - 2)
+#define MAX_RX_SGE_CNT		(RX_SGE_CNT - 2)
 /* RX_SGE_CNT is promised to be a power of 2 */
-#define RX_SGE_MASK			(RX_SGE_CNT - 1)
-#define NUM_RX_SGE			(RX_SGE_CNT * NUM_RX_SGE_PAGES)
-#define MAX_RX_SGE			(NUM_RX_SGE - 1)
+#define RX_SGE_MASK		(RX_SGE_CNT - 1)
+#define NUM_RX_SGE		(RX_SGE_CNT * NUM_RX_SGE_PAGES)
+#define MAX_RX_SGE		(NUM_RX_SGE - 1)
 #define NEXT_SGE_IDX(x)		((((x) & RX_SGE_MASK) == \
 				  (MAX_RX_SGE_CNT - 1)) ? (x) + 3 : (x) + 1)
-#define RX_SGE(x)			((x) & MAX_RX_SGE)
+#define RX_SGE(x)		((x) & MAX_RX_SGE)
 
-/* SGE producer mask related macros */
+/* Manipulate a bit vector defined as an array of u64 */
+
 /* Number of bits in one sge_mask array element */
-#define RX_SGE_MASK_ELEM_SZ		64
-#define RX_SGE_MASK_ELEM_SHIFT		6
-#define RX_SGE_MASK_ELEM_MASK		((u64)RX_SGE_MASK_ELEM_SZ - 1)
+#define BIT_VEC64_ELEM_SZ		64
+#define BIT_VEC64_ELEM_SHIFT		6
+#define BIT_VEC64_ELEM_MASK		((u64)BIT_VEC64_ELEM_SZ - 1)
+
+
+#define __BIT_VEC64_SET_BIT(el, bit) \
+	do { \
+		el = ((el) | ((u64)0x1 << (bit))); \
+	} while (0)
+
+#define __BIT_VEC64_CLEAR_BIT(el, bit) \
+	do { \
+		el = ((el) & (~((u64)0x1 << (bit)))); \
+	} while (0)
+
+
+#define BIT_VEC64_SET_BIT(vec64, idx) \
+	__BIT_VEC64_SET_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \
+			   (idx) & BIT_VEC64_ELEM_MASK)
+
+#define BIT_VEC64_CLEAR_BIT(vec64, idx) \
+	__BIT_VEC64_CLEAR_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \
+			     (idx) & BIT_VEC64_ELEM_MASK)
+
+#define BIT_VEC64_TEST_BIT(vec64, idx) \
+	(((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT] >> \
+	((idx) & BIT_VEC64_ELEM_MASK)) & 0x1)
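+
+/*
+ * Usage sketch (illustrative): given u64 vec[2] = { 0, 0 },
+ * BIT_VEC64_SET_BIT(vec, 70) sets bit 6 of vec[1] (70 = 64 + 6),
+ * BIT_VEC64_TEST_BIT(vec, 70) then evaluates to 1 and
+ * BIT_VEC64_CLEAR_BIT(vec, 70) clears it again.
+ */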
 
 /* Creates a bitmask of all ones in less significant bits.
    idx - index of the most significant bit in the created mask */
-#define RX_SGE_ONES_MASK(idx) \
-		(((u64)0x1 << (((idx) & RX_SGE_MASK_ELEM_MASK) + 1)) - 1)
-#define RX_SGE_MASK_ELEM_ONE_MASK	((u64)(~0))
+#define BIT_VEC64_ONES_MASK(idx) \
+		(((u64)0x1 << (((idx) & BIT_VEC64_ELEM_MASK) + 1)) - 1)
+#define BIT_VEC64_ELEM_ONE_MASK	((u64)(~0))
+
+/*******************************************************/
+
+
 
 /* Number of u64 elements in SGE mask array */
 #define RX_SGE_MASK_LEN			((NUM_RX_SGE_PAGES * RX_SGE_CNT) / \
-					 RX_SGE_MASK_ELEM_SZ)
+					 BIT_VEC64_ELEM_SZ)
 #define RX_SGE_MASK_LEN_MASK		(RX_SGE_MASK_LEN - 1)
 #define NEXT_SGE_MASK_ELEM(el)		(((el) + 1) & RX_SGE_MASK_LEN_MASK)
 
@@ -331,7 +365,30 @@
 	struct host_hc_status_block_e2  *e2_sb;
 };
 
+struct bnx2x_agg_info {
+	/*
+	 * First aggregation buffer is an skb, the following ones are pages.
+	 * We will preallocate the skbs for each aggregation when
+	 * we open the interface and will replace the BD at the consumer
+	 * with this one when we receive the TPA_START CQE in order to
+	 * keep the Rx BD ring consistent.
+	 */
+	struct sw_rx_bd		first_buf;
+	u8			tpa_state;
+#define BNX2X_TPA_START			1
+#define BNX2X_TPA_STOP			2
+#define BNX2X_TPA_ERROR			3
+	u8			placement_offset;
+	u16			parsing_flags;
+	u16			vlan_tag;
+	u16			len_on_bd;
+};
+
+#define Q_STATS_OFFSET32(stat_name) \
+			(offsetof(struct bnx2x_eth_q_stats, stat_name) / 4)
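+/* i.e. the index of @stat_name in 32-bit words within bnx2x_eth_q_stats */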
+
 struct bnx2x_fastpath {
+	struct bnx2x		*bp; /* parent */
 
 #define BNX2X_NAPI_WEIGHT       128
 	struct napi_struct	napi;
@@ -366,23 +423,13 @@
 
 	u64			sge_mask[RX_SGE_MASK_LEN];
 
-	int			state;
-#define BNX2X_FP_STATE_CLOSED		0
-#define BNX2X_FP_STATE_IRQ		0x80000
-#define BNX2X_FP_STATE_OPENING		0x90000
-#define BNX2X_FP_STATE_OPEN		0xa0000
-#define BNX2X_FP_STATE_HALTING		0xb0000
-#define BNX2X_FP_STATE_HALTED		0xc0000
-#define BNX2X_FP_STATE_TERMINATING	0xd0000
-#define BNX2X_FP_STATE_TERMINATED	0xe0000
+	u32			cid;
 
 	u8			index;		/* number in fp array */
 	u8			cl_id;		/* eth client id */
 	u8			cl_qzone_id;
 	u8			fw_sb_id;	/* status block number in FW */
 	u8			igu_sb_id;	/* status block number in HW */
-	u32			cid;
-
 	union db_prod		tx_db;
 
 	u16			tx_pkt_prod;
@@ -401,24 +448,20 @@
 	/* The last maximal completed SGE */
 	u16			last_max_sge;
 	__le16			*rx_cons_sb;
-
 	unsigned long		tx_pkt,
 				rx_pkt,
 				rx_calls;
 
 	/* TPA related */
-	struct sw_rx_bd		tpa_pool[ETH_MAX_AGGREGATION_QUEUES_E1H];
-	u8			tpa_state[ETH_MAX_AGGREGATION_QUEUES_E1H];
-#define BNX2X_TPA_START			1
-#define BNX2X_TPA_STOP			2
+	struct bnx2x_agg_info	tpa_info[ETH_MAX_AGGREGATION_QUEUES_E1H_E2];
 	u8			disable_tpa;
 #ifdef BNX2X_STOP_ON_ERROR
 	u64			tpa_queue_used;
 #endif
 
-	struct tstorm_per_client_stats old_tclient;
-	struct ustorm_per_client_stats old_uclient;
-	struct xstorm_per_client_stats old_xclient;
+	struct tstorm_per_queue_stats old_tclient;
+	struct ustorm_per_queue_stats old_uclient;
+	struct xstorm_per_queue_stats old_xclient;
 	struct bnx2x_eth_q_stats eth_q_stats;
 
 	/* The size is calculated using the following:
@@ -427,7 +470,13 @@
 	     4 (for the digits and to make it DWORD aligned) */
 #define FP_NAME_SIZE		(sizeof(((struct net_device *)0)->name) + 8)
 	char			name[FP_NAME_SIZE];
-	struct bnx2x		*bp; /* parent */
+
+	/* MACs object */
+	struct bnx2x_vlan_mac_obj mac_obj;
+
+	/* Queue State object */
+	struct bnx2x_queue_sp_obj q_obj;
+
 };
 
 #define bnx2x_fp(bp, nr, var)		(bp->fp[nr].var)
@@ -435,11 +484,13 @@
 /* Use 2500 as a mini-jumbo MTU for FCoE */
 #define BNX2X_FCOE_MINI_JUMBO_MTU	2500
 
-#ifdef BCM_CNIC
-/* FCoE L2 `fastpath' is right after the eth entries */
+/* FCoE L2 `fastpath' entry is right after the eth entries */
 #define FCOE_IDX			BNX2X_NUM_ETH_QUEUES(bp)
 #define bnx2x_fcoe_fp(bp)		(&bp->fp[FCOE_IDX])
 #define bnx2x_fcoe(bp, var)		(bnx2x_fcoe_fp(bp)->var)
+
+
+#ifdef BCM_CNIC
 #define IS_FCOE_FP(fp)			(fp->index == FCOE_IDX)
 #define IS_FCOE_IDX(idx)		((idx) == FCOE_IDX)
 #else
@@ -449,77 +500,68 @@
 
 
 /* MC hsi */
-#define MAX_FETCH_BD			13	/* HW max BDs per packet */
-#define RX_COPY_THRESH			92
+#define MAX_FETCH_BD		13	/* HW max BDs per packet */
+#define RX_COPY_THRESH		92
 
-#define NUM_TX_RINGS			16
+#define NUM_TX_RINGS		16
 #define TX_DESC_CNT		(BCM_PAGE_SIZE / sizeof(union eth_tx_bd_types))
-#define MAX_TX_DESC_CNT			(TX_DESC_CNT - 1)
-#define NUM_TX_BD			(TX_DESC_CNT * NUM_TX_RINGS)
-#define MAX_TX_BD			(NUM_TX_BD - 1)
-#define MAX_TX_AVAIL			(MAX_TX_DESC_CNT * NUM_TX_RINGS - 2)
-#define INIT_JUMBO_TX_RING_SIZE		MAX_TX_AVAIL
-#define INIT_TX_RING_SIZE		MAX_TX_AVAIL
+#define MAX_TX_DESC_CNT		(TX_DESC_CNT - 1)
+#define NUM_TX_BD		(TX_DESC_CNT * NUM_TX_RINGS)
+#define MAX_TX_BD		(NUM_TX_BD - 1)
+#define MAX_TX_AVAIL		(MAX_TX_DESC_CNT * NUM_TX_RINGS - 2)
 #define NEXT_TX_IDX(x)		((((x) & MAX_TX_DESC_CNT) == \
 				  (MAX_TX_DESC_CNT - 1)) ? (x) + 2 : (x) + 1)
-#define TX_BD(x)			((x) & MAX_TX_BD)
-#define TX_BD_POFF(x)			((x) & MAX_TX_DESC_CNT)
+#define TX_BD(x)		((x) & MAX_TX_BD)
+#define TX_BD_POFF(x)		((x) & MAX_TX_DESC_CNT)
 
 /* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */
-#define NUM_RX_RINGS			8
+#define NUM_RX_RINGS		8
 #define RX_DESC_CNT		(BCM_PAGE_SIZE / sizeof(struct eth_rx_bd))
-#define MAX_RX_DESC_CNT			(RX_DESC_CNT - 2)
-#define RX_DESC_MASK			(RX_DESC_CNT - 1)
-#define NUM_RX_BD			(RX_DESC_CNT * NUM_RX_RINGS)
-#define MAX_RX_BD			(NUM_RX_BD - 1)
-#define MAX_RX_AVAIL			(MAX_RX_DESC_CNT * NUM_RX_RINGS - 2)
-#define MIN_RX_SIZE_TPA			72
-#define MIN_RX_SIZE_NONTPA		10
-#define INIT_JUMBO_RX_RING_SIZE		MAX_RX_AVAIL
-#define INIT_RX_RING_SIZE		MAX_RX_AVAIL
+#define MAX_RX_DESC_CNT		(RX_DESC_CNT - 2)
+#define RX_DESC_MASK		(RX_DESC_CNT - 1)
+#define NUM_RX_BD		(RX_DESC_CNT * NUM_RX_RINGS)
+#define MAX_RX_BD		(NUM_RX_BD - 1)
+#define MAX_RX_AVAIL		(MAX_RX_DESC_CNT * NUM_RX_RINGS - 2)
+#define MIN_RX_AVAIL		128
+
+#define MIN_RX_SIZE_TPA_HW	(CHIP_IS_E1(bp) ? \
+					ETH_MIN_RX_CQES_WITH_TPA_E1 : \
+					ETH_MIN_RX_CQES_WITH_TPA_E1H_E2)
+#define MIN_RX_SIZE_NONTPA_HW   ETH_MIN_RX_CQES_WITHOUT_TPA
+#define MIN_RX_SIZE_TPA		(max_t(u32, MIN_RX_SIZE_TPA_HW, MIN_RX_AVAIL))
+#define MIN_RX_SIZE_NONTPA	(max_t(u32, MIN_RX_SIZE_NONTPA_HW,\
+								MIN_RX_AVAIL))
+
 #define NEXT_RX_IDX(x)		((((x) & RX_DESC_MASK) == \
 				  (MAX_RX_DESC_CNT - 1)) ? (x) + 3 : (x) + 1)
-#define RX_BD(x)			((x) & MAX_RX_BD)
+#define RX_BD(x)		((x) & MAX_RX_BD)
 
-/* As long as CQE is 4 times bigger than BD entry we have to allocate
-   4 times more pages for CQ ring in order to keep it balanced with
-   BD ring */
-#define NUM_RCQ_RINGS			(NUM_RX_RINGS * 4)
+/*
+ * As long as a CQE is X times bigger than a BD entry we have to allocate X
+ * times more pages for the CQ ring in order to keep it balanced with the BD
+ * ring.
+ */
+#define CQE_BD_REL	(sizeof(union eth_rx_cqe) / sizeof(struct eth_rx_bd))
+#define NUM_RCQ_RINGS		(NUM_RX_RINGS * CQE_BD_REL)
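+/*
+ * E.g. assuming a 32-byte union eth_rx_cqe and an 8-byte struct eth_rx_bd
+ * (an assumption; the sizes are not visible in this header), CQE_BD_REL
+ * is 4 and NUM_RCQ_RINGS is 8 * 4 = 32.
+ */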
 #define RCQ_DESC_CNT		(BCM_PAGE_SIZE / sizeof(union eth_rx_cqe))
-#define MAX_RCQ_DESC_CNT		(RCQ_DESC_CNT - 1)
-#define NUM_RCQ_BD			(RCQ_DESC_CNT * NUM_RCQ_RINGS)
-#define MAX_RCQ_BD			(NUM_RCQ_BD - 1)
-#define MAX_RCQ_AVAIL			(MAX_RCQ_DESC_CNT * NUM_RCQ_RINGS - 2)
+#define MAX_RCQ_DESC_CNT	(RCQ_DESC_CNT - 1)
+#define NUM_RCQ_BD		(RCQ_DESC_CNT * NUM_RCQ_RINGS)
+#define MAX_RCQ_BD		(NUM_RCQ_BD - 1)
+#define MAX_RCQ_AVAIL		(MAX_RCQ_DESC_CNT * NUM_RCQ_RINGS - 2)
 #define NEXT_RCQ_IDX(x)		((((x) & MAX_RCQ_DESC_CNT) == \
 				  (MAX_RCQ_DESC_CNT - 1)) ? (x) + 2 : (x) + 1)
-#define RCQ_BD(x)			((x) & MAX_RCQ_BD)
+#define RCQ_BD(x)		((x) & MAX_RCQ_BD)
 
 
 /* This is needed for determining of last_max */
-#define SUB_S16(a, b)			(s16)((s16)(a) - (s16)(b))
+#define SUB_S16(a, b)		(s16)((s16)(a) - (s16)(b))
+#define SUB_S32(a, b)		(s32)((s32)(a) - (s32)(b))
 
-#define __SGE_MASK_SET_BIT(el, bit) \
-	do { \
-		el = ((el) | ((u64)0x1 << (bit))); \
-	} while (0)
 
-#define __SGE_MASK_CLEAR_BIT(el, bit) \
-	do { \
-		el = ((el) & (~((u64)0x1 << (bit)))); \
-	} while (0)
-
-#define SGE_MASK_SET_BIT(fp, idx) \
-	__SGE_MASK_SET_BIT(fp->sge_mask[(idx) >> RX_SGE_MASK_ELEM_SHIFT], \
-			   ((idx) & RX_SGE_MASK_ELEM_MASK))
-
-#define SGE_MASK_CLEAR_BIT(fp, idx) \
-	__SGE_MASK_CLEAR_BIT(fp->sge_mask[(idx) >> RX_SGE_MASK_ELEM_SHIFT], \
-			     ((idx) & RX_SGE_MASK_ELEM_MASK))
-
+#define BNX2X_SWCID_SHIFT	17
+#define BNX2X_SWCID_MASK	((0x1 << BNX2X_SWCID_SHIFT) - 1)
 
 /* used on a CID received from the HW */
-#define SW_CID(x)			(le32_to_cpu(x) & \
-					 (COMMON_RAMROD_ETH_RX_CQE_CID >> 7))
+#define SW_CID(x)			(le32_to_cpu(x) & BNX2X_SWCID_MASK)
 #define CQE_CMD(x)			(le32_to_cpu(x) >> \
 					COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT)
 
@@ -529,6 +571,9 @@
 
 #define BNX2X_DB_MIN_SHIFT		3	/* 8 bytes */
 #define BNX2X_DB_SHIFT			7	/* 128 bytes*/
+#if (BNX2X_DB_SHIFT < BNX2X_DB_MIN_SHIFT)
+#error "Min DB doorbell stride is 8"
+#endif
 #define DPM_TRIGER_TYPE			0x40
 #define DOORBELL(bp, cid, val) \
 	do { \
@@ -557,13 +602,11 @@
 
 
 /* stuff added to make the code fit 80Col */
-
-#define CQE_TYPE(cqe_fp_flags)	((cqe_fp_flags) & ETH_FAST_PATH_RX_CQE_TYPE)
-
-#define TPA_TYPE_START			ETH_FAST_PATH_RX_CQE_START_FLG
-#define TPA_TYPE_END			ETH_FAST_PATH_RX_CQE_END_FLG
-#define TPA_TYPE(cqe_fp_flags)		((cqe_fp_flags) & \
-					 (TPA_TYPE_START | TPA_TYPE_END))
+#define CQE_TYPE(cqe_fp_flags)	 ((cqe_fp_flags) & ETH_FAST_PATH_RX_CQE_TYPE)
+#define CQE_TYPE_START(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_START_AGG)
+#define CQE_TYPE_STOP(cqe_type)  ((cqe_type) == RX_ETH_CQE_TYPE_ETH_STOP_AGG)
+#define CQE_TYPE_SLOW(cqe_type)  ((cqe_type) == RX_ETH_CQE_TYPE_ETH_RAMROD)
+#define CQE_TYPE_FAST(cqe_type)  ((cqe_type) == RX_ETH_CQE_TYPE_ETH_FASTPATH)
 
 #define ETH_RX_ERROR_FALGS		ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG
 
@@ -590,12 +633,30 @@
 #define BNX2X_RX_SUM_FIX(cqe) \
 	BNX2X_PRS_FLAG_OVERETH_IPV4(cqe->fast_path_cqe.pars_flags.flags)
 
-#define U_SB_ETH_RX_CQ_INDEX		1
-#define U_SB_ETH_RX_BD_INDEX		2
-#define C_SB_ETH_TX_CQ_INDEX		5
+
+#define FP_USB_FUNC_OFF	\
+			offsetof(struct cstorm_status_block_u, func)
+#define FP_CSB_FUNC_OFF	\
+			offsetof(struct cstorm_status_block_c, func)
+
+#define HC_INDEX_TOE_RX_CQ_CONS		0 /* Formerly Ustorm TOE CQ index */
+					  /* (HC_INDEX_U_TOE_RX_CQ_CONS)  */
+#define HC_INDEX_ETH_RX_CQ_CONS		1 /* Formerly Ustorm ETH CQ index */
+					  /* (HC_INDEX_U_ETH_RX_CQ_CONS)  */
+#define HC_INDEX_ETH_RX_BD_CONS		2 /* Formerly Ustorm ETH BD index */
+					  /* (HC_INDEX_U_ETH_RX_BD_CONS)  */
+
+#define HC_INDEX_TOE_TX_CQ_CONS		4 /* Formerly Cstorm TOE CQ index   */
+					  /* (HC_INDEX_C_TOE_TX_CQ_CONS)    */
+#define HC_INDEX_ETH_TX_CQ_CONS		5 /* Formerly Cstorm ETH CQ index   */
+					  /* (HC_INDEX_C_ETH_TX_CQ_CONS)    */
+
+#define U_SB_ETH_RX_CQ_INDEX		HC_INDEX_ETH_RX_CQ_CONS
+#define U_SB_ETH_RX_BD_INDEX		HC_INDEX_ETH_RX_BD_CONS
+#define C_SB_ETH_TX_CQ_INDEX		HC_INDEX_ETH_TX_CQ_CONS
 
 #define BNX2X_RX_SB_INDEX \
-	(&fp->sb_index_values[U_SB_ETH_RX_CQ_INDEX])
+	(&fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS])
 
 #define BNX2X_TX_SB_INDEX \
 	(&fp->sb_index_values[C_SB_ETH_TX_CQ_INDEX])
@@ -615,41 +676,74 @@
 #define CHIP_NUM_57711			0x164f
 #define CHIP_NUM_57711E			0x1650
 #define CHIP_NUM_57712			0x1662
-#define CHIP_NUM_57712E			0x1663
+#define CHIP_NUM_57712_MF		0x1663
+#define CHIP_NUM_57713			0x1651
+#define CHIP_NUM_57713E			0x1652
+#define CHIP_NUM_57800			0x168a
+#define CHIP_NUM_57800_MF		0x16a5
+#define CHIP_NUM_57810			0x168e
+#define CHIP_NUM_57810_MF		0x16ae
+#define CHIP_NUM_57840			0x168d
+#define CHIP_NUM_57840_MF		0x16ab
 #define CHIP_IS_E1(bp)			(CHIP_NUM(bp) == CHIP_NUM_57710)
 #define CHIP_IS_57711(bp)		(CHIP_NUM(bp) == CHIP_NUM_57711)
 #define CHIP_IS_57711E(bp)		(CHIP_NUM(bp) == CHIP_NUM_57711E)
 #define CHIP_IS_57712(bp)		(CHIP_NUM(bp) == CHIP_NUM_57712)
-#define CHIP_IS_57712E(bp)		(CHIP_NUM(bp) == CHIP_NUM_57712E)
+#define CHIP_IS_57712_MF(bp)		(CHIP_NUM(bp) == CHIP_NUM_57712_MF)
+#define CHIP_IS_57800(bp)		(CHIP_NUM(bp) == CHIP_NUM_57800)
+#define CHIP_IS_57800_MF(bp)		(CHIP_NUM(bp) == CHIP_NUM_57800_MF)
+#define CHIP_IS_57810(bp)		(CHIP_NUM(bp) == CHIP_NUM_57810)
+#define CHIP_IS_57810_MF(bp)		(CHIP_NUM(bp) == CHIP_NUM_57810_MF)
+#define CHIP_IS_57840(bp)		(CHIP_NUM(bp) == CHIP_NUM_57840)
+#define CHIP_IS_57840_MF(bp)		(CHIP_NUM(bp) == CHIP_NUM_57840_MF)
 #define CHIP_IS_E1H(bp)			(CHIP_IS_57711(bp) || \
 					 CHIP_IS_57711E(bp))
 #define CHIP_IS_E2(bp)			(CHIP_IS_57712(bp) || \
-					 CHIP_IS_57712E(bp))
+					 CHIP_IS_57712_MF(bp))
+#define CHIP_IS_E3(bp)			(CHIP_IS_57800(bp) || \
+					 CHIP_IS_57800_MF(bp) || \
+					 CHIP_IS_57810(bp) || \
+					 CHIP_IS_57810_MF(bp) || \
+					 CHIP_IS_57840(bp) || \
+					 CHIP_IS_57840_MF(bp))
 #define CHIP_IS_E1x(bp)			(CHIP_IS_E1((bp)) || CHIP_IS_E1H((bp)))
-#define IS_E1H_OFFSET			(CHIP_IS_E1H(bp) || CHIP_IS_E2(bp))
+#define USES_WARPCORE(bp)		(CHIP_IS_E3(bp))
+#define IS_E1H_OFFSET			(!CHIP_IS_E1(bp))
 
-#define CHIP_REV(bp)			(bp->common.chip_id & 0x0000f000)
-#define CHIP_REV_Ax			0x00000000
+#define CHIP_REV_SHIFT			12
+#define CHIP_REV_MASK			(0xF << CHIP_REV_SHIFT)
+#define CHIP_REV_VAL(bp)		(bp->common.chip_id & CHIP_REV_MASK)
+#define CHIP_REV_Ax			(0x0 << CHIP_REV_SHIFT)
+#define CHIP_REV_Bx			(0x1 << CHIP_REV_SHIFT)
 /* assume maximum 5 revisions */
-#define CHIP_REV_IS_SLOW(bp)		(CHIP_REV(bp) > 0x00005000)
+#define CHIP_REV_IS_SLOW(bp)		(CHIP_REV_VAL(bp) > 0x00005000)
 /* Emul versions are A=>0xe, B=>0xc, C=>0xa, D=>8, E=>6 */
 #define CHIP_REV_IS_EMUL(bp)		((CHIP_REV_IS_SLOW(bp)) && \
-					 !(CHIP_REV(bp) & 0x00001000))
+					 !(CHIP_REV_VAL(bp) & 0x00001000))
 /* FPGA versions are A=>0xf, B=>0xd, C=>0xb, D=>9, E=>7 */
 #define CHIP_REV_IS_FPGA(bp)		((CHIP_REV_IS_SLOW(bp)) && \
-					 (CHIP_REV(bp) & 0x00001000))
+					 (CHIP_REV_VAL(bp) & 0x00001000))
 
 #define CHIP_TIME(bp)			((CHIP_REV_IS_EMUL(bp)) ? 2000 : \
 					((CHIP_REV_IS_FPGA(bp)) ? 200 : 1))
 
 #define CHIP_METAL(bp)			(bp->common.chip_id & 0x00000ff0)
 #define CHIP_BOND_ID(bp)		(bp->common.chip_id & 0x0000000f)
-#define CHIP_PARITY_ENABLED(bp)	(CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
+#define CHIP_REV_SIM(bp)		(((CHIP_REV_MASK - CHIP_REV_VAL(bp)) >>\
+					   (CHIP_REV_SHIFT + 1)) \
+						<< CHIP_REV_SHIFT)
+#define CHIP_REV(bp)			(CHIP_REV_IS_SLOW(bp) ? \
+						CHIP_REV_SIM(bp) :\
+						CHIP_REV_VAL(bp))
+#define CHIP_IS_E3B0(bp)		(CHIP_IS_E3(bp) && \
+					 (CHIP_REV(bp) == CHIP_REV_Bx))
+#define CHIP_IS_E3A0(bp)		(CHIP_IS_E3(bp) && \
+					 (CHIP_REV(bp) == CHIP_REV_Ax))
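+/*
+ * Worked example (illustrative): on an emulation B part CHIP_REV_VAL is
+ * 0xc << 12, so CHIP_REV_MASK - CHIP_REV_VAL is 0x3 << 12; shifting right
+ * by CHIP_REV_SHIFT + 1 and back left by CHIP_REV_SHIFT gives 0x1 << 12,
+ * i.e. CHIP_REV_SIM() maps it to CHIP_REV_Bx.
+ */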
 
 	int			flash_size;
-#define NVRAM_1MB_SIZE			0x20000	/* 1M bit in bytes */
-#define NVRAM_TIMEOUT_COUNT		30000
-#define NVRAM_PAGE_SIZE			256
+#define BNX2X_NVRAM_1MB_SIZE			0x20000	/* 1M bit in bytes */
+#define BNX2X_NVRAM_TIMEOUT_COUNT		30000
+#define BNX2X_NVRAM_PAGE_SIZE			256
 
 	u32			shmem_base;
 	u32			shmem2_base;
@@ -666,7 +760,7 @@
 #define INT_BLOCK_MODE_NORMAL		0
 #define INT_BLOCK_MODE_BW_COMP		2
 #define CHIP_INT_MODE_IS_NBC(bp)		\
-			(CHIP_IS_E2(bp) &&	\
+			(!CHIP_IS_E1x(bp) &&	\
 			!((bp)->common.int_block & INT_BLOCK_MODE_BW_COMP))
 #define CHIP_INT_MODE_IS_BC(bp) (!CHIP_INT_MODE_IS_NBC(bp))
 
@@ -712,19 +806,15 @@
 
 /* end of port */
 
-/* e1h Classification CAM line allocations */
-enum {
-	CAM_ETH_LINE = 0,
-	CAM_ISCSI_ETH_LINE,
-	CAM_FIP_ETH_LINE,
-	CAM_FIP_MCAST_LINE,
-	CAM_MAX_PF_LINE = CAM_FIP_MCAST_LINE
-};
-/* number of MACs per function in NIG memory - used for SI mode */
-#define NIG_LLH_FUNC_MEM_SIZE		16
-/* number of entries in NIG_REG_LLHX_FUNC_MEM */
-#define NIG_LLH_FUNC_MEM_MAX_OFFSET	8
+#define STATS_OFFSET32(stat_name) \
+			(offsetof(struct bnx2x_eth_stats, stat_name) / 4)
 
+/* slow path */
+
+/* slow path work-queue */
+extern struct workqueue_struct *bnx2x_wq;
+
+#define BNX2X_MAX_NUM_OF_VFS	64
 #define BNX2X_VF_ID_INVALID	0xFF
 
 /*
@@ -749,8 +839,10 @@
  *    L2 queue is supported. the cid for the FCoE L2 queue is always X.
  */
 
-#define FP_SB_MAX_E1x		16	/* fast-path interrupt contexts E1x */
-#define FP_SB_MAX_E2		16	/* fast-path interrupt contexts E2 */
+/* fast-path interrupt contexts E1x */
+#define FP_SB_MAX_E1x		16
+/* fast-path interrupt contexts E2 */
+#define FP_SB_MAX_E2		HC_SB_MAX_SB_E2
 
 /*
  * cid_cnt parameter below refers to the value returned by
@@ -761,13 +853,13 @@
  * The number of FP context allocated by the driver == max number of regular
  * L2 queues + 1 for the FCoE L2 queue
  */
-#define L2_FP_COUNT(cid_cnt)	((cid_cnt) - CNIC_CONTEXT_USE)
+#define L2_FP_COUNT(cid_cnt)	((cid_cnt) - FCOE_CONTEXT_USE)
 
 /*
  * The number of FP-SB allocated by the driver == max number of regular L2
  * queues + 1 for the CNIC which also consumes an FP-SB
  */
-#define FP_SB_COUNT(cid_cnt)	((cid_cnt) - FCOE_CONTEXT_USE)
+#define FP_SB_COUNT(cid_cnt)	((cid_cnt) - CNIC_CONTEXT_USE)
 #define NUM_IGU_SB_REQUIRED(cid_cnt) \
 				(FP_SB_COUNT(cid_cnt) - NONE_ETH_CONTEXT_USE)
 
@@ -788,38 +880,61 @@
 #define CNIC_ILT_LINES		DIV_ROUND_UP(CNIC_CID_MAX, ILT_PAGE_CIDS)
 #endif
 
-#define QM_ILT_PAGE_SZ_HW	3
-#define QM_ILT_PAGE_SZ		(4096 << QM_ILT_PAGE_SZ_HW) /* 32K */
+#define QM_ILT_PAGE_SZ_HW	0
+#define QM_ILT_PAGE_SZ		(4096 << QM_ILT_PAGE_SZ_HW) /* 4K */
 #define QM_CID_ROUND		1024
 
 #ifdef BCM_CNIC
 /* TM (timers) host DB constants */
-#define TM_ILT_PAGE_SZ_HW	2
-#define TM_ILT_PAGE_SZ		(4096 << TM_ILT_PAGE_SZ_HW) /* 16K */
+#define TM_ILT_PAGE_SZ_HW	0
+#define TM_ILT_PAGE_SZ		(4096 << TM_ILT_PAGE_SZ_HW) /* 4K */
 /* #define TM_CONN_NUM		(CNIC_STARTING_CID+CNIC_ISCSI_CXT_MAX) */
 #define TM_CONN_NUM		1024
 #define TM_ILT_SZ		(8 * TM_CONN_NUM)
 #define TM_ILT_LINES		DIV_ROUND_UP(TM_ILT_SZ, TM_ILT_PAGE_SZ)
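+/* e.g. 8 bytes * 1024 connections = 8K, i.e. two of the 4K pages above */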
 
 /* SRC (Searcher) host DB constants */
-#define SRC_ILT_PAGE_SZ_HW	3
-#define SRC_ILT_PAGE_SZ		(4096 << SRC_ILT_PAGE_SZ_HW) /* 32K */
+#define SRC_ILT_PAGE_SZ_HW	0
+#define SRC_ILT_PAGE_SZ		(4096 << SRC_ILT_PAGE_SZ_HW) /* 4K */
 #define SRC_HASH_BITS		10
 #define SRC_CONN_NUM		(1 << SRC_HASH_BITS) /* 1024 */
 #define SRC_ILT_SZ		(sizeof(struct src_ent) * SRC_CONN_NUM)
 #define SRC_T2_SZ		SRC_ILT_SZ
 #define SRC_ILT_LINES		DIV_ROUND_UP(SRC_ILT_SZ, SRC_ILT_PAGE_SZ)
+
 #endif
 
-#define MAX_DMAE_C			8
+#define MAX_DMAE_C		8
 
 /* DMA memory not used in fastpath */
 struct bnx2x_slowpath {
-	struct eth_stats_query		fw_stats;
-	struct mac_configuration_cmd	mac_config;
-	struct mac_configuration_cmd	mcast_config;
-	struct mac_configuration_cmd	uc_mac_config;
-	struct client_init_ramrod_data	client_init_data;
+	union {
+		struct mac_configuration_cmd		e1x;
+		struct eth_classify_rules_ramrod_data	e2;
+	} mac_rdata;
+
+
+	union {
+		struct tstorm_eth_mac_filter_config	e1x;
+		struct eth_filter_rules_ramrod_data	e2;
+	} rx_mode_rdata;
+
+	union {
+		struct mac_configuration_cmd		e1;
+		struct eth_multicast_rules_ramrod_data  e2;
+	} mcast_rdata;
+
+	struct eth_rss_update_ramrod_data	rss_rdata;
+
+	/* Queue State related ramrods are always sent under rtnl_lock */
+	union {
+		struct client_init_ramrod_data  init_data;
+		struct client_update_ramrod_data update_data;
+	} q_rdata;
+
+	union {
+		struct function_start_data	func_start;
+	} func_rdata;
 
 	/* used by dmae command executer */
 	struct dmae_command		dmae[MAX_DMAE_C];
@@ -846,7 +961,7 @@
 #define MAX_DYNAMIC_ATTN_GRPS		8
 
 struct attn_route {
-	u32	sig[5];
+	u32 sig[5];
 };
 
 struct iro {
@@ -866,13 +981,15 @@
 /* forward */
 struct bnx2x_ilt;
 
-typedef enum {
+
+enum bnx2x_recovery_state {
 	BNX2X_RECOVERY_DONE,
 	BNX2X_RECOVERY_INIT,
 	BNX2X_RECOVERY_WAIT,
-} bnx2x_recovery_state_t;
+	BNX2X_RECOVERY_FAILED
+};
 
-/**
+/*
  * Event queue (EQ or event ring) MC hsi
  * NUM_EQ_PAGES and EQ_DESC_CNT_PAGE must be power of 2
  */
@@ -910,6 +1027,24 @@
 	BNX2X_LINK_REPORT_TX_FC_ON,
 };
 
+enum {
+	BNX2X_PORT_QUERY_IDX,
+	BNX2X_PF_QUERY_IDX,
+	BNX2X_FIRST_QUEUE_QUERY_IDX,
+};
+
+struct bnx2x_fw_stats_req {
+	struct stats_query_header hdr;
+	struct stats_query_entry query[STATS_QUERY_CMD_COUNT];
+};
+
+struct bnx2x_fw_stats_data {
+	struct stats_counter	storm_counters;
+	struct per_port_stats	port;
+	struct per_pf_stats	pf;
+	struct per_queue_stats  queue_stats[1];
+};
+
 struct bnx2x {
 	/* Fields used in the tx and intr/napi performance paths
 	 * are grouped together in the beginning of the structure
@@ -919,19 +1054,28 @@
 	void __iomem		*doorbells;
 	u16			db_size;
 
+	u8			pf_num;	/* absolute PF number */
+	u8			pfid;	/* per-path PF number */
+	int			base_fw_ndsb;
+#define BP_PATH(bp)			(CHIP_IS_E1x(bp) ? 0 : (bp->pf_num & 1))
+#define BP_PORT(bp)			(bp->pfid & 1)
+#define BP_FUNC(bp)			(bp->pfid)
+#define BP_ABS_FUNC(bp)			(bp->pf_num)
+#define BP_E1HVN(bp)			(bp->pfid >> 1)
+#define BP_VN(bp)			(BP_E1HVN(bp)) /* remove when approved */
+#define BP_L_ID(bp)			(BP_E1HVN(bp) << 2)
+#define BP_FW_MB_IDX(bp)		(BP_PORT(bp) +\
+					 BP_VN(bp) * (CHIP_IS_E1x(bp) ? 2  : 1))
+
 	struct net_device	*dev;
 	struct pci_dev		*pdev;
 
-	struct iro		*iro_arr;
+	const struct iro	*iro_arr;
 #define IRO (bp->iro_arr)
 
-	atomic_t		intr_sem;
-
-	bnx2x_recovery_state_t	recovery_state;
+	enum bnx2x_recovery_state recovery_state;
 	int			is_leader;
 	struct msix_entry	*msix_table;
-#define INT_MODE_INTx			1
-#define INT_MODE_MSI			2
 
 	int			tx_ring_size;
 
@@ -944,7 +1088,8 @@
 	/* Max supported alignment is 256 (8 shift) */
 #define BNX2X_RX_ALIGN_SHIFT		((L1_CACHE_SHIFT < 8) ? \
 					 L1_CACHE_SHIFT : 8)
-#define BNX2X_RX_ALIGN			(1 << BNX2X_RX_ALIGN_SHIFT)
+	/* FW uses 2 cache lines alignment for packet start and size */
+#define BNX2X_FW_RX_ALIGN		(2 << BNX2X_RX_ALIGN_SHIFT)
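+	/* e.g. 2 << 6 = 128 bytes with a 64-byte L1 cache line (shift 6) */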
 #define BNX2X_PXP_DRAM_ALIGN		(BNX2X_RX_ALIGN_SHIFT - 5)
 
 	struct host_sp_status_block *def_status_blk;
@@ -974,10 +1119,12 @@
 	__le16			*eq_cons_sb;
 	atomic_t		eq_spq_left; /* COMMON_XXX ramrods credit */
 
-	/* Flags for marking that there is a STAT_QUERY or
-	   SET_MAC ramrod pending */
-	int			stats_pending;
-	int			set_mac_pending;
+
+
+	/* Counter for marking that there is a STAT_QUERY ramrod pending */
+	u16			stats_pending;
+	/*  Counter for completed statistics ramrods */
+	u16			stats_comp;
 
 	/* End of fields used in the performance code paths */
 
@@ -985,47 +1132,27 @@
 	int			msg_enable;
 
 	u32			flags;
-#define PCIX_FLAG			1
-#define PCI_32BIT_FLAG			2
-#define ONE_PORT_FLAG			4
-#define NO_WOL_FLAG			8
-#define USING_DAC_FLAG			0x10
-#define USING_MSIX_FLAG			0x20
-#define USING_MSI_FLAG			0x40
+#define PCIX_FLAG			(1 << 0)
+#define PCI_32BIT_FLAG			(1 << 1)
+#define ONE_PORT_FLAG			(1 << 2)
+#define NO_WOL_FLAG			(1 << 3)
+#define USING_DAC_FLAG			(1 << 4)
+#define USING_MSIX_FLAG			(1 << 5)
+#define USING_MSI_FLAG			(1 << 6)
+#define DISABLE_MSI_FLAG		(1 << 7)
+#define TPA_ENABLE_FLAG			(1 << 8)
+#define NO_MCP_FLAG			(1 << 9)
 
-#define TPA_ENABLE_FLAG			0x80
-#define NO_MCP_FLAG			0x100
-#define DISABLE_MSI_FLAG		0x200
 #define BP_NOMCP(bp)			(bp->flags & NO_MCP_FLAG)
-#define MF_FUNC_DIS			0x1000
-#define FCOE_MACS_SET			0x2000
-#define NO_FCOE_FLAG			0x4000
-#define NO_ISCSI_OOO_FLAG		0x8000
-#define NO_ISCSI_FLAG			0x10000
+#define MF_FUNC_DIS			(1 << 11)
+#define OWN_CNIC_IRQ			(1 << 12)
+#define NO_ISCSI_OOO_FLAG		(1 << 13)
+#define NO_ISCSI_FLAG			(1 << 14)
+#define NO_FCOE_FLAG			(1 << 15)
 
-#define NO_FCOE(bp)		((bp)->flags & NO_FCOE_FLAG)
 #define NO_ISCSI(bp)		((bp)->flags & NO_ISCSI_FLAG)
 #define NO_ISCSI_OOO(bp)	((bp)->flags & NO_ISCSI_OOO_FLAG)
-
-	int			pf_num;	/* absolute PF number */
-	int			pfid;	/* per-path PF number */
-	int			base_fw_ndsb;
-#define BP_PATH(bp)			(!CHIP_IS_E2(bp) ? \
-						0 : (bp->pf_num & 1))
-#define BP_PORT(bp)			(bp->pfid & 1)
-#define BP_FUNC(bp)			(bp->pfid)
-#define BP_ABS_FUNC(bp)			(bp->pf_num)
-#define BP_E1HVN(bp)			(bp->pfid >> 1)
-#define BP_VN(bp)			(CHIP_MODE_IS_4_PORT(bp) ? \
-						0 : BP_E1HVN(bp))
-#define BP_L_ID(bp)			(BP_E1HVN(bp) << 2)
-#define BP_FW_MB_IDX(bp)		(BP_PORT(bp) +\
-					 BP_VN(bp) * (CHIP_IS_E1x(bp) ? 2  : 1))
-
-#ifdef BCM_CNIC
-#define BCM_CNIC_CID_START		16
-#define BCM_ISCSI_ETH_CL_ID		17
-#endif
+#define NO_FCOE(bp)		((bp)->flags & NO_FCOE_FLAG)
 
 	int			pm_cap;
 	int			pcie_cap;
@@ -1033,6 +1160,8 @@
 
 	struct delayed_work	sp_task;
 	struct delayed_work	reset_task;
+
+	struct delayed_work	period_task;
 	struct timer_list	timer;
 	int			current_interval;
 
@@ -1052,9 +1181,9 @@
 
 	struct cmng_struct_per_port cmng;
 	u32			vn_weight_sum;
-
 	u32			mf_config[E1HVN_MAX];
 	u32			mf2_config[E2_FUNC_MAX];
+	u32			path_has_ovlan; /* E3 */
 	u16			mf_ov;
 	u8			mf_mode;
 #define IS_MF(bp)		(bp->mf_mode != 0)
@@ -1079,33 +1208,20 @@
 
 	u32			lin_cnt;
 
-	int			state;
+	u16			state;
 #define BNX2X_STATE_CLOSED		0
 #define BNX2X_STATE_OPENING_WAIT4_LOAD	0x1000
 #define BNX2X_STATE_OPENING_WAIT4_PORT	0x2000
 #define BNX2X_STATE_OPEN		0x3000
 #define BNX2X_STATE_CLOSING_WAIT4_HALT	0x4000
 #define BNX2X_STATE_CLOSING_WAIT4_DELETE 0x5000
-#define BNX2X_STATE_CLOSING_WAIT4_UNLOAD 0x6000
-#define BNX2X_STATE_FUNC_STARTED	0x7000
+
 #define BNX2X_STATE_DIAG		0xe000
 #define BNX2X_STATE_ERROR		0xf000
 
 	int			multi_mode;
 	int			num_queues;
 	int			disable_tpa;
-	int			int_mode;
-	u32			*rx_indir_table;
-
-	struct tstorm_eth_mac_filter_config	mac_filters;
-#define BNX2X_ACCEPT_NONE		0x0000
-#define BNX2X_ACCEPT_UNICAST		0x0001
-#define BNX2X_ACCEPT_MULTICAST		0x0002
-#define BNX2X_ACCEPT_ALL_UNICAST	0x0004
-#define BNX2X_ACCEPT_ALL_MULTICAST	0x0008
-#define BNX2X_ACCEPT_BROADCAST		0x0010
-#define BNX2X_ACCEPT_UNMATCHED_UCAST	0x0020
-#define BNX2X_PROMISCUOUS_MODE		0x10000
 
 	u32			rx_mode;
 #define BNX2X_RX_MODE_NONE		0
@@ -1113,7 +1229,6 @@
 #define BNX2X_RX_MODE_ALLMULTI		2
 #define BNX2X_RX_MODE_PROMISC		3
 #define BNX2X_MAX_MULTICAST		64
-#define BNX2X_MAX_EMUL_MULTI		16
 
 	u8			igu_dsb_id;
 	u8			igu_base_sb;
@@ -1122,11 +1237,38 @@
 
 	struct bnx2x_slowpath	*slowpath;
 	dma_addr_t		slowpath_mapping;
+
+	/* Total number of FW statistics requests */
+	u8			fw_stats_num;
+
+	/*
+	 * This is a memory buffer that will contain both the statistics
+	 * ramrod request and the data.
+	 */
+	void			*fw_stats;
+	dma_addr_t		fw_stats_mapping;
+
+	/*
+	 * FW statistics request shortcut (points at the
+	 * beginning of fw_stats buffer).
+	 */
+	struct bnx2x_fw_stats_req	*fw_stats_req;
+	dma_addr_t			fw_stats_req_mapping;
+	int				fw_stats_req_sz;
+
+	/*
+	 * FW statistics data shortcut (points at the beginning of
+	 * fw_stats buffer + fw_stats_req_sz).
+	 */
+	struct bnx2x_fw_stats_data	*fw_stats_data;
+	dma_addr_t			fw_stats_data_mapping;
+	int				fw_stats_data_sz;
+
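+	/*
+	 * Overall layout (illustrative): the request sits at the start of
+	 * the fw_stats buffer with the data right after it, i.e.
+	 *	fw_stats_req  = fw_stats
+	 *	fw_stats_data = fw_stats + fw_stats_req_sz
+	 */
+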
 	struct hw_context	context;
 
 	struct bnx2x_ilt	*ilt;
 #define BP_ILT(bp)		((bp)->ilt)
-#define ILT_MAX_LINES		128
+#define ILT_MAX_LINES		256
 
 	int			l2_cid_count;
 #define L2_ILT_LINES(bp)	(DIV_ROUND_UP((bp)->l2_cid_count, \
@@ -1148,16 +1290,18 @@
 	struct cnic_eth_dev	cnic_eth_dev;
 	union host_hc_status_block cnic_sb;
 	dma_addr_t		cnic_sb_mapping;
-#define CNIC_SB_ID(bp)		((bp)->base_fw_ndsb + BP_L_ID(bp))
-#define CNIC_IGU_SB_ID(bp)	((bp)->igu_base_sb)
 	struct eth_spe		*cnic_kwq;
 	struct eth_spe		*cnic_kwq_prod;
 	struct eth_spe		*cnic_kwq_cons;
 	struct eth_spe		*cnic_kwq_last;
 	u16			cnic_kwq_pending;
 	u16			cnic_spq_pending;
-	struct mutex		cnic_mutex;
 	u8			fip_mac[ETH_ALEN];
+	struct mutex		cnic_mutex;
+	struct bnx2x_vlan_mac_obj iscsi_l2_mac_obj;
+
+	/* Start index of the "special" (CNIC related) L2 clients */
+	u8				cnic_base_cl_id;
 #endif
 
 	int			dmae_ready;
@@ -1194,6 +1338,8 @@
 	u16			*init_ops_offsets;
 	/* Data blob - has 32 bit granularity */
 	u32			*init_data;
+	u32			init_mode_flags;
+#define INIT_MODE_FLAGS(bp)	(bp->init_mode_flags)
 	/* Zipped PRAM blobs - raw data */
 	const u8		*tsem_int_table_data;
 	const u8		*tsem_pram_data;
@@ -1215,8 +1361,10 @@
 #define INIT_CSEM_INT_TABLE_DATA(bp)	(bp->csem_int_table_data)
 #define INIT_CSEM_PRAM_DATA(bp)		(bp->csem_pram_data)
 
+#define PHY_FW_VER_LEN			20
 	char			fw_ver[32];
 	const struct firmware	*firmware;
+
 	/* LLDP params */
 	struct bnx2x_config_lldp_params		lldp_config_params;
 
@@ -1235,13 +1383,30 @@
 	bool dcbx_mode_uset;
 
 	struct bnx2x_config_dcbx_params		dcbx_config_params;
-
 	struct bnx2x_dcbx_port_params		dcbx_port_params;
 	int					dcb_version;
 
-	/* DCBX Negotiation results */
+	/* CAM credit pools */
+	struct bnx2x_credit_pool_obj		macs_pool;
+
+	/* RX_MODE object */
+	struct bnx2x_rx_mode_obj		rx_mode_obj;
+
+	/* MCAST object */
+	struct bnx2x_mcast_obj			mcast_obj;
+
+	/* RSS configuration object */
+	struct bnx2x_rss_config_obj		rss_conf_obj;
+
+	/* Function State controlling object */
+	struct bnx2x_func_sp_obj		func_obj;
+
+	unsigned long				sp_state;
+
+	/* DCBX Negotiation results */
 	struct dcbx_features			dcbx_local_feat;
 	u32					dcbx_error;
+
 #ifdef BCM_DCBNL
 	struct dcbx_features			dcbx_remote_feat;
 	u32					dcbx_remote_flags;
@@ -1249,42 +1414,11 @@
 	u32					pending_max;
 };
 
-/**
- *	Init queue/func interface
- */
-/* queue init flags */
-#define QUEUE_FLG_TPA		0x0001
-#define QUEUE_FLG_CACHE_ALIGN	0x0002
-#define QUEUE_FLG_STATS		0x0004
-#define QUEUE_FLG_OV		0x0008
-#define QUEUE_FLG_VLAN		0x0010
-#define QUEUE_FLG_COS		0x0020
-#define QUEUE_FLG_HC		0x0040
-#define QUEUE_FLG_DHC		0x0080
-#define QUEUE_FLG_OOO		0x0100
-
-#define QUEUE_DROP_IP_CS_ERR	TSTORM_ETH_CLIENT_CONFIG_DROP_IP_CS_ERR
-#define QUEUE_DROP_TCP_CS_ERR	TSTORM_ETH_CLIENT_CONFIG_DROP_TCP_CS_ERR
-#define QUEUE_DROP_TTL0		TSTORM_ETH_CLIENT_CONFIG_DROP_TTL0
-#define QUEUE_DROP_UDP_CS_ERR	TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR
-
-
-
-/* rss capabilities */
-#define RSS_IPV4_CAP		0x0001
-#define RSS_IPV4_TCP_CAP	0x0002
-#define RSS_IPV6_CAP		0x0004
-#define RSS_IPV6_TCP_CAP	0x0008
-
+/* Tx queues may be fewer than or equal to Rx queues */
+extern int num_queues;
 #define BNX2X_NUM_QUEUES(bp)	(bp->num_queues)
 #define BNX2X_NUM_ETH_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - NONE_ETH_CONTEXT_USE)
 
-/* ethtool statistics are displayed for all regular ethernet queues and the
- * fcoe L2 queue if not disabled
- */
-#define BNX2X_NUM_STAT_QUEUES(bp) (NO_FCOE(bp) ? BNX2X_NUM_ETH_QUEUES(bp) : \
-			   (BNX2X_NUM_ETH_QUEUES(bp) + FCOE_CONTEXT_USE))
-
 #define is_multi(bp)		(BNX2X_NUM_QUEUES(bp) > 1)
 
 #define BNX2X_MAX_QUEUES(bp)	(bp->igu_sb_cnt - CNIC_CONTEXT_USE)
@@ -1302,107 +1436,15 @@
 	TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY
 
 /* func init flags */
-#define FUNC_FLG_STATS		0x0001
-#define FUNC_FLG_TPA		0x0002
-#define FUNC_FLG_SPQ		0x0004
-#define FUNC_FLG_LEADING	0x0008	/* PF only */
+#define FUNC_FLG_RSS		0x0001
+#define FUNC_FLG_STATS		0x0002
+/* removed  FUNC_FLG_UNMATCHED	0x0004 */
+#define FUNC_FLG_TPA		0x0008
+#define FUNC_FLG_SPQ		0x0010
+#define FUNC_FLG_LEADING	0x0020	/* PF only */
 
-struct rxq_pause_params {
-	u16		bd_th_lo;
-	u16		bd_th_hi;
-	u16		rcq_th_lo;
-	u16		rcq_th_hi;
-	u16		sge_th_lo; /* valid iff QUEUE_FLG_TPA */
-	u16		sge_th_hi; /* valid iff QUEUE_FLG_TPA */
-	u16		pri_map;
-};
-
-struct bnx2x_rxq_init_params {
-	/* cxt*/
-	struct eth_context *cxt;
-
-	/* dma */
-	dma_addr_t	dscr_map;
-	dma_addr_t	sge_map;
-	dma_addr_t	rcq_map;
-	dma_addr_t	rcq_np_map;
-
-	u16		flags;
-	u16		drop_flags;
-	u16		mtu;
-	u16		buf_sz;
-	u16		fw_sb_id;
-	u16		cl_id;
-	u16		spcl_id;
-	u16		cl_qzone_id;
-
-	/* valid iff QUEUE_FLG_STATS */
-	u16		stat_id;
-
-	/* valid iff QUEUE_FLG_TPA */
-	u16		tpa_agg_sz;
-	u16		sge_buf_sz;
-	u16		max_sges_pkt;
-
-	/* valid iff QUEUE_FLG_CACHE_ALIGN */
-	u8		cache_line_log;
-
-	u8		sb_cq_index;
-	u32		cid;
-
-	/* desired interrupts per sec. valid iff QUEUE_FLG_HC */
-	u32		hc_rate;
-};
-
-struct bnx2x_txq_init_params {
-	/* cxt*/
-	struct eth_context *cxt;
-
-	/* dma */
-	dma_addr_t	dscr_map;
-
-	u16		flags;
-	u16		fw_sb_id;
-	u8		sb_cq_index;
-	u8		cos;		/* valid iff QUEUE_FLG_COS */
-	u16		stat_id;	/* valid iff QUEUE_FLG_STATS */
-	u16		traffic_type;
-	u32		cid;
-	u16		hc_rate;	/* desired interrupts per sec.*/
-					/* valid iff QUEUE_FLG_HC */
-
-};
-
-struct bnx2x_client_ramrod_params {
-	int *pstate;
-	int state;
-	u16 index;
-	u16 cl_id;
-	u32 cid;
-	u8 poll;
-#define CLIENT_IS_FCOE			0x01
-#define CLIENT_IS_LEADING_RSS		0x02
-	u8 flags;
-};
-
-struct bnx2x_client_init_params {
-	struct rxq_pause_params pause;
-	struct bnx2x_rxq_init_params rxq_params;
-	struct bnx2x_txq_init_params txq_params;
-	struct bnx2x_client_ramrod_params ramrod_params;
-};
-
-struct bnx2x_rss_params {
-	int	mode;
-	u16	cap;
-	u16	result_mask;
-};
 
 struct bnx2x_func_init_params {
-
-	/* rss */
-	struct bnx2x_rss_params *rss;	/* valid iff FUNC_FLG_RSS */
-
 	/* dma */
 	dma_addr_t	fw_stat_map;	/* valid iff FUNC_FLG_STATS */
 	dma_addr_t	spq_map;	/* valid iff FUNC_FLG_SPQ */
@@ -1414,17 +1456,10 @@
 };
 
 #define for_each_eth_queue(bp, var) \
-			for (var = 0; var < BNX2X_NUM_ETH_QUEUES(bp); var++)
+	for (var = 0; var < BNX2X_NUM_ETH_QUEUES(bp); var++)
 
 #define for_each_nondefault_eth_queue(bp, var) \
-			for (var = 1; var < BNX2X_NUM_ETH_QUEUES(bp); var++)
-
-#define for_each_napi_queue(bp, var) \
-	for (var = 0; \
-		var < BNX2X_NUM_ETH_QUEUES(bp) + FCOE_CONTEXT_USE; var++) \
-		if (skip_queue(bp, var))	\
-			continue;		\
-		else
+	for (var = 1; var < BNX2X_NUM_ETH_QUEUES(bp); var++)
 
 #define for_each_queue(bp, var) \
 	for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++) \
@@ -1462,11 +1497,66 @@
 
 #define skip_queue(bp, idx)	(NO_FCOE(bp) && IS_FCOE_IDX(idx))
 
-#define WAIT_RAMROD_POLL	0x01
-#define WAIT_RAMROD_COMMON	0x02
 
+
+
+/**
+ * bnx2x_set_mac_one - configure a single MAC address
+ *
+ * @bp:			driver handle
+ * @mac:		MAC to configure
+ * @obj:		MAC object handle
+ * @set:		if 'true' add a new MAC, otherwise - delete
+ * @mac_type:		the type of the MAC to configure (e.g. ETH, UC list)
+ * @ramrod_flags:	RAMROD_XXX flags (e.g. RAMROD_CONT, RAMROD_COMP_WAIT)
+ *
+ * Configures one MAC according to provided parameters or continues the
+ * execution of previously scheduled commands if RAMROD_CONT is set in
+ * ramrod_flags.
+ *
+ * Returns zero if the operation has completed successfully, a positive value
+ * if it has been successfully scheduled and a negative value if the requested
+ * operation has failed.
+ */
+int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
+		      struct bnx2x_vlan_mac_obj *obj, bool set,
+		      int mac_type, unsigned long *ramrod_flags);
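+/*
+ * Usage sketch (illustrative; the RAMROD_COMP_WAIT flag and the
+ * BNX2X_ETH_MAC type are assumed from the kernel-doc above):
+ *
+ *	unsigned long ramrod_flags = 0;
+ *	set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ *	rc = bnx2x_set_mac_one(bp, bp->dev->dev_addr, &bp->fp[0].mac_obj,
+ *			       true, BNX2X_ETH_MAC, &ramrod_flags);
+ */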
+/**
+ * bnx2x_del_all_macs - delete all MACs configured for the specific MAC object
+ *
+ * @bp:			driver handle
+ * @mac_obj:		MAC object handle
+ * @mac_type:		type of the MACs to clear (BNX2X_XXX_MAC)
+ * @wait_for_comp:	if 'true' block until completion
+ *
+ * Deletes all MACs of the specific type (e.g. ETH, UC list).
+ *
+ * Returns zero if the operation has completed successfully, a positive value
+ * if it has been successfully scheduled and a negative value if the requested
+ * operation has failed.
+ */
+int bnx2x_del_all_macs(struct bnx2x *bp,
+		       struct bnx2x_vlan_mac_obj *mac_obj,
+		       int mac_type, bool wait_for_comp);
+
+/* Init Function API  */
+void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p);
+int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port);
+int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
+int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode);
+int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
 void bnx2x_read_mf_cfg(struct bnx2x *bp);
 
+
 /* dmae */
 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32);
 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
@@ -1477,22 +1567,12 @@
 u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
 		      bool with_comp, u8 comp_type);
 
-int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port);
-int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
-int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
-u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param);
 
 void bnx2x_calc_fc_adv(struct bnx2x *bp);
 int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
-		  u32 data_hi, u32 data_lo, int common);
-
-/* Clears multicast and unicast list configuration in the chip. */
-void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp);
-void bnx2x_invalidate_e1h_mc_list(struct bnx2x *bp);
-void bnx2x_invalidate_uc_list(struct bnx2x *bp);
-
+		  u32 data_hi, u32 data_lo, int cmd_type);
 void bnx2x_update_coalesce(struct bnx2x *bp);
-int bnx2x_get_link_cfg_idx(struct bnx2x *bp);
+int bnx2x_get_cur_phy_idx(struct bnx2x *bp);
 
 static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 			   int wait)
@@ -1648,7 +1728,8 @@
 
 /* must be used on a CID before placing it on a HW ring */
 #define HW_CID(bp, x)			((BP_PORT(bp) << 23) | \
-					 (BP_E1HVN(bp) << 17) | (x))
+					 (BP_E1HVN(bp) << BNX2X_SWCID_SHIFT) | \
+					 (x))
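+/*
+ * Layout sketch (illustrative): port at bit 23, VN from bit
+ * BNX2X_SWCID_SHIFT (17) upwards and the 17-bit software CID in the low
+ * bits; SW_CID() earlier in this header recovers those low 17 bits.
+ */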
 
 #define SP_DESC_CNT		(BCM_PAGE_SIZE / sizeof(struct eth_spe))
 #define MAX_SP_DESC_CNT			(SP_DESC_CNT - 1)
@@ -1718,12 +1799,14 @@
 				(AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT | \
 				 AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT | \
 				 AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT | \
-				 AEU_INPUTS_ATTN_BITS_PBF_HW_INTERRUPT)
+				 AEU_INPUTS_ATTN_BITS_PBCLIENT_HW_INTERRUPT)
 #define HW_PRTY_ASSERT_SET_0	(AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR | \
 				 AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR | \
 				 AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR | \
 				 AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR |\
-				 AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR)
+				 AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR |\
+				 AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR |\
+				 AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR)
 #define HW_INTERRUT_ASSERT_SET_1 \
 				(AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT | \
 				 AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT | \
@@ -1736,17 +1819,22 @@
 				 AEU_INPUTS_ATTN_BITS_UPB_HW_INTERRUPT | \
 				 AEU_INPUTS_ATTN_BITS_CSDM_HW_INTERRUPT | \
 				 AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT)
-#define HW_PRTY_ASSERT_SET_1	(AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR |\
+#define HW_PRTY_ASSERT_SET_1	(AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR |\
 				 AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR | \
+				 AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR |\
 				 AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR | \
+				 AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR |\
 				 AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR | \
 				 AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR |\
+				 AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR |\
 			     AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR |\
 				 AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR | \
 				 AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR | \
+				 AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR |\
 				 AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR | \
 				 AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR | \
-				 AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR)
+				 AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR |\
+				 AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR)
 #define HW_INTERRUT_ASSERT_SET_2 \
 				(AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT | \
 				 AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT | \
@@ -1758,6 +1846,7 @@
 			AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR |\
 				 AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR | \
 				 AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR | \
+				 AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR |\
 				 AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR | \
 				 AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR)
 
@@ -1775,6 +1864,30 @@
 		  TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT))
 #define MULTI_MASK			0x7f
 
+
+#define DEF_USB_FUNC_OFF	offsetof(struct cstorm_def_status_block_u, func)
+#define DEF_CSB_FUNC_OFF	offsetof(struct cstorm_def_status_block_c, func)
+#define DEF_XSB_FUNC_OFF	offsetof(struct xstorm_def_status_block, func)
+#define DEF_TSB_FUNC_OFF	offsetof(struct tstorm_def_status_block, func)
+
+#define DEF_USB_IGU_INDEX_OFF \
+			offsetof(struct cstorm_def_status_block_u, igu_index)
+#define DEF_CSB_IGU_INDEX_OFF \
+			offsetof(struct cstorm_def_status_block_c, igu_index)
+#define DEF_XSB_IGU_INDEX_OFF \
+			offsetof(struct xstorm_def_status_block, igu_index)
+#define DEF_TSB_IGU_INDEX_OFF \
+			offsetof(struct tstorm_def_status_block, igu_index)
+
+#define DEF_USB_SEGMENT_OFF \
+			offsetof(struct cstorm_def_status_block_u, segment)
+#define DEF_CSB_SEGMENT_OFF \
+			offsetof(struct cstorm_def_status_block_c, segment)
+#define DEF_XSB_SEGMENT_OFF \
+			offsetof(struct xstorm_def_status_block, segment)
+#define DEF_TSB_SEGMENT_OFF \
+			offsetof(struct tstorm_def_status_block, segment)
+
 #define BNX2X_SP_DSB_INDEX \
 		(&bp->def_status_blk->sp_sb.\
 					index_values[HC_SP_INDEX_ETH_DEF_CONS])
@@ -1786,7 +1899,7 @@
 	} while (0)
 
 #define GET_FLAG(value, mask) \
-	(((value) &= (mask)) >> (mask##_SHIFT))
+	(((value) & (mask)) >> (mask##_SHIFT))
 
 #define GET_FIELD(value, fname) \
 	(((value) & (fname##_MASK)) >> (fname##_SHIFT))
@@ -1821,15 +1934,13 @@
 #define HC_SEG_ACCESS_ATTN		4
 #define HC_SEG_ACCESS_NORM		0   /*Driver decision 0-1*/
 
-#ifdef BNX2X_MAIN
-#define BNX2X_EXTERN
-#else
-#define BNX2X_EXTERN extern
-#endif
+static const u32 dmae_reg_go_c[] = {
+	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
+	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
+	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
+	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
+};
 
-BNX2X_EXTERN int load_count[2][3]; /* per path: 0-common, 1-port0, 2-port1 */
-
-extern void bnx2x_set_ethtool_ops(struct net_device *netdev);
-void bnx2x_push_indir_table(struct bnx2x *bp);
-
+void bnx2x_set_ethtool_ops(struct net_device *netdev);
+void bnx2x_notify_link_changed(struct bnx2x *bp);
 #endif /* bnx2x.h */
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index 2890443..bb75560 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -17,16 +17,17 @@
 
 #include <linux/etherdevice.h>
 #include <linux/if_vlan.h>
+#include <linux/interrupt.h>
 #include <linux/ip.h>
 #include <net/ipv6.h>
 #include <net/ip6_checksum.h>
 #include <linux/firmware.h>
 #include <linux/prefetch.h>
 #include "bnx2x_cmn.h"
-
 #include "bnx2x_init.h"
+#include "bnx2x_sp.h"
 
-static int bnx2x_setup_irqs(struct bnx2x *bp);
+
 
 /**
  * bnx2x_bz_fp - zero content of the fastpath structure.
@@ -71,6 +72,8 @@
 	to_fp->napi = orig_napi;
 }
 
+int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
+
 /* free skb in the packet ring at pos idx
  * return idx of last bd freed
  */
@@ -87,8 +90,8 @@
 	/* prefetch skb end pointer to speedup dev_kfree_skb() */
 	prefetch(&skb->end);
 
-	DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
-	   idx, tx_buf, skb);
+	DP(BNX2X_MSG_FP, "fp[%d]: pkt_idx %d  buff @(%p)->skb %p\n",
+	   fp->index, idx, tx_buf, skb);
 
 	/* unmap first bd */
 	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
@@ -96,6 +99,7 @@
 	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
 			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
 
+
 	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
 #ifdef BNX2X_STOP_ON_ERROR
 	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
@@ -174,6 +178,9 @@
 	 * memory barrier, there is a small possibility that
 	 * start_xmit() will miss it and cause the queue to be stopped
 	 * forever.
+	 * On the other hand we need an rmb() here to ensure the proper
+	 * ordering of bit testing in the following
+	 * netif_tx_queue_stopped(txq) call.
 	 */
 	smp_mb();
 
@@ -225,7 +232,7 @@
 
 	/* First mark all used pages */
 	for (i = 0; i < sge_len; i++)
-		SGE_MASK_CLEAR_BIT(fp,
+		BIT_VEC64_CLEAR_BIT(fp->sge_mask,
 			RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));
 
 	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
@@ -237,8 +244,8 @@
 		le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
 
 	last_max = RX_SGE(fp->last_max_sge);
-	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
-	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
+	last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
+	first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
 
 	/* If ring is not full */
 	if (last_elem + 1 != first_elem)
@@ -249,8 +256,8 @@
 		if (likely(fp->sge_mask[i]))
 			break;
 
-		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
-		delta += RX_SGE_MASK_ELEM_SZ;
+		fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
+		delta += BIT_VEC64_ELEM_SZ;
 	}
 
 	if (delta > 0) {
@@ -265,33 +272,56 @@
 }
 
 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
-			    struct sk_buff *skb, u16 cons, u16 prod)
+			    struct sk_buff *skb, u16 cons, u16 prod,
+			    struct eth_fast_path_rx_cqe *cqe)
 {
 	struct bnx2x *bp = fp->bp;
 	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
 	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
 	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
 	dma_addr_t mapping;
+	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
+	struct sw_rx_bd *first_buf = &tpa_info->first_buf;
 
-	/* move empty skb from pool to prod and map it */
-	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
-	mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
-				 fp->rx_buf_size, DMA_FROM_DEVICE);
-	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
-
-	/* move partial skb from cons to pool (don't unmap yet) */
-	fp->tpa_pool[queue] = *cons_rx_buf;
-
-	/* mark bin state as start - print error if current state != stop */
-	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
+	/* print error if current state != stop */
+	if (tpa_info->tpa_state != BNX2X_TPA_STOP)
 		BNX2X_ERR("start of bin not in stop [%d]\n", queue);
 
-	fp->tpa_state[queue] = BNX2X_TPA_START;
+	/* Try to map an empty skb from the aggregation info  */
+	mapping = dma_map_single(&bp->pdev->dev,
+				 first_buf->skb->data,
+				 fp->rx_buf_size, DMA_FROM_DEVICE);
+	/*
+	 * ...if it fails, move the skb from the consumer to the producer
+	 * and set the current aggregation state to ERROR so that the
+	 * packet is dropped when TPA_STOP arrives.
+	 */
 
+	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+		/* Move the BD from the consumer to the producer */
+		bnx2x_reuse_rx_skb(fp, cons, prod);
+		tpa_info->tpa_state = BNX2X_TPA_ERROR;
+		return;
+	}
+
+	/* move empty skb from pool to prod */
+	prod_rx_buf->skb = first_buf->skb;
+	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
 	/* point prod_bd to new skb */
 	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
 	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
 
+	/* move partial skb from cons to pool (don't unmap yet) */
+	*first_buf = *cons_rx_buf;
+
+	/* mark bin state as START */
+	tpa_info->parsing_flags =
+		le16_to_cpu(cqe->pars_flags.flags);
+	tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
+	tpa_info->tpa_state = BNX2X_TPA_START;
+	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
+	tpa_info->placement_offset = cqe->placement_offset;
+
 #ifdef BNX2X_STOP_ON_ERROR
 	fp->tpa_queue_used |= (1 << queue);
 #ifdef _ASM_GENERIC_INT_L64_H
@@ -322,10 +352,17 @@
 static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
 				    u16 len_on_bd)
 {
-	/* TPA arrgregation won't have an IP options and TCP options
-	 * other than timestamp.
+	/*
+	 * TPA aggregation won't have either IP options or TCP options
+	 * other than timestamp or IPv6 extension headers.
 	 */
-	u16 hdrs_len = ETH_HLEN + sizeof(struct iphdr) + sizeof(struct tcphdr);
+	u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
+
+	if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
+	    PRS_FLAG_OVERETH_IPV6)
+		hdrs_len += sizeof(struct ipv6hdr);
+	else /* IPv4 */
+		hdrs_len += sizeof(struct iphdr);
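+	/* i.e. 14 (ETH_HLEN) + 20 (TCP) + 20 (IPv4) = 54, or 74 with IPv6 */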
 
 
 	/* Check if there was a TCP timestamp, if there is it's will
@@ -340,30 +377,30 @@
 }
 
 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
-			       struct sk_buff *skb,
-			       struct eth_fast_path_rx_cqe *fp_cqe,
-			       u16 cqe_idx, u16 parsing_flags)
+			       u16 queue, struct sk_buff *skb,
+			       struct eth_end_agg_rx_cqe *cqe,
+			       u16 cqe_idx)
 {
 	struct sw_rx_page *rx_pg, old_rx_pg;
-	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
 	u32 i, frag_len, frag_size, pages;
 	int err;
 	int j;
+	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
+	u16 len_on_bd = tpa_info->len_on_bd;
 
-	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
+	frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
 	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
 
 	/* This is needed in order to enable forwarding support */
 	if (frag_size)
-		skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp, parsing_flags,
-							      len_on_bd);
+		skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
+					tpa_info->parsing_flags, len_on_bd);
 
 #ifdef BNX2X_STOP_ON_ERROR
 	if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
 		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
 			  pages, cqe_idx);
-		BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
-			  fp_cqe->pkt_len, len_on_bd);
+		BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
 		bnx2x_panic();
 		return -EINVAL;
 	}
@@ -371,8 +408,7 @@
 
 	/* Run through the SGL and compose the fragmented skb */
 	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
-		u16 sge_idx =
-			RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[j]));
+		u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
 
 		/* FW gives the indices of the SGE as if the ring is an array
 		   (meaning that "next" element will consume 2 indices) */
@@ -407,13 +443,28 @@
 }
 
 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
-			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
+			   u16 queue, struct eth_end_agg_rx_cqe *cqe,
 			   u16 cqe_idx)
 {
-	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
+	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
+	struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
+	u8 pad = tpa_info->placement_offset;
+	u16 len = tpa_info->len_on_bd;
 	struct sk_buff *skb = rx_buf->skb;
 	/* alloc new skb */
-	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
+	struct sk_buff *new_skb;
+	u8 old_tpa_state = tpa_info->tpa_state;
+
+	tpa_info->tpa_state = BNX2X_TPA_STOP;
+
+	/* If there was an error during the handling of the TPA_START,
+	 * drop this aggregation.
+	 */
+	if (old_tpa_state == BNX2X_TPA_ERROR)
+		goto drop;
+
+	/* Try to allocate the new skb */
+	new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
 
 	/* Unmap skb in the pool anyway, as we are going to change
 	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
@@ -422,11 +473,6 @@
 			 fp->rx_buf_size, DMA_FROM_DEVICE);
 
 	if (likely(new_skb)) {
-		/* fix ip xsum and give it to the stack */
-		/* (no need to map the new skb) */
-		u16 parsing_flags =
-			le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags);
-
 		prefetch(skb);
 		prefetch(((char *)(skb)) + L1_CACHE_BYTES);
 
@@ -446,21 +492,9 @@
 		skb->protocol = eth_type_trans(skb, bp->dev);
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 
-		{
-			struct iphdr *iph;
-
-			iph = (struct iphdr *)skb->data;
-			iph->check = 0;
-			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
-		}
-
-		if (!bnx2x_fill_frag_skb(bp, fp, skb,
-					 &cqe->fast_path_cqe, cqe_idx,
-					 parsing_flags)) {
-			if (parsing_flags & PARSING_FLAGS_VLAN)
-				__vlan_hwaccel_put_tag(skb,
-						 le16_to_cpu(cqe->fast_path_cqe.
-							     vlan_tag));
+		if (!bnx2x_fill_frag_skb(bp, fp, queue, skb, cqe, cqe_idx)) {
+			if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
+				__vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
 			napi_gro_receive(&fp->napi, skb);
 		} else {
 			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
@@ -470,16 +504,16 @@
 
 
 		/* put new skb in bin */
-		fp->tpa_pool[queue].skb = new_skb;
+		rx_buf->skb = new_skb;
 
-	} else {
-		/* else drop the packet and keep the buffer in the bin */
-		DP(NETIF_MSG_RX_STATUS,
-		   "Failed to allocate new skb - dropping packet!\n");
-		fp->eth_q_stats.rx_skb_alloc_failed++;
+		return;
 	}
 
-	fp->tpa_state[queue] = BNX2X_TPA_STOP;
+drop:
+	/* drop the packet and keep the buffer in the bin */
+	DP(NETIF_MSG_RX_STATUS,
+	   "Failed to allocate or map a new skb - dropping packet!\n");
+	fp->eth_q_stats.rx_skb_alloc_failed++;
 }
 
 /* Set Toeplitz hash value in the skb using the value from the
@@ -533,9 +567,16 @@
 		struct sw_rx_bd *rx_buf = NULL;
 		struct sk_buff *skb;
 		union eth_rx_cqe *cqe;
+		struct eth_fast_path_rx_cqe *cqe_fp;
 		u8 cqe_fp_flags;
+		enum eth_rx_cqe_type cqe_fp_type;
 		u16 len, pad;
 
+#ifdef BNX2X_STOP_ON_ERROR
+		if (unlikely(bp->panic))
+			return 0;
+#endif
+
 		comp_ring_cons = RCQ_BD(sw_comp_cons);
 		bd_prod = RX_BD(bd_prod);
 		bd_cons = RX_BD(bd_cons);
@@ -548,17 +589,18 @@
 				  PAGE_SIZE + 1));
 
 		cqe = &fp->rx_comp_ring[comp_ring_cons];
-		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
+		cqe_fp = &cqe->fast_path_cqe;
+		cqe_fp_flags = cqe_fp->type_error_flags;
+		cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
 
 		DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
 		   "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
-		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
-		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
-		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
-		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));
+		   cqe_fp_flags, cqe_fp->status_flags,
+		   le32_to_cpu(cqe_fp->rss_hash_result),
+		   le16_to_cpu(cqe_fp->vlan_tag), le16_to_cpu(cqe_fp->pkt_len));
 
 		/* is this a slowpath msg? */
-		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
+		if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
 			bnx2x_sp_event(fp, cqe);
 			goto next_cqe;
 
@@ -567,61 +609,59 @@
 			rx_buf = &fp->rx_buf_ring[bd_cons];
 			skb = rx_buf->skb;
 			prefetch(skb);
-			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
-			pad = cqe->fast_path_cqe.placement_offset;
 
-			/* - If CQE is marked both TPA_START and TPA_END it is
-			 *   a non-TPA CQE.
-			 * - FP CQE will always have either TPA_START or/and
-			 *   TPA_STOP flags set.
-			 */
-			if ((!fp->disable_tpa) &&
-			    (TPA_TYPE(cqe_fp_flags) !=
-					(TPA_TYPE_START | TPA_TYPE_END))) {
-				u16 queue = cqe->fast_path_cqe.queue_index;
+			if (!CQE_TYPE_FAST(cqe_fp_type)) {
+#ifdef BNX2X_STOP_ON_ERROR
+				/* sanity check */
+				if (fp->disable_tpa &&
+				    (CQE_TYPE_START(cqe_fp_type) ||
+				     CQE_TYPE_STOP(cqe_fp_type)))
+					BNX2X_ERR("START/STOP packet while "
+						  "disable_tpa type %x\n",
+						  CQE_TYPE(cqe_fp_type));
+#endif
 
-				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
+				if (CQE_TYPE_START(cqe_fp_type)) {
+					u16 queue = cqe_fp->queue_index;
 					DP(NETIF_MSG_RX_STATUS,
 					   "calling tpa_start on queue %d\n",
 					   queue);
 
 					bnx2x_tpa_start(fp, queue, skb,
-							bd_cons, bd_prod);
+							bd_cons, bd_prod,
+							cqe_fp);
 
-					/* Set Toeplitz hash for an LRO skb */
+					/* Set Toeplitz hash for LRO skb */
 					bnx2x_set_skb_rxhash(bp, cqe, skb);
 
 					goto next_rx;
-				} else { /* TPA_STOP */
+
+				} else {
+					u16 queue =
+						cqe->end_agg_cqe.queue_index;
 					DP(NETIF_MSG_RX_STATUS,
 					   "calling tpa_stop on queue %d\n",
 					   queue);
 
-					if (!BNX2X_RX_SUM_FIX(cqe))
-						BNX2X_ERR("STOP on none TCP "
-							  "data\n");
-
-					/* This is a size of the linear data
-					   on this skb */
-					len = le16_to_cpu(cqe->fast_path_cqe.
-								len_on_bd);
-					bnx2x_tpa_stop(bp, fp, queue, pad,
-						    len, cqe, comp_ring_cons);
+					bnx2x_tpa_stop(bp, fp, queue,
+						       &cqe->end_agg_cqe,
+						       comp_ring_cons);
 #ifdef BNX2X_STOP_ON_ERROR
 					if (bp->panic)
 						return 0;
 #endif
 
-					bnx2x_update_sge_prod(fp,
-							&cqe->fast_path_cqe);
+					bnx2x_update_sge_prod(fp, cqe_fp);
 					goto next_cqe;
 				}
 			}
-
+			/* non TPA */
+			len = le16_to_cpu(cqe_fp->pkt_len);
+			pad = cqe_fp->placement_offset;
 			dma_sync_single_for_device(&bp->pdev->dev,
 					dma_unmap_addr(rx_buf, mapping),
-						   pad + RX_COPY_THRESH,
-						   DMA_FROM_DEVICE);
+						       pad + RX_COPY_THRESH,
+						       DMA_FROM_DEVICE);
 			prefetch(((char *)(skb)) + L1_CACHE_BYTES);
 
 			/* is this an error packet? */
@@ -640,8 +680,7 @@
 			    (len <= RX_COPY_THRESH)) {
 				struct sk_buff *new_skb;
 
-				new_skb = netdev_alloc_skb(bp->dev,
-							   len + pad);
+				new_skb = netdev_alloc_skb(bp->dev, len + pad);
 				if (new_skb == NULL) {
 					DP(NETIF_MSG_RX_ERR,
 					   "ERROR  packet dropped "
@@ -687,6 +726,7 @@
 			skb_checksum_none_assert(skb);
 
 			if (bp->dev->features & NETIF_F_RXCSUM) {
+
 				if (likely(BNX2X_RX_CSUM_OK(cqe)))
 					skb->ip_summed = CHECKSUM_UNNECESSARY;
 				else
@@ -696,10 +736,10 @@
 
 		skb_record_rx_queue(skb, fp->index);
 
-		if (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
-		     PARSING_FLAGS_VLAN)
+		if (le16_to_cpu(cqe_fp->pars_flags.flags) &
+		    PARSING_FLAGS_VLAN)
 			__vlan_hwaccel_put_tag(skb,
-				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
+					       le16_to_cpu(cqe_fp->vlan_tag));
 		napi_gro_receive(&fp->napi, skb);
 
 
@@ -738,12 +778,6 @@
 	struct bnx2x_fastpath *fp = fp_cookie;
 	struct bnx2x *bp = fp->bp;
 
-	/* Return here if interrupt is disabled */
-	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
-		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
-		return IRQ_HANDLED;
-	}
-
 	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
 			 "[fp %d fw_sd %d igusb %d]\n",
 	   fp->index, fp->fw_sb_id, fp->igu_sb_id);
@@ -931,7 +965,7 @@
 {
 	int func = BP_FUNC(bp);
 	int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
-					      ETH_MAX_AGGREGATION_QUEUES_E1H;
+					      ETH_MAX_AGGREGATION_QUEUES_E1H_E2;
 	u16 ring_prod;
 	int i, j;
 
@@ -943,11 +977,16 @@
 		   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
 
 		if (!fp->disable_tpa) {
-			/* Fill the per-aggregation pool */
+			/* Fill the per-aggregation pool */
 			for (i = 0; i < max_agg_queues; i++) {
-				fp->tpa_pool[i].skb =
-				   netdev_alloc_skb(bp->dev, fp->rx_buf_size);
-				if (!fp->tpa_pool[i].skb) {
+				struct bnx2x_agg_info *tpa_info =
+					&fp->tpa_info[i];
+				struct sw_rx_bd *first_buf =
+					&tpa_info->first_buf;
+
+				first_buf->skb = netdev_alloc_skb(bp->dev,
+						       fp->rx_buf_size);
+				if (!first_buf->skb) {
 					BNX2X_ERR("Failed to allocate TPA "
 						  "skb pool for queue[%d] - "
 						  "disabling TPA on this "
@@ -956,10 +995,8 @@
 					fp->disable_tpa = 1;
 					break;
 				}
-				dma_unmap_addr_set((struct sw_rx_bd *)
-							&bp->fp->tpa_pool[i],
-						   mapping, 0);
-				fp->tpa_state[i] = BNX2X_TPA_STOP;
+				dma_unmap_addr_set(first_buf, mapping, 0);
+				tpa_info->tpa_state = BNX2X_TPA_STOP;
 			}
 
 			/* "next page" elements initialization */
@@ -975,13 +1012,13 @@
 				if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
 					BNX2X_ERR("was only able to allocate "
 						  "%d rx sges\n", i);
-					BNX2X_ERR("disabling TPA for"
-						  " queue[%d]\n", j);
+					BNX2X_ERR("disabling TPA for "
+						  "queue[%d]\n", j);
 					/* Cleanup already allocated elements */
-					bnx2x_free_rx_sge_range(bp,
-								fp, ring_prod);
-					bnx2x_free_tpa_pool(bp,
-							    fp, max_agg_queues);
+					bnx2x_free_rx_sge_range(bp, fp,
+								ring_prod);
+					bnx2x_free_tpa_pool(bp, fp,
+							    max_agg_queues);
 					fp->disable_tpa = 1;
 					ring_prod = 0;
 					break;
@@ -1009,7 +1046,7 @@
 		if (j != 0)
 			continue;
 
-		if (!CHIP_IS_E2(bp)) {
+		if (CHIP_IS_E1(bp)) {
 			REG_WR(bp, BAR_USTRORM_INTMEM +
 			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
 			       U64_LO(fp->rx_comp_mapping));
@@ -1053,7 +1090,6 @@
 
 		if (skb == NULL)
 			continue;
-
 		dma_unmap_single(&bp->pdev->dev,
 				 dma_unmap_addr(rx_buf, mapping),
 				 fp->rx_buf_size, DMA_FROM_DEVICE);
@@ -1075,7 +1111,7 @@
 		if (!fp->disable_tpa)
 			bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
 					    ETH_MAX_AGGREGATION_QUEUES_E1 :
-					    ETH_MAX_AGGREGATION_QUEUES_E1H);
+					    ETH_MAX_AGGREGATION_QUEUES_E1H_E2);
 	}
 }
 
@@ -1102,30 +1138,43 @@
 	}
 }
 
-static void bnx2x_free_msix_irqs(struct bnx2x *bp)
+/**
+ * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
+ *
+ * @bp:		driver handle
+ * @nvecs:	number of vectors to be released
+ */
+static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
 {
-	int i, offset = 1;
+	int i, offset = 0;
 
-	free_irq(bp->msix_table[0].vector, bp->dev);
+	if (nvecs == offset)
+		return;
+	free_irq(bp->msix_table[offset].vector, bp->dev);
 	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
-	   bp->msix_table[0].vector);
-
+	   bp->msix_table[offset].vector);
+	offset++;
 #ifdef BCM_CNIC
+	if (nvecs == offset)
+		return;
 	offset++;
 #endif
-	for_each_eth_queue(bp, i) {
-		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
-		   "state %x\n", i, bp->msix_table[i + offset].vector,
-		   bnx2x_fp(bp, i, state));
 
-		free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
+	for_each_eth_queue(bp, i) {
+		if (nvecs == offset)
+			return;
+		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d "
+		   "irq\n", i, bp->msix_table[offset].vector);
+
+		free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
 	}
 }
 
 void bnx2x_free_irq(struct bnx2x *bp)
 {
 	if (bp->flags & USING_MSIX_FLAG)
-		bnx2x_free_msix_irqs(bp);
+		bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
+				     CNIC_CONTEXT_USE + 1);
 	else if (bp->flags & USING_MSI_FLAG)
 		free_irq(bp->pdev->irq, bp->dev);
 	else
@@ -1198,9 +1247,10 @@
 
 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
 {
-	int i, rc, offset = 1;
+	int i, rc, offset = 0;
 
-	rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
+	rc = request_irq(bp->msix_table[offset++].vector,
+			 bnx2x_msix_sp_int, 0,
 			 bp->dev->name, bp->dev);
 	if (rc) {
 		BNX2X_ERR("request sp irq failed\n");
@@ -1218,13 +1268,13 @@
 		rc = request_irq(bp->msix_table[offset].vector,
 				 bnx2x_msix_fp_int, 0, fp->name, fp);
 		if (rc) {
-			BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
-			bnx2x_free_msix_irqs(bp);
+			BNX2X_ERR("request fp #%d irq (%d) failed  rc %d\n", i,
+			      bp->msix_table[offset].vector, rc);
+			bnx2x_free_msix_irqs(bp, offset);
 			return -EBUSY;
 		}
 
 		offset++;
-		fp->state = BNX2X_FP_STATE_IRQ;
 	}
 
 	i = BNX2X_NUM_ETH_QUEUES(bp);
@@ -1264,42 +1314,56 @@
 
 	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
 			 bp->dev->name, bp->dev);
-	if (!rc)
-		bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
-
 	return rc;
 }
 
-static void bnx2x_napi_enable(struct bnx2x *bp)
+static inline int bnx2x_setup_irqs(struct bnx2x *bp)
+{
+	int rc = 0;
+	if (bp->flags & USING_MSIX_FLAG) {
+		rc = bnx2x_req_msix_irqs(bp);
+		if (rc)
+			return rc;
+	} else {
+		bnx2x_ack_int(bp);
+		rc = bnx2x_req_irq(bp);
+		if (rc) {
+			BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
+			return rc;
+		}
+		if (bp->flags & USING_MSI_FLAG) {
+			bp->dev->irq = bp->pdev->irq;
+			netdev_info(bp->dev, "using MSI  IRQ %d\n",
+			       bp->pdev->irq);
+		}
+	}
+
+	return 0;
+}
+
+static inline void bnx2x_napi_enable(struct bnx2x *bp)
 {
 	int i;
 
-	for_each_napi_queue(bp, i)
+	for_each_rx_queue(bp, i)
 		napi_enable(&bnx2x_fp(bp, i, napi));
 }
 
-static void bnx2x_napi_disable(struct bnx2x *bp)
+static inline void bnx2x_napi_disable(struct bnx2x *bp)
 {
 	int i;
 
-	for_each_napi_queue(bp, i)
+	for_each_rx_queue(bp, i)
 		napi_disable(&bnx2x_fp(bp, i, napi));
 }
 
 void bnx2x_netif_start(struct bnx2x *bp)
 {
-	int intr_sem;
-
-	intr_sem = atomic_dec_and_test(&bp->intr_sem);
-	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
-
-	if (intr_sem) {
-		if (netif_running(bp->dev)) {
-			bnx2x_napi_enable(bp);
-			bnx2x_int_enable(bp);
-			if (bp->state == BNX2X_STATE_OPEN)
-				netif_tx_wake_all_queues(bp->dev);
-		}
+	if (netif_running(bp->dev)) {
+		bnx2x_napi_enable(bp);
+		bnx2x_int_enable(bp);
+		if (bp->state == BNX2X_STATE_OPEN)
+			netif_tx_wake_all_queues(bp->dev);
 	}
 }
 
@@ -1307,7 +1371,6 @@
 {
 	bnx2x_int_disable_sync(bp, disable_hw);
 	bnx2x_napi_disable(bp);
-	netif_tx_disable(bp->dev);
 }
 
 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
@@ -1358,26 +1421,6 @@
 	bp->num_queues += NONE_ETH_CONTEXT_USE;
 }
 
-#ifdef BCM_CNIC
-static inline void bnx2x_set_fcoe_eth_macs(struct bnx2x *bp)
-{
-	if (!NO_FCOE(bp)) {
-		if (!IS_MF_SD(bp))
-			bnx2x_set_fip_eth_mac_addr(bp, 1);
-		bnx2x_set_all_enode_macs(bp, 1);
-		bp->flags |= FCOE_MACS_SET;
-	}
-}
-#endif
-
-static void bnx2x_release_firmware(struct bnx2x *bp)
-{
-	kfree(bp->init_ops_offsets);
-	kfree(bp->init_ops);
-	kfree(bp->init_data);
-	release_firmware(bp->firmware);
-}
-
 static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
 {
 	int rc, num = bp->num_queues;
@@ -1409,27 +1452,198 @@
 			 */
 			fp->rx_buf_size =
 				BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD +
-				BNX2X_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
+				BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
 		else
 			fp->rx_buf_size =
-				bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
-				IP_HEADER_ALIGNMENT_PADDING;
+				bp->dev->mtu + ETH_OVREHEAD +
+				BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
 	}
 }
 
+static inline int bnx2x_init_rss_pf(struct bnx2x *bp)
+{
+	int i;
+	u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
+	u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
+
+	/*
+	 * Prepare the initial contents of the indirection table if RSS is
+	 * enabled.
+	 */
+	if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
+		for (i = 0; i < sizeof(ind_table); i++)
+			ind_table[i] = bp->fp->cl_id + (i % num_eth_queues);
+	}
+
+	/*
+	 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
+	 * per-port, so if explicit configuration is needed, do it only
+	 * for a PMF.
+	 *
+	 * For 57712 and newer on the other hand it's a per-function
+	 * configuration.
+	 */
+	return bnx2x_config_rss_pf(bp, ind_table,
+				   bp->port.pmf || !CHIP_IS_E1x(bp));
+}
+
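Illustrative sketch (not part of the patch): bnx2x_init_rss_pf() above fills the
indirection table by spreading client IDs round-robin across the ETH queues. A
minimal user-space rendition, assuming a 128-entry table and made-up values for
the base client ID and queue count:

	#include <stdio.h>
	#include <stdint.h>

	#define IND_TABLE_SIZE 128	/* stand-in for T_ETH_INDIRECTION_TABLE_SIZE */

	int main(void)
	{
		uint8_t ind_table[IND_TABLE_SIZE];
		uint8_t base_cl_id = 17;	/* hypothetical bp->fp->cl_id */
		uint8_t num_eth_queues = 4;	/* hypothetical queue count */
		int i;

		/* Same round-robin spread as the loop in bnx2x_init_rss_pf() */
		for (i = 0; i < IND_TABLE_SIZE; i++)
			ind_table[i] = base_cl_id + (i % num_eth_queues);

		/* Prints: 17 18 19 20 17 18 19 20 */
		for (i = 0; i < 8; i++)
			printf("%u ", ind_table[i]);
		printf("\n");
		return 0;
	}
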
+int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash)
+{
+	struct bnx2x_config_rss_params params = {0};
+	int i;
+
+	/* Although RSS is meaningless when there is a single HW queue, we
+	 * still need it enabled in order to have HW Rx hash generated.
+	 *
+	 * if (!is_eth_multi(bp))
+	 *      bp->multi_mode = ETH_RSS_MODE_DISABLED;
+	 */
+
+	params.rss_obj = &bp->rss_conf_obj;
+
+	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
+
+	/* RSS mode */
+	switch (bp->multi_mode) {
+	case ETH_RSS_MODE_DISABLED:
+		__set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
+		break;
+	case ETH_RSS_MODE_REGULAR:
+		__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
+		break;
+	case ETH_RSS_MODE_VLAN_PRI:
+		__set_bit(BNX2X_RSS_MODE_VLAN_PRI, &params.rss_flags);
+		break;
+	case ETH_RSS_MODE_E1HOV_PRI:
+		__set_bit(BNX2X_RSS_MODE_E1HOV_PRI, &params.rss_flags);
+		break;
+	case ETH_RSS_MODE_IP_DSCP:
+		__set_bit(BNX2X_RSS_MODE_IP_DSCP, &params.rss_flags);
+		break;
+	default:
+		BNX2X_ERR("Unknown multi_mode: %d\n", bp->multi_mode);
+		return -EINVAL;
+	}
+
+	/* If RSS is enabled */
+	if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
+		/* RSS configuration */
+		__set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
+		__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
+		__set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
+		__set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
+
+		/* Hash bits */
+		params.rss_result_mask = MULTI_MASK;
+
+		memcpy(params.ind_table, ind_table, sizeof(params.ind_table));
+
+		if (config_hash) {
+			/* RSS keys */
+			for (i = 0; i < sizeof(params.rss_key) / 4; i++)
+				params.rss_key[i] = random32();
+
+			__set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
+		}
+	}
+
+	return bnx2x_config_rss(bp, &params);
+}
+
+static inline int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
+{
+	struct bnx2x_func_state_params func_params = {0};
+
+	/* Prepare parameters for function state transitions */
+	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+
+	func_params.f_obj = &bp->func_obj;
+	func_params.cmd = BNX2X_F_CMD_HW_INIT;
+
+	func_params.params.hw_init.load_phase = load_code;
+
+	return bnx2x_func_state_change(bp, &func_params);
+}
+
+/*
+ * Cleans the objects that have internal lists, without sending
+ * ramrods. Should be run when interrupts are disabled.
+ */
+static void bnx2x_squeeze_objects(struct bnx2x *bp)
+{
+	int rc;
+	unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
+	struct bnx2x_mcast_ramrod_params rparam = {0};
+	struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
+
+	/***************** Cleanup MACs' object first *************************/
+
+	/* Wait for completion of the requested commands */
+	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+	/* Perform a dry cleanup */
+	__set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
+
+	/* Clean ETH primary MAC */
+	__set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
+	rc = mac_obj->delete_all(bp, &bp->fp->mac_obj, &vlan_mac_flags,
+				 &ramrod_flags);
+	if (rc != 0)
+		BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
+
+	/* Cleanup UC list */
+	vlan_mac_flags = 0;
+	__set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
+	rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
+				 &ramrod_flags);
+	if (rc != 0)
+		BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
+
+	/***************** Now clean mcast object *****************************/
+	rparam.mcast_obj = &bp->mcast_obj;
+	__set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
+
+	/* Add a DEL command... */
+	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
+	if (rc < 0)
+		BNX2X_ERR("Failed to add a new DEL command to a multi-cast "
+			  "object: %d\n", rc);
+
+	/* ...and wait until all pending commands are cleared */
+	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
+	while (rc != 0) {
+		if (rc < 0) {
+			BNX2X_ERR("Failed to clean multi-cast object: %d\n",
+				  rc);
+			return;
+		}
+
+		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
+	}
+}
+
+#ifndef BNX2X_STOP_ON_ERROR
+#define LOAD_ERROR_EXIT(bp, label) \
+	do { \
+		(bp)->state = BNX2X_STATE_ERROR; \
+		goto label; \
+	} while (0)
+#else
+#define LOAD_ERROR_EXIT(bp, label) \
+	do { \
+		(bp)->state = BNX2X_STATE_ERROR; \
+		(bp)->panic = 1; \
+		return -EBUSY; \
+	} while (0)
+#endif
+
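Illustrative sketch (not part of the patch): LOAD_ERROR_EXIT() is wrapped in
do { } while (0) so it expands to a single statement and stays safe after an
un-braced if. A stand-alone program with the same shape, using made-up names
rather than driver code:

	#include <stdio.h>

	#define STATE_ERROR (-1)

	struct dev_state { int state; };

	/* Same do { } while (0) shape as LOAD_ERROR_EXIT() above */
	#define FAIL_EXIT(d, label) \
		do { \
			(d)->state = STATE_ERROR; \
			goto label; \
		} while (0)

	int main(void)
	{
		struct dev_state dev = { .state = 0 };
		int rc = 1;	/* pretend a load step failed */

		if (rc)
			FAIL_EXIT(&dev, out);
		printf("load ok\n");
		return 0;
	out:
		printf("load failed, state=%d\n", dev.state);
		return 1;
	}
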
 /* must be called with rtnl_lock */
 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 {
+	int port = BP_PORT(bp);
 	u32 load_code;
 	int i, rc;
 
-	/* Set init arrays */
-	rc = bnx2x_init_firmware(bp);
-	if (rc) {
-		BNX2X_ERR("Error loading firmware\n");
-		return rc;
-	}
-
 #ifdef BNX2X_STOP_ON_ERROR
 	if (unlikely(bp->panic))
 		return -EPERM;
@@ -1456,6 +1670,10 @@
 	/* Set the receive queues buffer size */
 	bnx2x_set_rx_buf_size(bp);
 
+	/*
+	 * Set the TPA flag for each queue. The TPA flag determines the minimal
+	 * queue size, so it must be set prior to queue memory allocation.
+	 */
 	for_each_queue(bp, i)
 		bnx2x_fp(bp, i, disable_tpa) =
 					((bp->flags & TPA_ENABLE_FLAG) == 0);
@@ -1475,31 +1693,30 @@
 	rc = bnx2x_set_real_num_queues(bp);
 	if (rc) {
 		BNX2X_ERR("Unable to set real_num_queues\n");
-		goto load_error0;
+		LOAD_ERROR_EXIT(bp, load_error0);
 	}
 
 	bnx2x_napi_enable(bp);
 
 	/* Send LOAD_REQUEST command to MCP
-	   Returns the type of LOAD command:
-	   if it is the first port to be initialized
-	   common blocks should be initialized, otherwise - not
-	*/
+	 * Returns the type of LOAD command:
+	 * if it is the first port to be initialized,
+	 * common blocks should be initialized; otherwise - not.
+	 */
 	if (!BP_NOMCP(bp)) {
 		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
 		if (!load_code) {
 			BNX2X_ERR("MCP response failure, aborting\n");
 			rc = -EBUSY;
-			goto load_error1;
+			LOAD_ERROR_EXIT(bp, load_error1);
 		}
 		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
 			rc = -EBUSY; /* other port in diagnostic mode */
-			goto load_error1;
+			LOAD_ERROR_EXIT(bp, load_error1);
 		}
 
 	} else {
 		int path = BP_PATH(bp);
-		int port = BP_PORT(bp);
 
 		DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d]      %d, %d, %d\n",
 		   path, load_count[path][0], load_count[path][1],
@@ -1519,36 +1736,58 @@
 
 	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
 	    (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
-	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
+	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
 		bp->port.pmf = 1;
-	else
+		/*
+		 * We need the barrier to ensure the ordering between
+		 * writing to bp->port.pmf here and reading it from
+		 * bnx2x_periodic_task().
+		 */
+		smp_mb();
+		queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
+	} else
 		bp->port.pmf = 0;
 	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
 
+	/* Init Function state controlling object */
+	bnx2x__init_func_obj(bp);
+
 	/* Initialize HW */
 	rc = bnx2x_init_hw(bp, load_code);
 	if (rc) {
 		BNX2X_ERR("HW init failed, aborting\n");
 		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
-		goto load_error2;
+		LOAD_ERROR_EXIT(bp, load_error2);
 	}
 
 	/* Connect to IRQs */
 	rc = bnx2x_setup_irqs(bp);
 	if (rc) {
 		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
-		goto load_error2;
+		LOAD_ERROR_EXIT(bp, load_error2);
 	}
 
 	/* Setup NIC internals and enable interrupts */
 	bnx2x_nic_init(bp, load_code);
 
+	/* Init per-function objects */
+	bnx2x_init_bp_objs(bp);
+
 	if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
 	    (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
-	    (bp->common.shmem2_base))
-		SHMEM2_WR(bp, dcc_support,
-			  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
-			   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
+	    (bp->common.shmem2_base)) {
+		if (SHMEM2_HAS(bp, dcc_support))
+			SHMEM2_WR(bp, dcc_support,
+				  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
+				   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
+	}
+
+	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
+	rc = bnx2x_func_start(bp);
+	if (rc) {
+		BNX2X_ERR("Function start failed!\n");
+		LOAD_ERROR_EXIT(bp, load_error3);
+	}
 
 	/* Send LOAD_DONE command to MCP */
 	if (!BP_NOMCP(bp)) {
@@ -1556,74 +1795,38 @@
 		if (!load_code) {
 			BNX2X_ERR("MCP response failure, aborting\n");
 			rc = -EBUSY;
-			goto load_error3;
+			LOAD_ERROR_EXIT(bp, load_error3);
 		}
 	}
 
-	bnx2x_dcbx_init(bp);
-
-	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
-
-	rc = bnx2x_func_start(bp);
-	if (rc) {
-		BNX2X_ERR("Function start failed!\n");
-#ifndef BNX2X_STOP_ON_ERROR
-		goto load_error3;
-#else
-		bp->panic = 1;
-		return -EBUSY;
-#endif
-	}
-
-	rc = bnx2x_setup_client(bp, &bp->fp[0], 1 /* Leading */);
+	rc = bnx2x_setup_leading(bp);
 	if (rc) {
 		BNX2X_ERR("Setup leading failed!\n");
-#ifndef BNX2X_STOP_ON_ERROR
-		goto load_error3;
-#else
-		bp->panic = 1;
-		return -EBUSY;
-#endif
-	}
-
-	if (!CHIP_IS_E1(bp) &&
-	    (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED)) {
-		DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
-		bp->flags |= MF_FUNC_DIS;
+		LOAD_ERROR_EXIT(bp, load_error3);
 	}
 
 #ifdef BCM_CNIC
 	/* Enable Timer scan */
-	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
+	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
 #endif
 
 	for_each_nondefault_queue(bp, i) {
-		rc = bnx2x_setup_client(bp, &bp->fp[i], 0);
+		rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
 		if (rc)
-#ifdef BCM_CNIC
-			goto load_error4;
-#else
-			goto load_error3;
-#endif
+			LOAD_ERROR_EXIT(bp, load_error4);
 	}
 
+	rc = bnx2x_init_rss_pf(bp);
+	if (rc)
+		LOAD_ERROR_EXIT(bp, load_error4);
+
 	/* Now when Clients are configured we are ready to work */
 	bp->state = BNX2X_STATE_OPEN;
 
-#ifdef BCM_CNIC
-	bnx2x_set_fcoe_eth_macs(bp);
-#endif
-
-	bnx2x_set_eth_mac(bp, 1);
-
-	/* Clear MC configuration */
-	if (CHIP_IS_E1(bp))
-		bnx2x_invalidate_e1_mc_list(bp);
-	else
-		bnx2x_invalidate_e1h_mc_list(bp);
-
-	/* Clear UC lists configuration */
-	bnx2x_invalidate_uc_list(bp);
+	/* Configure a ucast MAC */
+	rc = bnx2x_set_eth_mac(bp, true);
+	if (rc)
+		LOAD_ERROR_EXIT(bp, load_error4);
 
 	if (bp->pending_max) {
 		bnx2x_update_max_mf_config(bp, bp->pending_max);
@@ -1633,15 +1836,18 @@
 	if (bp->port.pmf)
 		bnx2x_initial_phy_init(bp, load_mode);
 
-	/* Initialize Rx filtering */
-	bnx2x_set_rx_mode(bp->dev);
-
 	/* Start fast path */
+
+	/* Initialize Rx filter. */
+	netif_addr_lock_bh(bp->dev);
+	bnx2x_set_rx_mode(bp->dev);
+	netif_addr_unlock_bh(bp->dev);
+
+	/* Start the Tx */
 	switch (load_mode) {
 	case LOAD_NORMAL:
 		/* Tx queue should be only reenabled */
 		netif_tx_wake_all_queues(bp->dev);
-		/* Initialize the receive filter. */
 		break;
 
 	case LOAD_OPEN:
@@ -1670,18 +1876,28 @@
 #endif
 	bnx2x_inc_load_cnt(bp);
 
-	bnx2x_release_firmware(bp);
+	/* Wait for all pending SP commands to complete */
+	if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
+		BNX2X_ERR("Timeout waiting for SP elements to complete\n");
+		bnx2x_nic_unload(bp, UNLOAD_CLOSE);
+		return -EBUSY;
+	}
 
+	bnx2x_dcbx_init(bp);
 	return 0;
 
-#ifdef BCM_CNIC
+#ifndef BNX2X_STOP_ON_ERROR
 load_error4:
+#ifdef BCM_CNIC
 	/* Disable Timer scan */
-	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
+	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
 #endif
 load_error3:
 	bnx2x_int_disable_sync(bp, 1);
 
+	/* Clean queueable objects */
+	bnx2x_squeeze_objects(bp);
+
 	/* Free SKBs, SGEs, TPA pool and driver internals */
 	bnx2x_free_skbs(bp);
 	for_each_rx_queue(bp, i)
@@ -1701,22 +1917,31 @@
 load_error0:
 	bnx2x_free_mem(bp);
 
-	bnx2x_release_firmware(bp);
-
 	return rc;
+#endif /* ! BNX2X_STOP_ON_ERROR */
 }
 
 /* must be called with rtnl_lock */
 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 {
 	int i;
+	bool global = false;
 
-	if (bp->state == BNX2X_STATE_CLOSED) {
-		/* Interface has been removed - nothing to recover */
+	if ((bp->state == BNX2X_STATE_CLOSED) ||
+	    (bp->state == BNX2X_STATE_ERROR)) {
+		/* We can get here if the driver has been unloaded
+		 * during parity error recovery and is either waiting for a
+		 * leader to complete or for other functions to unload and
+		 * then ifdown has been issued. In this case we want to
+		 * unload and let the other functions complete the recovery
+		 * process.
+		 */
 		bp->recovery_state = BNX2X_RECOVERY_DONE;
 		bp->is_leader = 0;
-		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
-		smp_wmb();
+		bnx2x_release_leader_lock(bp);
+		smp_mb();
+
+		DP(NETIF_MSG_HW, "Releasing a leadership...\n");
 
 		return -EINVAL;
 	}
@@ -1725,18 +1950,19 @@
 	bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
 #endif
 	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
+	smp_mb();
 
-	/* Set "drop all" */
 	bp->rx_mode = BNX2X_RX_MODE_NONE;
-	bnx2x_set_storm_rx_mode(bp);
 
 	/* Stop Tx */
 	bnx2x_tx_disable(bp);
 
 	del_timer_sync(&bp->timer);
 
-	SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
-		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
+	/* Set ALWAYS_ALIVE bit in shmem */
+	bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
+
+	bnx2x_drv_pulse(bp);
 
 	bnx2x_stats_handle(bp, STATS_EVENT_STOP);
 
@@ -1744,13 +1970,35 @@
 	if (unload_mode != UNLOAD_RECOVERY)
 		bnx2x_chip_cleanup(bp, unload_mode);
 	else {
-		/* Disable HW interrupts, NAPI and Tx */
+		/* Send the UNLOAD_REQUEST to the MCP */
+		bnx2x_send_unload_req(bp, unload_mode);
+
+		/*
+		 * Prevent transactions to the host from the functions on the
+		 * engine that doesn't reset global blocks in case of a global
+		 * attention, once global blocks are reset and gates are opened
+		 * (the engine whose leader will perform the recovery
+		 * last).
+		 */
+		if (!CHIP_IS_E1x(bp))
+			bnx2x_pf_disable(bp);
+
+		/* Disable HW interrupts, NAPI */
 		bnx2x_netif_stop(bp, 1);
 
 		/* Release IRQs */
 		bnx2x_free_irq(bp);
+
+		/* Report UNLOAD_DONE to MCP */
+		bnx2x_send_unload_done(bp);
 	}
 
+	/*
+	 * At this stage no more interrupts will arrive, so we may safely clean
+	 * the queueable objects here in case they failed to get cleaned so far.
+	 */
+	bnx2x_squeeze_objects(bp);
+
 	bp->port.pmf = 0;
 
 	/* Free SKBs, SGEs, TPA pool and driver internals */
@@ -1762,17 +2010,24 @@
 
 	bp->state = BNX2X_STATE_CLOSED;
 
+	/* Check if there are pending parity attentions. If there are - set
+	 * RECOVERY_IN_PROGRESS.
+	 */
+	if (bnx2x_chk_parity_attn(bp, &global, false)) {
+		bnx2x_set_reset_in_progress(bp);
+
+		/* Set RESET_IS_GLOBAL if needed */
+		if (global)
+			bnx2x_set_reset_global(bp);
+	}
+
 	/* The last driver must disable a "close the gate" if there is no
 	 * parity attention or "process kill" pending.
 	 */
-	if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
-	    bnx2x_reset_is_done(bp))
+	if (!bnx2x_dec_load_cnt(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
 		bnx2x_disable_close_the_gate(bp);
 
-	/* Reset MCP mail box sequence if there is on going recovery */
-	if (unload_mode == UNLOAD_RECOVERY)
-		bp->fw_seq = 0;
-
 	return 0;
 }
 
@@ -2148,6 +2403,22 @@
 				sizeof(struct udphdr) - skb->data;
 }
 
+static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
+	struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
+{
+	tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
+
+	if (xmit_type & XMIT_CSUM_V4)
+		tx_start_bd->bd_flags.as_bitfield |=
+					ETH_TX_BD_FLAGS_IP_CSUM;
+	else
+		tx_start_bd->bd_flags.as_bitfield |=
+					ETH_TX_BD_FLAGS_IPV6;
+
+	if (!(xmit_type & XMIT_CSUM_TCP))
+		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
+}
+
 /**
  * bnx2x_set_pbd_csum - update PBD with checksum and return header length
  *
@@ -2213,7 +2484,7 @@
 	struct bnx2x_fastpath *fp;
 	struct netdev_queue *txq;
 	struct sw_tx_bd *tx_buf;
-	struct eth_tx_start_bd *tx_start_bd;
+	struct eth_tx_start_bd *tx_start_bd, *first_bd;
 	struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
 	struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
 	struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
@@ -2275,7 +2546,15 @@
 		}
 	}
 #endif
-
+	/* Map skb linear data for DMA */
+	mapping = dma_map_single(&bp->pdev->dev, skb->data,
+				 skb_headlen(skb), DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+		DP(NETIF_MSG_TX_QUEUED, "SKB mapping failed - "
+		   "silently dropping this SKB\n");
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
 	/*
 	Please read carefully. First we use one BD which we mark as start,
 	then we have a parsing info BD (used for TSO or xsum),
@@ -2285,12 +2564,19 @@
 	And above all, all pdb sizes are in words - NOT DWORDS!
 	*/
 
-	pkt_prod = fp->tx_pkt_prod++;
+	/* Get the current packet producer index now; advance it just before
+	 * sending, since page mapping may fail and the packet may be dropped.
+	 */
+	pkt_prod = fp->tx_pkt_prod;
 	bd_prod = TX_BD(fp->tx_bd_prod);
 
-	/* get a tx_buf and first BD */
+	/* get a tx_buf and first BD
+	 * tx_start_bd may be changed during SPLIT,
+	 * but first_bd will always stay first
+	 */
 	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
 	tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
+	first_bd = tx_start_bd;
 
 	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
 	SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
@@ -2319,22 +2605,10 @@
 	/* turn on parsing and get a BD */
 	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
 
-	if (xmit_type & XMIT_CSUM) {
-		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
+	if (xmit_type & XMIT_CSUM)
+		bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
 
-		if (xmit_type & XMIT_CSUM_V4)
-			tx_start_bd->bd_flags.as_bitfield |=
-						ETH_TX_BD_FLAGS_IP_CSUM;
-		else
-			tx_start_bd->bd_flags.as_bitfield |=
-						ETH_TX_BD_FLAGS_IPV6;
-
-		if (!(xmit_type & XMIT_CSUM_TCP))
-			tx_start_bd->bd_flags.as_bitfield |=
-						ETH_TX_BD_FLAGS_IS_UDP;
-	}
-
-	if (CHIP_IS_E2(bp)) {
+	if (!CHIP_IS_E1x(bp)) {
 		pbd_e2 = &fp->tx_desc_ring[bd_prod].parse_bd_e2;
 		memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
 		/* Set PBD in checksum offload case */
@@ -2342,6 +2616,20 @@
 			hlen = bnx2x_set_pbd_csum_e2(bp, skb,
 						     &pbd_e2_parsing_data,
 						     xmit_type);
+		if (IS_MF_SI(bp)) {
+			/*
+			 * fill in the MAC addresses in the PBD - for local
+			 * switching
+			 */
+			bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
+					      &pbd_e2->src_mac_addr_mid,
+					      &pbd_e2->src_mac_addr_lo,
+					      eth->h_source);
+			bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
+					      &pbd_e2->dst_mac_addr_mid,
+					      &pbd_e2->dst_mac_addr_lo,
+					      eth->h_dest);
+		}
 	} else {
 		pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x;
 		memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
@@ -2351,15 +2639,10 @@
 
 	}
 
-	/* Map skb linear data for DMA */
-	mapping = dma_map_single(&bp->pdev->dev, skb->data,
-				 skb_headlen(skb), DMA_TO_DEVICE);
-
 	/* Setup the data pointer of the first BD of the packet */
 	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
 	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
-	nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
-	tx_start_bd->nbd = cpu_to_le16(nbd);
+	nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
 	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
 	pkt_size = tx_start_bd->nbytes;
 
@@ -2382,7 +2665,7 @@
 		if (unlikely(skb_headlen(skb) > hlen))
 			bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
 						 hlen, bd_prod, ++nbd);
-		if (CHIP_IS_E2(bp))
+		if (!CHIP_IS_E1x(bp))
 			bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
 					     xmit_type);
 		else
@@ -2401,19 +2684,34 @@
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
+		mapping = dma_map_page(&bp->pdev->dev, frag->page,
+				       frag->page_offset, frag->size,
+				       DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+
+			DP(NETIF_MSG_TX_QUEUED, "Unable to map page - "
+						"dropping packet...\n");
+
+			/* we need unmap all buffers already mapped
+			 * for this SKB;
+			 * first_bd->nbd need to be properly updated
+			 * before call to bnx2x_free_tx_pkt
+			 */
+			first_bd->nbd = cpu_to_le16(nbd);
+			bnx2x_free_tx_pkt(bp, fp, TX_BD(fp->tx_pkt_prod));
+			return NETDEV_TX_OK;
+		}
+
 		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
 		tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
 		if (total_pkt_bd == NULL)
 			total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
 
-		mapping = dma_map_page(&bp->pdev->dev, frag->page,
-				       frag->page_offset,
-				       frag->size, DMA_TO_DEVICE);
-
 		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
 		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
 		tx_data_bd->nbytes = cpu_to_le16(frag->size);
 		le16_add_cpu(&pkt_size, frag->size);
+		nbd++;
 
 		DP(NETIF_MSG_TX_QUEUED,
 		   "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
@@ -2423,6 +2721,9 @@
 
 	DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
 
+	/* update with actual num BDs */
+	first_bd->nbd = cpu_to_le16(nbd);
+
 	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
 
 	/* now send a tx doorbell, counting the next BD
@@ -2431,6 +2732,13 @@
 	if (TX_BD_POFF(bd_prod) < nbd)
 		nbd++;
 
+	/* total_pkt_bytes should be set on the first data BD if
+	 * it's not an LSO packet and there is more than one
+	 * data BD. In this case pkt_size is limited by an MTU value.
+	 * However, we prefer to set it for an LSO packet (while we don't
+	 * have to) in order to save some CPU cycles in the non-LSO
+	 * case, where we care about them much more.
+	 */
 	if (total_pkt_bd != NULL)
 		total_pkt_bd->total_pkt_bytes = pkt_size;
 
@@ -2451,6 +2759,7 @@
 		   pbd_e2->parsing_data);
 	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
 
+	fp->tx_pkt_prod++;
 	/*
 	 * Make sure that the BD data is updated before updating the producer
 	 * since FW might read the BD right after the producer is updated.
@@ -2491,15 +2800,23 @@
 {
 	struct sockaddr *addr = p;
 	struct bnx2x *bp = netdev_priv(dev);
+	int rc = 0;
 
 	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
 		return -EINVAL;
 
-	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
-	if (netif_running(dev))
-		bnx2x_set_eth_mac(bp, 1);
+	if (netif_running(dev))  {
+		rc = bnx2x_set_eth_mac(bp, false);
+		if (rc)
+			return rc;
+	}
 
-	return 0;
+	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+	if (netif_running(dev))
+		rc = bnx2x_set_eth_mac(bp, true);
+
+	return rc;
 }
 
 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
@@ -2516,7 +2833,7 @@
 	} else {
 #endif
 		/* status blocks */
-		if (CHIP_IS_E2(bp))
+		if (!CHIP_IS_E1x(bp))
 			BNX2X_PCI_FREE(sb->e2_sb,
 				       bnx2x_fp(bp, fp_index,
 						status_blk_mapping),
@@ -2572,7 +2889,7 @@
 static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
 {
 	union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
-	if (CHIP_IS_E2(bp)) {
+	if (!CHIP_IS_E1x(bp)) {
 		bnx2x_fp(bp, index, sb_index_values) =
 			(__le16 *)status_blk.e2_sb->sb.index_values;
 		bnx2x_fp(bp, index, sb_running_index) =
@@ -2609,7 +2926,7 @@
 	if (!IS_FCOE_IDX(index)) {
 #endif
 		/* status blocks */
-		if (CHIP_IS_E2(bp))
+		if (!CHIP_IS_E1x(bp))
 			BNX2X_PCI_ALLOC(sb->e2_sb,
 				&bnx2x_fp(bp, index, status_blk_mapping),
 				sizeof(struct host_hc_status_block_e2));
@@ -2620,7 +2937,12 @@
 #ifdef BCM_CNIC
 	}
 #endif
-	set_sb_shortcuts(bp, index);
+
+	/* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
+	 * set shortcuts for it.
+	 */
+	if (!IS_FCOE_IDX(index))
+		set_sb_shortcuts(bp, index);
 
 	/* Tx */
 	if (!skip_tx_queue(bp, index)) {
@@ -2697,9 +3019,13 @@
 	if (bnx2x_alloc_fp_mem_at(bp, 0))
 		return -ENOMEM;
 #ifdef BCM_CNIC
-	/* FCoE */
-	if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
-		return -ENOMEM;
+	if (!NO_FCOE(bp))
+		/* FCoE */
+		if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
+			/* we will fail the load process instead of marking
+			 * NO_FCOE_FLAG
+			 */
+			return -ENOMEM;
 #endif
 	/* RSS */
 	for_each_nondefault_eth_queue(bp, i)
@@ -2729,30 +3055,6 @@
 	return 0;
 }
 
-static int bnx2x_setup_irqs(struct bnx2x *bp)
-{
-	int rc = 0;
-	if (bp->flags & USING_MSIX_FLAG) {
-		rc = bnx2x_req_msix_irqs(bp);
-		if (rc)
-			return rc;
-	} else {
-		bnx2x_ack_int(bp);
-		rc = bnx2x_req_irq(bp);
-		if (rc) {
-			BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
-			return rc;
-		}
-		if (bp->flags & USING_MSI_FLAG) {
-			bp->dev->irq = bp->pdev->irq;
-			netdev_info(bp->dev, "using MSI  IRQ %d\n",
-			       bp->pdev->irq);
-		}
-	}
-
-	return 0;
-}
-
 void bnx2x_free_mem_bp(struct bnx2x *bp)
 {
 	kfree(bp->fp);
@@ -2792,7 +3094,7 @@
 
 }
 
-static int bnx2x_reload_if_running(struct net_device *dev)
+int bnx2x_reload_if_running(struct net_device *dev)
 {
 	struct bnx2x *bp = netdev_priv(dev);
 
@@ -2803,6 +3105,55 @@
 	return bnx2x_nic_load(bp, LOAD_NORMAL);
 }
 
+int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
+{
+	u32 sel_phy_idx = 0;
+	if (bp->link_params.num_phys <= 1)
+		return INT_PHY;
+
+	if (bp->link_vars.link_up) {
+		sel_phy_idx = EXT_PHY1;
+		/* In case link is SERDES, check if the EXT_PHY2 is the one */
+		if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
+		    (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
+			sel_phy_idx = EXT_PHY2;
+	} else {
+		switch (bnx2x_phy_selection(&bp->link_params)) {
+		case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
+		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
+		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
+			sel_phy_idx = EXT_PHY1;
+			break;
+		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
+		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
+			sel_phy_idx = EXT_PHY2;
+			break;
+		}
+	}
+
+	return sel_phy_idx;
+}
+
+int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
+{
+	u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
+	/*
+	 * The selected active PHY is always the one after swapping (in case
+	 * PHY swapping is enabled), so when swapping is enabled we need to
+	 * reverse the configuration.
+	 */
+
+	if (bp->link_params.multi_phy_config &
+	    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
+		if (sel_phy_idx == EXT_PHY1)
+			sel_phy_idx = EXT_PHY2;
+		else if (sel_phy_idx == EXT_PHY2)
+			sel_phy_idx = EXT_PHY1;
+	}
+	return LINK_CONFIG_IDX(sel_phy_idx);
+}
+
 /* called with rtnl_lock */
 int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
 {
@@ -2954,3 +3305,57 @@
 
 	return rc;
 }
+
+
+void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
+			      u32 cid)
+{
+	/* ustorm cxt validation */
+	cxt->ustorm_ag_context.cdu_usage =
+		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
+			CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
+	/* xcontext validation */
+	cxt->xstorm_ag_context.cdu_reserved =
+		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
+			CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
+}
+
+static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
+					     u8 fw_sb_id, u8 sb_index,
+					     u8 ticks)
+{
+	u32 addr = BAR_CSTRORM_INTMEM +
+		   CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
+	REG_WR8(bp, addr, ticks);
+	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
+			  port, fw_sb_id, sb_index, ticks);
+}
+
+static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
+					     u16 fw_sb_id, u8 sb_index,
+					     u8 disable)
+{
+	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
+	u32 addr = BAR_CSTRORM_INTMEM +
+		   CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
+	u16 flags = REG_RD16(bp, addr);
+	/* clear and set */
+	flags &= ~HC_INDEX_DATA_HC_ENABLED;
+	flags |= enable_flag;
+	REG_WR16(bp, addr, flags);
+	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
+			  port, fw_sb_id, sb_index, disable);
+}
+
+void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
+				    u8 sb_index, u8 disable, u16 usec)
+{
+	int port = BP_PORT(bp);
+	u8 ticks = usec / BNX2X_BTR;
+
+	storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
+
+	disable = disable ? 1 : (usec ? 0 : 1);
+	storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
+}
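Illustrative sketch (not part of the patch): the ternary above is compact, so
here is the same rule restated as a tiny user-space program — an explicit
disable request always wins, and a zero timeout also disables coalescing:

	#include <stdio.h>
	#include <stdint.h>

	/* Re-statement of: disable = disable ? 1 : (usec ? 0 : 1); */
	static uint8_t hc_disable(uint8_t disable, uint16_t usec)
	{
		return disable ? 1 : (usec ? 0 : 1);
	}

	int main(void)
	{
		printf("%u\n", hc_disable(1, 50));	/* 1: caller disables */
		printf("%u\n", hc_disable(0, 50));	/* 0: stay enabled */
		printf("%u\n", hc_disable(0, 0));	/* 1: zero timeout disables */
		return 0;
	}
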
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h
index 1a3545b..c016e20 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/bnx2x/bnx2x_cmn.h
@@ -18,11 +18,15 @@
 #define BNX2X_CMN_H
 
 #include <linux/types.h>
+#include <linux/pci.h>
 #include <linux/netdevice.h>
 
 
 #include "bnx2x.h"
 
+/* This is used as a replacement for an MCP if it's not present */
+extern int load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */
+
 extern int num_queues;
 
 /************************ Macros ********************************/
@@ -61,6 +65,73 @@
 /*********************** Interfaces ****************************
  *  Functions that need to be implemented by each driver version
  */
+/* Init */
+
+/**
+ * bnx2x_send_unload_req - request unload mode from the MCP.
+ *
+ * @bp:			driver handle
+ * @unload_mode:	requested function's unload mode
+ *
+ * Returns the unload mode reported by the MCP: COMMON, PORT or FUNC.
+ */
+u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode);
+
+/**
+ * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
+ *
+ * @bp:		driver handle
+ */
+void bnx2x_send_unload_done(struct bnx2x *bp);
+
+/**
+ * bnx2x_config_rss_pf - configure RSS parameters.
+ *
+ * @bp:			driver handle
+ * @ind_table:		indirection table to configure
+ * @config_hash:	re-configure RSS hash keys configuration
+ */
+int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash);
+
+/**
+ * bnx2x__init_func_obj - init function object
+ *
+ * @bp:			driver handle
+ *
+ * Initializes the Function Object with the appropriate
+ * parameters which include a function slow path driver
+ * interface.
+ */
+void bnx2x__init_func_obj(struct bnx2x *bp);
+
+/**
+ * bnx2x_setup_queue - setup eth queue.
+ *
+ * @bp:		driver handle
+ * @fp:		pointer to the fastpath structure
+ * @leading:	true for the leading queue
+ *
+ */
+int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+		       bool leading);
+
+/**
+ * bnx2x_setup_leading - bring up a leading eth queue.
+ *
+ * @bp:		driver handle
+ */
+int bnx2x_setup_leading(struct bnx2x *bp);
+
+/**
+ * bnx2x_fw_command - send the MCP a request
+ *
+ * @bp:		driver handle
+ * @command:	request
+ * @param:	request's parameter
+ *
+ * Blocks until there is a reply.
+ */
+u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param);
 
 /**
  * bnx2x_initial_phy_init - initialize link parameters structure variables.
@@ -88,6 +159,32 @@
 u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes);
 
 /**
+ * bnx2x_drv_pulse - write driver pulse to shmem
+ *
+ * @bp:		driver handle
+ *
+ * writes the value in bp->fw_drv_pulse_wr_seq to drv_pulse mbox
+ * in the shmem.
+ */
+void bnx2x_drv_pulse(struct bnx2x *bp);
+
+/**
+ * bnx2x_igu_ack_sb - update IGU with current SB value
+ *
+ * @bp:		driver handle
+ * @igu_sb_id:	SB id
+ * @segment:	SB segment
+ * @index:	SB index
+ * @op:		SB operation
+ * @update:	is HW update required
+ */
+void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
+		      u16 index, u8 op, u8 update);
+
+/* Disable transactions from chip to host */
+void bnx2x_pf_disable(struct bnx2x *bp);
+
+/**
  * bnx2x__link_status_update - handles link status change.
  *
  * @bp:		driver handle
@@ -165,21 +262,6 @@
 void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);
 
 /**
- * bnx2x_init_firmware - loads device firmware
- *
- * @bp:		driver handle
- */
-int bnx2x_init_firmware(struct bnx2x *bp);
-
-/**
- * bnx2x_init_hw - init HW blocks according to current initialization stage.
- *
- * @bp:		driver handle
- * @load_code:	COMMON, PORT or FUNCTION
- */
-int bnx2x_init_hw(struct bnx2x *bp, u32 load_code);
-
-/**
  * bnx2x_nic_init - init driver internals.
  *
  * @bp:		driver handle
@@ -207,16 +289,6 @@
 void bnx2x_free_mem(struct bnx2x *bp);
 
 /**
- * bnx2x_setup_client - setup eth client.
- *
- * @bp:		driver handle
- * @fp:		pointer to fastpath structure
- * @is_leading:	boolean
- */
-int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
-		       int is_leading);
-
-/**
  * bnx2x_set_num_queues - set number of queues according to mode.
  *
  * @bp:		driver handle
@@ -252,6 +324,13 @@
 int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);
 
 /**
+ * bnx2x_release_leader_lock - release recovery leader lock
+ *
+ * @bp:		driver handle
+ */
+int bnx2x_release_leader_lock(struct bnx2x *bp);
+
+/**
  * bnx2x_set_eth_mac - configure eth MAC address in the HW
  *
  * @bp:		driver handle
@@ -259,29 +338,7 @@
  *
  * Configures according to the value in netdev->dev_addr.
  */
-void bnx2x_set_eth_mac(struct bnx2x *bp, int set);
-
-#ifdef BCM_CNIC
-/**
- * bnx2x_set_fip_eth_mac_addr - Set/Clear FIP MAC(s)
- *
- * @bp:		driver handle
- * @set:	set or clear the CAM entry
- *
- * Used next enties in the CAM after the ETH MAC(s).
- * This function will wait until the ramdord completion returns.
- * Return 0 if cussess, -ENODEV if ramrod doesn't return.
- */
-int bnx2x_set_fip_eth_mac_addr(struct bnx2x *bp, int set);
-
-/**
- * bnx2x_set_all_enode_macs - Set/Clear ALL_ENODE mcast MAC.
- *
- * @bp:		driver handle
- * @set:	set or clear
- */
-int bnx2x_set_all_enode_macs(struct bnx2x *bp, int set);
-#endif
+int bnx2x_set_eth_mac(struct bnx2x *bp, bool set);
 
 /**
  * bnx2x_set_rx_mode - set MAC filtering configurations.
@@ -289,6 +346,8 @@
  * @dev:	netdevice
  *
  * called with netif_tx_lock from dev_mcast.c
+ * If bp->state is OPEN, should be called with
+ * netif_addr_lock_bh().
  */
 void bnx2x_set_rx_mode(struct net_device *dev);
 
@@ -296,25 +355,38 @@
  * bnx2x_set_storm_rx_mode - configure MAC filtering rules in a FW.
  *
  * @bp:		driver handle
+ *
+ * If bp->state is OPEN, should be called with
+ * netif_addr_lock_bh().
  */
 void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
 
+/**
+ * bnx2x_set_q_rx_mode - configures rx_mode for a single queue.
+ *
+ * @bp:			driver handle
+ * @cl_id:		client id
+ * @rx_mode_flags:	rx mode configuration
+ * @rx_accept_flags:	rx accept configuration
+ * @tx_accept_flags:	tx accept configuration (tx switch)
+ * @ramrod_flags:	ramrod configuration
+ */
+void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
+			 unsigned long rx_mode_flags,
+			 unsigned long rx_accept_flags,
+			 unsigned long tx_accept_flags,
+			 unsigned long ramrod_flags);
+
 /* Parity errors related */
 void bnx2x_inc_load_cnt(struct bnx2x *bp);
 u32 bnx2x_dec_load_cnt(struct bnx2x *bp);
-bool bnx2x_chk_parity_attn(struct bnx2x *bp);
-bool bnx2x_reset_is_done(struct bnx2x *bp);
+bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print);
+bool bnx2x_reset_is_done(struct bnx2x *bp, int engine);
+void bnx2x_set_reset_in_progress(struct bnx2x *bp);
+void bnx2x_set_reset_global(struct bnx2x *bp);
 void bnx2x_disable_close_the_gate(struct bnx2x *bp);
 
 /**
- * bnx2x_stats_handle - perform statistics handling according to event.
- *
- * @bp:		driver handle
- * @event:	bnx2x_stats_event
- */
-void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
-
-/**
  * bnx2x_sp_event - handle ramrods completion.
  *
  * @fp:		fastpath handle for the event
@@ -323,15 +395,6 @@
 void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe);
 
 /**
- * bnx2x_func_start - init function
- *
- * @bp:		driver handle
- *
- * Must be called before sending CLIENT_SETUP for the first client.
- */
-int bnx2x_func_start(struct bnx2x *bp);
-
-/**
  * bnx2x_ilt_set_info - prepare ILT configurations.
  *
  * @bp:		driver handle
@@ -362,6 +425,10 @@
  * @value:	new value
  */
 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value);
+/* Error handling */
+void bnx2x_panic_dump(struct bnx2x *bp);
+
+void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl);
 
 /* dev_close main block */
 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
@@ -375,11 +442,17 @@
 /* select_queue callback */
 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb);
 
+/* reload helper */
+int bnx2x_reload_if_running(struct net_device *dev);
+
 int bnx2x_change_mac_addr(struct net_device *dev, void *p);
 
 /* NAPI poll Rx part */
 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget);
 
+void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+			u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod);
+
 /* NAPI poll Tx part */
 int bnx2x_tx_int(struct bnx2x_fastpath *fp);
 
@@ -392,7 +465,6 @@
 
 void bnx2x_free_fp_mem(struct bnx2x *bp);
 int bnx2x_alloc_fp_mem(struct bnx2x *bp);
-
 void bnx2x_init_rx_rings(struct bnx2x *bp);
 void bnx2x_free_skbs(struct bnx2x *bp);
 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
@@ -457,19 +529,20 @@
  */
 void bnx2x_tx_timeout(struct net_device *dev);
 
+/*********************** Inlines **********************************/
+/*********************** Fast path ********************************/
 static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
 {
 	barrier(); /* status block is written to by the chip */
 	fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID];
 }
 
-static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
-					struct bnx2x_fastpath *fp,
-					u16 bd_prod, u16 rx_comp_prod,
-					u16 rx_sge_prod)
+static inline void bnx2x_update_rx_prod_gen(struct bnx2x *bp,
+			struct bnx2x_fastpath *fp, u16 bd_prod,
+			u16 rx_comp_prod, u16 rx_sge_prod, u32 start)
 {
 	struct ustorm_eth_rx_producers rx_prods = {0};
-	int i;
+	u32 i;
 
 	/* Update producers */
 	rx_prods.bd_prod = bd_prod;
@@ -486,10 +559,8 @@
 	 */
 	wmb();
 
-	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
-		REG_WR(bp,
-		       BAR_USTRORM_INTMEM + fp->ustorm_rx_prods_offset + i*4,
-		       ((u32 *)&rx_prods)[i]);
+	for (i = 0; i < sizeof(rx_prods)/4; i++)
+		REG_WR(bp, start + i*4, ((u32 *)&rx_prods)[i]);
 
 	mmiowb(); /* keep prod updates ordered */
 
@@ -519,7 +590,7 @@
 	barrier();
 }
 
-static inline void bnx2x_igu_clear_sb_gen(struct bnx2x *bp,
+static inline void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func,
 					  u8 idu_sb_id, bool is_Pf)
 {
 	u32 data, ctl, cnt = 100;
@@ -527,7 +598,7 @@
 	u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
 	u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
 	u32 sb_bit =  1 << (idu_sb_id%32);
-	u32 func_encode = BP_FUNC(bp) |
+	u32 func_encode = func |
 			((is_Pf == true ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT);
 	u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
 
@@ -590,15 +661,6 @@
 	barrier();
 }
 
-static inline void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
-		      u16 index, u8 op, u8 update)
-{
-	u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
-
-	bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update,
-			     igu_addr);
-}
-
 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 storm,
 				u16 index, u8 op, u8 update)
 {
@@ -705,7 +767,7 @@
 }
 
 /**
- * disables tx from stack point of view
+ * bnx2x_tx_disable - disables tx from stack point of view
  *
  * @bp:		driver handle
  */
@@ -740,7 +802,7 @@
 	int i;
 
 	/* Add NAPI objects */
-	for_each_napi_queue(bp, i)
+	for_each_rx_queue(bp, i)
 		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
 			       bnx2x_poll, BNX2X_NAPI_WEIGHT);
 }
@@ -749,7 +811,7 @@
 {
 	int i;
 
-	for_each_napi_queue(bp, i)
+	for_each_rx_queue(bp, i)
 		netif_napi_del(&bnx2x_fp(bp, i, napi));
 }
 
@@ -779,7 +841,7 @@
 		int idx = RX_SGE_CNT * i - 1;
 
 		for (j = 0; j < 2; j++) {
-			SGE_MASK_CLEAR_BIT(fp, idx);
+			BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx);
 			idx--;
 		}
 	}
@@ -789,7 +851,7 @@
 {
 	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
 	memset(fp->sge_mask, 0xff,
-	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
+	       (NUM_RX_SGE >> BIT_VEC64_ELEM_SHIFT)*sizeof(u64));
 
 	/* Clear the two last indices in the page to 1:
 	   these are the indices that correspond to the "next" element,
@@ -871,12 +933,61 @@
 				   dma_unmap_addr(cons_rx_buf, mapping),
 				   RX_COPY_THRESH, DMA_FROM_DEVICE);
 
-	prod_rx_buf->skb = cons_rx_buf->skb;
 	dma_unmap_addr_set(prod_rx_buf, mapping,
 			   dma_unmap_addr(cons_rx_buf, mapping));
+	prod_rx_buf->skb = cons_rx_buf->skb;
 	*prod_bd = *cons_bd;
 }
 
+/************************* Init ******************************************/
+
+/**
+ * bnx2x_func_start - init function
+ *
+ * @bp:		driver handle
+ *
+ * Must be called before sending CLIENT_SETUP for the first client.
+ */
+static inline int bnx2x_func_start(struct bnx2x *bp)
+{
+	struct bnx2x_func_state_params func_params = {0};
+	struct bnx2x_func_start_params *start_params =
+		&func_params.params.start;
+
+	/* Prepare parameters for function state transitions */
+	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+
+	func_params.f_obj = &bp->func_obj;
+	func_params.cmd = BNX2X_F_CMD_START;
+
+	/* Function parameters */
+	start_params->mf_mode = bp->mf_mode;
+	start_params->sd_vlan_tag = bp->mf_ov;
+	start_params->network_cos_mode = OVERRIDE_COS;
+
+	return bnx2x_func_state_change(bp, &func_params);
+}
+
+/**
+ * bnx2x_set_fw_mac_addr - fill in a MAC address in FW format
+ *
+ * @fw_hi:	pointer to upper part
+ * @fw_mid:	pointer to middle part
+ * @fw_lo:	pointer to lower part
+ * @mac:	pointer to MAC address
+ */
+static inline void bnx2x_set_fw_mac_addr(u16 *fw_hi, u16 *fw_mid, u16 *fw_lo,
+					 u8 *mac)
+{
+	((u8 *)fw_hi)[0]  = mac[1];
+	((u8 *)fw_hi)[1]  = mac[0];
+	((u8 *)fw_mid)[0] = mac[3];
+	((u8 *)fw_mid)[1] = mac[2];
+	((u8 *)fw_lo)[0]  = mac[5];
+	((u8 *)fw_lo)[1]  = mac[4];
+}
+
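Illustrative sketch (not part of the patch): the FW MAC format swaps the two
bytes within each 16-bit word. A user-space copy of the helper with a made-up
MAC, showing what the words hold on a little-endian host:

	#include <stdio.h>
	#include <stdint.h>

	/* Same byte placement as bnx2x_set_fw_mac_addr() above */
	static void set_fw_mac_addr(uint16_t *fw_hi, uint16_t *fw_mid,
				    uint16_t *fw_lo, const uint8_t *mac)
	{
		((uint8_t *)fw_hi)[0]  = mac[1];
		((uint8_t *)fw_hi)[1]  = mac[0];
		((uint8_t *)fw_mid)[0] = mac[3];
		((uint8_t *)fw_mid)[1] = mac[2];
		((uint8_t *)fw_lo)[0]  = mac[5];
		((uint8_t *)fw_lo)[1]  = mac[4];
	}

	int main(void)
	{
		const uint8_t mac[6] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };
		uint16_t hi, mid, lo;

		set_fw_mac_addr(&hi, &mid, &lo, mac);
		/* Little-endian result: hi=001b mid=21aa lo=bbcc */
		printf("hi=%04x mid=%04x lo=%04x\n", hi, mid, lo);
		return 0;
	}
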
 static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
 					   struct bnx2x_fastpath *fp, int last)
 {
@@ -895,21 +1006,20 @@
 	int i;
 
 	for (i = 0; i < last; i++) {
-		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
-		struct sk_buff *skb = rx_buf->skb;
+		struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
+		struct sw_rx_bd *first_buf = &tpa_info->first_buf;
+		struct sk_buff *skb = first_buf->skb;
 
 		if (skb == NULL) {
 			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
 			continue;
 		}
-
-		if (fp->tpa_state[i] == BNX2X_TPA_START)
+		if (tpa_info->tpa_state == BNX2X_TPA_START)
 			dma_unmap_single(&bp->pdev->dev,
-					 dma_unmap_addr(rx_buf, mapping),
+					 dma_unmap_addr(first_buf, mapping),
 					 fp->rx_buf_size, DMA_FROM_DEVICE);
-
 		dev_kfree_skb(skb);
-		rx_buf->skb = NULL;
+		first_buf->skb = NULL;
 	}
 }
 
@@ -1038,31 +1148,201 @@
 	return i - fp->eth_q_stats.rx_skb_alloc_failed;
 }
 
+/* Statistics ID are global per chip/path, while Client IDs for E1x are per
+ * port.
+ */
+static inline u8 bnx2x_stats_id(struct bnx2x_fastpath *fp)
+{
+	if (!CHIP_IS_E1x(fp->bp))
+		return fp->cl_id;
+	else
+		return fp->cl_id + BP_PORT(fp->bp) * FP_SB_MAX_E1x;
+}
+
+static inline void bnx2x_init_vlan_mac_fp_objs(struct bnx2x_fastpath *fp,
+					       bnx2x_obj_type obj_type)
+{
+	struct bnx2x *bp = fp->bp;
+
+	/* Configure classification DBs */
+	bnx2x_init_mac_obj(bp, &fp->mac_obj, fp->cl_id, fp->cid,
+			   BP_FUNC(bp), bnx2x_sp(bp, mac_rdata),
+			   bnx2x_sp_mapping(bp, mac_rdata),
+			   BNX2X_FILTER_MAC_PENDING,
+			   &bp->sp_state, obj_type,
+			   &bp->macs_pool);
+}
+
+/**
+ * bnx2x_get_path_func_num - get number of active functions
+ *
+ * @bp:		driver handle
+ *
+ * Calculates the number of active (not hidden) functions on the
+ * current path.
+ */
+static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
+{
+	u8 func_num = 0, i;
+
+	/* 57710 has only one function per-port */
+	if (CHIP_IS_E1(bp))
+		return 1;
+
+	/* Calculate a number of functions enabled on the current
+	 * PATH/PORT.
+	 */
+	if (CHIP_REV_IS_SLOW(bp)) {
+		if (IS_MF(bp))
+			func_num = 4;
+		else
+			func_num = 2;
+	} else {
+		for (i = 0; i < E1H_FUNC_MAX / 2; i++) {
+			u32 func_config =
+				MF_CFG_RD(bp,
+					  func_mf_config[BP_PORT(bp) + 2 * i].
+					  config);
+			func_num +=
+				((func_config & FUNC_MF_CFG_FUNC_HIDE) ? 0 : 1);
+		}
+	}
+
+	WARN_ON(!func_num);
+
+	return func_num;
+}
+
+static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
+{
+	/* RX_MODE controlling object */
+	bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
+
+	/* multicast configuration controlling object */
+	bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
+			     BP_FUNC(bp), BP_FUNC(bp),
+			     bnx2x_sp(bp, mcast_rdata),
+			     bnx2x_sp_mapping(bp, mcast_rdata),
+			     BNX2X_FILTER_MCAST_PENDING, &bp->sp_state,
+			     BNX2X_OBJ_TYPE_RX);
+
+	/* Setup CAM credit pools */
+	bnx2x_init_mac_credit_pool(bp, &bp->macs_pool, BP_FUNC(bp),
+				   bnx2x_get_path_func_num(bp));
+
+	/* RSS configuration object */
+	bnx2x_init_rss_config_obj(bp, &bp->rss_conf_obj, bp->fp->cl_id,
+				  bp->fp->cid, BP_FUNC(bp), BP_FUNC(bp),
+				  bnx2x_sp(bp, rss_rdata),
+				  bnx2x_sp_mapping(bp, rss_rdata),
+				  BNX2X_FILTER_RSS_CONF_PENDING, &bp->sp_state,
+				  BNX2X_OBJ_TYPE_RX);
+}
+
+static inline u8 bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp)
+{
+	if (CHIP_IS_E1x(fp->bp))
+		return fp->cl_id + BP_PORT(fp->bp) * ETH_MAX_RX_CLIENTS_E1H;
+	else
+		return fp->cl_id;
+}
+
+static inline u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
+{
+	struct bnx2x *bp = fp->bp;
+
+	if (!CHIP_IS_E1x(bp))
+		return USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
+	else
+		return USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
+}
+
 #ifdef BCM_CNIC
+static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx)
+{
+	return bp->cnic_base_cl_id + cl_idx +
+		(bp->pf_num >> 1) * NONE_ETH_CONTEXT_USE;
+}
+
+static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp)
+{
+	/* the 'first' id is allocated for the cnic */
+	return bp->base_fw_ndsb;
+}
+
+static inline u8 bnx2x_cnic_igu_sb_id(struct bnx2x *bp)
+{
+	return bp->igu_base_sb;
+}
+
 static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
 {
-	bnx2x_fcoe(bp, cl_id) = BNX2X_FCOE_ETH_CL_ID +
-		BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
+	struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
+	unsigned long q_type = 0;
+
+	bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
+						     BNX2X_FCOE_ETH_CL_ID_IDX);
+	/* The current BNX2X_FCOE_ETH_CID definition implies not more than
+	 *  16 ETH clients per function when CNIC is enabled!
+	 *
+	 *  Fix it ASAP!!!
+	 */
 	bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID;
 	bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
 	bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
 	bnx2x_fcoe(bp, bp) = bp;
-	bnx2x_fcoe(bp, state) = BNX2X_FP_STATE_CLOSED;
 	bnx2x_fcoe(bp, index) = FCOE_IDX;
 	bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
 	bnx2x_fcoe(bp, tx_cons_sb) = BNX2X_FCOE_L2_TX_INDEX;
 	/* qZone id equals to FW (per path) client id */
-	bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fcoe(bp, cl_id) +
-		BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 :
-				ETH_MAX_RX_CLIENTS_E1H);
+	bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp);
 	/* init shortcut */
-	bnx2x_fcoe(bp, ustorm_rx_prods_offset) = CHIP_IS_E2(bp) ?
-	    USTORM_RX_PRODS_E2_OFFSET(bnx2x_fcoe(bp, cl_qzone_id)) :
-	    USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), bnx2x_fcoe_fp(bp)->cl_id);
+	bnx2x_fcoe(bp, ustorm_rx_prods_offset) =
+		bnx2x_rx_ustorm_prods_offset(fp);
 
+	/* Configure Queue State object */
+	__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
+	__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
+	bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, fp->cid, BP_FUNC(bp),
+		bnx2x_sp(bp, q_rdata), bnx2x_sp_mapping(bp, q_rdata),
+			      q_type);
+
+	DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d "
+			   "igu_sb %d\n",
+	   fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
+	   fp->igu_sb_id);
 }
 #endif
 
+static inline int bnx2x_clean_tx_queue(struct bnx2x *bp,
+				       struct bnx2x_fastpath *fp)
+{
+	int cnt = 1000;
+
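+	/* Poll for up to ~1 second (1000 iterations x 1 ms) for Tx to drain */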
+	while (bnx2x_has_tx_work_unload(fp)) {
+		if (!cnt) {
+			BNX2X_ERR("timeout waiting for queue[%d]: "
+				 "fp->tx_pkt_prod(%d) != fp->tx_pkt_cons(%d)\n",
+				  fp->index, fp->tx_pkt_prod, fp->tx_pkt_cons);
+#ifdef BNX2X_STOP_ON_ERROR
+			bnx2x_panic();
+			return -EBUSY;
+#else
+			break;
+#endif
+		}
+		cnt--;
+		usleep_range(1000, 1000);
+	}
+
+	return 0;
+}
+
+int bnx2x_get_link_cfg_idx(struct bnx2x *bp);
+
 static inline void __storm_memset_struct(struct bnx2x *bp,
 					 u32 addr, size_t size, u32 *data)
 {
@@ -1071,42 +1351,78 @@
 		REG_WR(bp, addr + (i * 4), data[i]);
 }
 
-static inline void storm_memset_mac_filters(struct bnx2x *bp,
-			struct tstorm_eth_mac_filter_config *mac_filters,
-			u16 abs_fid)
+static inline void storm_memset_func_cfg(struct bnx2x *bp,
+				struct tstorm_eth_function_common_config *tcfg,
+				u16 abs_fid)
 {
-	size_t size = sizeof(struct tstorm_eth_mac_filter_config);
+	size_t size = sizeof(struct tstorm_eth_function_common_config);
 
 	u32 addr = BAR_TSTRORM_INTMEM +
-			TSTORM_MAC_FILTER_CONFIG_OFFSET(abs_fid);
+			TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
 
-	__storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
+	__storm_memset_struct(bp, addr, size, (u32 *)tcfg);
 }
 
 static inline void storm_memset_cmng(struct bnx2x *bp,
 				struct cmng_struct_per_port *cmng,
 				u8 port)
 {
-	size_t size =
-		sizeof(struct rate_shaping_vars_per_port) +
-		sizeof(struct fairness_vars_per_port) +
-		sizeof(struct safc_struct_per_port) +
-		sizeof(struct pfc_struct_per_port);
+	size_t size = sizeof(struct cmng_struct_per_port);
 
 	u32 addr = BAR_XSTRORM_INTMEM +
 			XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);
 
 	__storm_memset_struct(bp, addr, size, (u32 *)cmng);
-
-	addr += size + 4 /* SKIP DCB+LLFC */;
-	size = sizeof(struct cmng_struct_per_port) -
-		size /* written */ - 4 /*skipped*/;
-
-	__storm_memset_struct(bp, addr, size,
-			      (u32 *)(cmng->traffic_type_to_priority_cos));
 }
 
-/* HW Lock for shared dual port PHYs */
+/**
+ * bnx2x_wait_sp_comp - wait for the outstanding SP commands to complete.
+ *
+ * @bp:		driver handle
+ * @mask:	bits that need to be cleared
+ */
+static inline bool bnx2x_wait_sp_comp(struct bnx2x *bp, unsigned long mask)
+{
+	int tout = 5000; /* Wait for 5 secs tops */
+
+	while (tout--) {
+		smp_mb();
+		netif_addr_lock_bh(bp->dev);
+		if (!(bp->sp_state & mask)) {
+			netif_addr_unlock_bh(bp->dev);
+			return true;
+		}
+		netif_addr_unlock_bh(bp->dev);
+
+		usleep_range(1000, 1000);
+	}
+
+	smp_mb();
+
+	netif_addr_lock_bh(bp->dev);
+	if (bp->sp_state & mask) {
+		BNX2X_ERR("Filtering completion timed out. sp_state 0x%lx, "
+			  "mask 0x%lx\n", bp->sp_state, mask);
+		netif_addr_unlock_bh(bp->dev);
+		return false;
+	}
+	netif_addr_unlock_bh(bp->dev);
+
+	return true;
+}
+
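+/*
+ * Illustrative flow (a sketch only, not mandated by this interface): the
+ * caller marks a command as pending in bp->sp_state, posts the slow-path
+ * command, and then waits for FW to clear the pending bit(s), e.g.:
+ *
+ *	... mark BNX2X_FILTER_MCAST_PENDING in bp->sp_state ...
+ *	... post the ramrod ...
+ *	if (!bnx2x_wait_sp_comp(bp, mask))
+ *		BNX2X_ERR("mcast configuration did not complete\n");
+ */
+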
+/**
+ * bnx2x_set_ctx_validation - set CDU context validation values
+ *
+ * @bp:		driver handle
+ * @cxt:	context of the connection on the host memory
+ * @cid:	SW CID of the connection to be configured
+ */
+void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
+			      u32 cid);
+
+void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
+				    u8 sb_index, u8 disable, u16 usec);
 void bnx2x_acquire_phy_lock(struct bnx2x *bp);
 void bnx2x_release_phy_lock(struct bnx2x *bp);
 
diff --git a/drivers/net/bnx2x/bnx2x_dcb.c b/drivers/net/bnx2x/bnx2x_dcb.c
index 410a49e..b51a759 100644
--- a/drivers/net/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/bnx2x/bnx2x_dcb.c
@@ -47,34 +47,39 @@
 				struct cos_help_data *cos_data,
 				u32 *pg_pri_orginal_spread,
 				struct dcbx_ets_feature *ets);
-static void bnx2x_pfc_fw_struct_e2(struct bnx2x *bp);
+static void bnx2x_dcbx_fw_struct(struct bnx2x *bp);
 
 
 static void bnx2x_pfc_set(struct bnx2x *bp)
 {
 	struct bnx2x_nig_brb_pfc_port_params pfc_params = {0};
 	u32 pri_bit, val = 0;
-	u8 pri;
+	int i;
+
+	pfc_params.num_of_rx_cos_priority_mask =
+					bp->dcbx_port_params.ets.num_of_cos;
 
 	/* Tx COS configuration */
-	if (bp->dcbx_port_params.ets.cos_params[0].pauseable)
-		pfc_params.rx_cos0_priority_mask =
-			bp->dcbx_port_params.ets.cos_params[0].pri_bitmask;
-	if (bp->dcbx_port_params.ets.cos_params[1].pauseable)
-		pfc_params.rx_cos1_priority_mask =
-			bp->dcbx_port_params.ets.cos_params[1].pri_bitmask;
+	for (i = 0; i < bp->dcbx_port_params.ets.num_of_cos; i++)
+		/*
+		 * Only the pauseable bits are configured; the non-pauseable
+		 * bits are left unconfigured to avoid false pauses from
+		 * the network.
+		 */
+		pfc_params.rx_cos_priority_mask[i] =
+			bp->dcbx_port_params.ets.cos_params[i].pri_bitmask
+				& DCBX_PFC_PRI_PAUSE_MASK(bp);
 
-
-	/**
+	/*
 	 * Rx COS configuration
 	 * Changing the PFC RX configuration.
 	 * In RX, COS0 is always configured as lossy and COS1 as lossless.
 	 */
-	for (pri = 0 ; pri < MAX_PFC_PRIORITIES ; pri++) {
-		pri_bit = 1 << pri;
+	for (i = 0; i < MAX_PFC_PRIORITIES; i++) {
+		pri_bit = 1 << i;
 
 		if (pri_bit & DCBX_PFC_PRI_PAUSE_MASK(bp))
-			val |= 1 << (pri * 4);
+			val |= 1 << (i * 4);
 	}
 
 	pfc_params.pkt_priority_to_cos = val;
@@ -253,12 +258,11 @@
 
 
 	/* Clean up old settings of ets on COS */
-	for (i = 0; i < E2_NUM_OF_COS ; i++) {
-
+	for (i = 0; i < ARRAY_SIZE(bp->dcbx_port_params.ets.cos_params); i++) {
 		cos_params[i].pauseable = false;
-		cos_params[i].strict = BNX2X_DCBX_COS_NOT_STRICT;
+		cos_params[i].strict = BNX2X_DCBX_STRICT_INVALID;
 		cos_params[i].bw_tbl = DCBX_INVALID_COS_BW;
-		cos_params[i].pri_bitmask = DCBX_PFC_PRI_GET_NON_PAUSE(bp, 0);
+		cos_params[i].pri_bitmask = 0;
 	}
 
 	if (bp->dcbx_port_params.app.enabled &&
@@ -378,25 +382,19 @@
 
 static void bnx2x_pfc_set_pfc(struct bnx2x *bp)
 {
-	if (CHIP_IS_E2(bp)) {
-		if (BP_PORT(bp)) {
-			BNX2X_ERR("4 port mode is not supported");
-			return;
-		}
-
-		if (bp->dcbx_port_params.pfc.enabled)
-
-			/* 1. Fills up common PFC structures if required.*/
-			/* 2. Configure NIG, MAC and BRB via the elink:
-			 *    elink must first check if BMAC is not in reset
-			 *    and only then configures the BMAC
-			 *    Or, configure EMAC.
-			 */
-			bnx2x_pfc_set(bp);
-
-		else
-			bnx2x_pfc_clear(bp);
+	if (BP_PORT(bp)) {
+		BNX2X_ERR("4 port mode is not supported");
+		return;
 	}
+
+	if (bp->dcbx_port_params.pfc.enabled)
+		/*
+		 * 1. Fills up common PFC structures if required
+		 * 2. Configure NIG, MAC and BRB via the elink
+		 */
+		bnx2x_pfc_set(bp);
+	else
+		bnx2x_pfc_clear(bp);
 }
 
 static void bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp)
@@ -406,32 +404,27 @@
 		      0 /* connectionless */,
 		      0 /* dataHi is zero */,
 		      0 /* dataLo is zero */,
-		      1 /* common */);
+		      NONE_CONNECTION_TYPE);
 }
 
 static void bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp)
 {
-	bnx2x_pfc_fw_struct_e2(bp);
+	bnx2x_dcbx_fw_struct(bp);
 	DP(NETIF_MSG_LINK, "sending START TRAFFIC\n");
 	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC,
 		      0, /* connectionless */
 		      U64_HI(bnx2x_sp_mapping(bp, pfc_config)),
 		      U64_LO(bnx2x_sp_mapping(bp, pfc_config)),
-		      1  /* commmon */);
+		      NONE_CONNECTION_TYPE);
 }
 
-static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp)
+static void bnx2x_dcbx_2cos_limit_update_ets_config(struct bnx2x *bp)
 {
 	struct bnx2x_dcbx_pg_params *ets = &(bp->dcbx_port_params.ets);
-	u8	status = 0;
+	int rc = 0;
 
-	bnx2x_ets_disabled(&bp->link_params);
-
-	if (!ets->enabled)
-		return;
-
-	if ((ets->num_of_cos == 0) || (ets->num_of_cos > E2_NUM_OF_COS)) {
-		BNX2X_ERR("illegal num of cos= %x", ets->num_of_cos);
+	if (ets->num_of_cos == 0 || ets->num_of_cos > DCBX_COS_MAX_NUM_E2) {
+		BNX2X_ERR("Illegal number of COSes %d\n", ets->num_of_cos);
 		return;
 	}
 
@@ -440,9 +433,9 @@
 		return;
 
 	/* sanity */
-	if (((BNX2X_DCBX_COS_NOT_STRICT == ets->cos_params[0].strict) &&
+	if (((BNX2X_DCBX_STRICT_INVALID == ets->cos_params[0].strict) &&
 	     (DCBX_INVALID_COS_BW == ets->cos_params[0].bw_tbl)) ||
-	    ((BNX2X_DCBX_COS_NOT_STRICT == ets->cos_params[1].strict) &&
+	    ((BNX2X_DCBX_STRICT_INVALID == ets->cos_params[1].strict) &&
 	     (DCBX_INVALID_COS_BW == ets->cos_params[1].bw_tbl))) {
 		BNX2X_ERR("all COS should have at least bw_limit or strict"
 			    "ets->cos_params[0].strict= %x"
@@ -474,17 +467,70 @@
 
 		bnx2x_ets_bw_limit(&bp->link_params, bw_tbl_0, bw_tbl_1);
 	} else {
-		if (ets->cos_params[0].strict == BNX2X_DCBX_COS_HIGH_STRICT)
-			status = bnx2x_ets_strict(&bp->link_params, 0);
+		if (ets->cos_params[0].strict == BNX2X_DCBX_STRICT_COS_HIGHEST)
+			rc = bnx2x_ets_strict(&bp->link_params, 0);
 		else if (ets->cos_params[1].strict
-						== BNX2X_DCBX_COS_HIGH_STRICT)
-			status = bnx2x_ets_strict(&bp->link_params, 1);
-
-		if (status)
+					== BNX2X_DCBX_STRICT_COS_HIGHEST)
+			rc = bnx2x_ets_strict(&bp->link_params, 1);
+		if (rc)
 			BNX2X_ERR("update_ets_params failed\n");
 	}
 }
 
+/*
+ * In E3B0 the configuration may have more than 2 COS.
+ */
+void bnx2x_dcbx_update_ets_config(struct bnx2x *bp)
+{
+	struct bnx2x_dcbx_pg_params *ets = &(bp->dcbx_port_params.ets);
+	struct bnx2x_ets_params ets_params = { 0 };
+	u8 i;
+
+	ets_params.num_of_cos = ets->num_of_cos;
+
+	for (i = 0; i < ets->num_of_cos; i++) {
+		/* COS is SP */
+		if (ets->cos_params[i].strict != BNX2X_DCBX_STRICT_INVALID) {
+			if (ets->cos_params[i].bw_tbl != DCBX_INVALID_COS_BW) {
+				BNX2X_ERR("COS can't be not BW and not SP\n");
+				return;
+			}
+
+			ets_params.cos[i].state = bnx2x_cos_state_strict;
+			ets_params.cos[i].params.sp_params.pri =
+						ets->cos_params[i].strict;
+		} else { /* COS is BW */
+			if (ets->cos_params[i].bw_tbl == DCBX_INVALID_COS_BW) {
+				BNX2X_ERR("COS can't be not BW and not SP\n");
+				return;
+			}
+			ets_params.cos[i].state = bnx2x_cos_state_bw;
+			ets_params.cos[i].params.bw_params.bw =
+						(u8)ets->cos_params[i].bw_tbl;
+		}
+	}
+
+	/* Configure the ETS in HW */
+	if (bnx2x_ets_e3b0_config(&bp->link_params, &bp->link_vars,
+				  &ets_params)) {
+		BNX2X_ERR("bnx2x_ets_e3b0_config failed\n");
+		bnx2x_ets_disabled(&bp->link_params, &bp->link_vars);
+	}
+}
+
+static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp)
+{
+	bnx2x_ets_disabled(&bp->link_params, &bp->link_vars);
+
+	if (!bp->dcbx_port_params.ets.enabled)
+		return;
+
+	if (CHIP_IS_E3B0(bp))
+		bnx2x_dcbx_update_ets_config(bp);
+	else
+		bnx2x_dcbx_2cos_limit_update_ets_config(bp);
+}
+
 #ifdef BCM_DCBNL
 static int bnx2x_dcbx_read_shmem_remote_mib(struct bnx2x *bp)
 {
@@ -527,6 +573,7 @@
 		BNX2X_ERR("FW doesn't support dcbx_neg_res_offset\n");
 		return -EINVAL;
 	}
+
 	rc = bnx2x_dcbx_read_mib(bp, (u32 *)&local_mib, dcbx_neg_res_offset,
 				 DCBX_READ_LOCAL_MIB);
 
@@ -563,15 +610,6 @@
 		DCB_APP_IDTYPE_ETHTYPE;
 }
 
-static inline
-void bnx2x_dcbx_invalidate_local_apps(struct bnx2x *bp)
-{
-	int i;
-	for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++)
-		bp->dcbx_local_feat.app.app_pri_tbl[i].appBitfield &=
-							~DCBX_APP_ENTRY_VALID;
-}
-
 int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall)
 {
 	int i, err = 0;
@@ -597,32 +635,28 @@
 }
 #endif
 
+static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set)
+{
+	if (SHMEM2_HAS(bp, drv_flags)) {
+		u32 drv_flags;
+		bnx2x_acquire_hw_lock(bp, HW_LOCK_DRV_FLAGS);
+		drv_flags = SHMEM2_RD(bp, drv_flags);
+
+		if (set)
+			SET_FLAGS(drv_flags, flags);
+		else
+			RESET_FLAGS(drv_flags, flags);
+
+		SHMEM2_WR(bp, drv_flags, drv_flags);
+		DP(NETIF_MSG_HW, "drv_flags 0x%08x\n", drv_flags);
+		bnx2x_release_hw_lock(bp, HW_LOCK_DRV_FLAGS);
+	}
+}
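+
+/*
+ * Usage note (illustrative): DCB marks and clears its negotiated state in
+ * the shared-memory driver flags so that a newly elected PMF can pick it
+ * up later (see bnx2x_dcbx_pmf_update()):
+ *
+ *	bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 1);   (set)
+ *	bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 0);   (clear)
+ */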
+
 void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
 {
 	switch (state) {
 	case BNX2X_DCBX_STATE_NEG_RECEIVED:
-#ifdef BCM_CNIC
-		if (bp->state != BNX2X_STATE_OPENING_WAIT4_LOAD) {
-			struct cnic_ops *c_ops;
-			struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
-			bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
-			cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
-			cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;
-
-			rcu_read_lock();
-			c_ops = rcu_dereference(bp->cnic_ops);
-			if (c_ops) {
-				bnx2x_cnic_notify(bp, CNIC_CTL_STOP_ISCSI_CMD);
-				rcu_read_unlock();
-				return;
-			}
-			rcu_read_unlock();
-		}
-
-		/* fall through if no CNIC initialized  */
-	case BNX2X_DCBX_STATE_ISCSI_STOPPED:
-#endif
-
 		{
 			DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_NEG_RECEIVED\n");
 #ifdef BCM_DCBNL
@@ -646,41 +680,28 @@
 			bnx2x_get_dcbx_drv_param(bp, &bp->dcbx_local_feat,
 						 bp->dcbx_error);
 
-			if (bp->state != BNX2X_STATE_OPENING_WAIT4_LOAD) {
-#ifdef BCM_DCBNL
-				/**
-				 * Add new app tlvs to dcbnl
-				 */
-				bnx2x_dcbnl_update_applist(bp, false);
-#endif
-				bnx2x_dcbx_stop_hw_tx(bp);
-				return;
-			}
-			/* fall through */
+			/* mark DCBX result for PMF migration */
+			bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 1);
 #ifdef BCM_DCBNL
 			/**
-			 * Invalidate the local app tlvs if they are not added
-			 * to the dcbnl app list to avoid deleting them from
-			 * the list later on
+			 * Add new app tlvs to dcbnl
 			 */
-			bnx2x_dcbx_invalidate_local_apps(bp);
+			bnx2x_dcbnl_update_applist(bp, false);
 #endif
+			bnx2x_dcbx_stop_hw_tx(bp);
+
+			return;
 		}
 	case BNX2X_DCBX_STATE_TX_PAUSED:
 		DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_PAUSED\n");
 		bnx2x_pfc_set_pfc(bp);
 
 		bnx2x_dcbx_update_ets_params(bp);
-		if (bp->state != BNX2X_STATE_OPENING_WAIT4_LOAD) {
-			bnx2x_dcbx_resume_hw_tx(bp);
-			return;
-		}
-		/* fall through */
+		bnx2x_dcbx_resume_hw_tx(bp);
+		return;
 	case BNX2X_DCBX_STATE_TX_RELEASED:
 		DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_RELEASED\n");
-		if (bp->state != BNX2X_STATE_OPENING_WAIT4_LOAD)
-			bnx2x_fw_command(bp, DRV_MSG_CODE_DCBX_PMF_DRV_OK, 0);
-
+		bnx2x_fw_command(bp, DRV_MSG_CODE_DCBX_PMF_DRV_OK, 0);
 		return;
 	default:
 		BNX2X_ERR("Unknown DCBX_STATE\n");
@@ -868,7 +889,7 @@
 
 void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled)
 {
-	if (CHIP_IS_E2(bp) && !CHIP_MODE_IS_4_PORT(bp)) {
+	if (!CHIP_IS_E1x(bp) && !CHIP_MODE_IS_4_PORT(bp)) {
 		bp->dcb_state = dcb_on;
 		bp->dcbx_enabled = dcbx_enabled;
 	} else {
@@ -966,7 +987,7 @@
 	DP(NETIF_MSG_LINK, "dcb_state %d bp->port.pmf %d\n",
 	   bp->dcb_state, bp->port.pmf);
 
-	if (bp->dcb_state ==  BNX2X_DCB_STATE_ON && bp->port.pmf &&
+	if (bp->dcb_state == BNX2X_DCB_STATE_ON && bp->port.pmf &&
 	    SHMEM2_HAS(bp, dcbx_lldp_params_offset)) {
 		dcbx_lldp_params_offset =
 			SHMEM2_RD(bp, dcbx_lldp_params_offset);
@@ -974,6 +995,8 @@
 		DP(NETIF_MSG_LINK, "dcbx_lldp_params_offset 0x%x\n",
 		   dcbx_lldp_params_offset);
 
+		bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 0);
+
 		if (SHMEM_LLDP_DCBX_PARAMS_NONE != dcbx_lldp_params_offset) {
 			bnx2x_dcbx_lldp_updated_params(bp,
 						       dcbx_lldp_params_offset);
@@ -981,46 +1004,12 @@
 			bnx2x_dcbx_admin_mib_updated_params(bp,
 				dcbx_lldp_params_offset);
 
-			/* set default configuration BC has */
-			bnx2x_dcbx_set_params(bp,
-					      BNX2X_DCBX_STATE_NEG_RECEIVED);
-
+			/* Let HW start negotiation */
 			bnx2x_fw_command(bp,
 					 DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG, 0);
 		}
 	}
 }
-
-void bnx2x_dcb_init_intmem_pfc(struct bnx2x *bp)
-{
-	struct priority_cos pricos[MAX_PFC_TRAFFIC_TYPES];
-	u32 i = 0, addr;
-	memset(pricos, 0, sizeof(pricos));
-	/* Default initialization */
-	for (i = 0; i < MAX_PFC_TRAFFIC_TYPES; i++)
-		pricos[i].priority = LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED;
-
-	/* Store per port struct to internal memory */
-	addr = BAR_XSTRORM_INTMEM +
-			XSTORM_CMNG_PER_PORT_VARS_OFFSET(BP_PORT(bp)) +
-			offsetof(struct cmng_struct_per_port,
-				 traffic_type_to_priority_cos);
-	__storm_memset_struct(bp, addr, sizeof(pricos), (u32 *)pricos);
-
-
-	/* LLFC disabled.*/
-	REG_WR8(bp , BAR_XSTRORM_INTMEM +
-		    XSTORM_CMNG_PER_PORT_VARS_OFFSET(BP_PORT(bp)) +
-		    offsetof(struct cmng_struct_per_port, llfc_mode),
-			LLFC_MODE_NONE);
-
-	/* DCBX disabled.*/
-	REG_WR8(bp , BAR_XSTRORM_INTMEM +
-		    XSTORM_CMNG_PER_PORT_VARS_OFFSET(BP_PORT(bp)) +
-		    offsetof(struct cmng_struct_per_port, dcb_enabled),
-			DCB_DISABLED);
-}
-
 static void
 bnx2x_dcbx_print_cos_params(struct bnx2x *bp,
 			    struct flow_control_configuration *pfc_fw_cfg)
@@ -1171,7 +1160,7 @@
 			/* If we join a group and one entry is strict,
 			 * strict wins over the bw rules */
 			cos_data->data[entry].strict =
-						BNX2X_DCBX_COS_HIGH_STRICT;
+						BNX2X_DCBX_STRICT_COS_HIGHEST;
 	}
 	if ((0 == cos_data->data[0].pri_join_mask) &&
 	    (0 == cos_data->data[1].pri_join_mask))
@@ -1183,7 +1172,7 @@
 #define POWER_OF_2(x)	((0 != x) && (0 == (x & (x-1))))
 #endif
 
-static void bxn2x_dcbx_single_pg_to_cos_params(struct bnx2x *bp,
+static void bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params(struct bnx2x *bp,
 					      struct pg_help_data *pg_help_data,
 					      struct cos_help_data *cos_data,
 					      u32 pri_join_mask,
@@ -1263,14 +1252,16 @@
 			if (DCBX_PFC_PRI_GET_PAUSE(bp, pri_join_mask) >
 			    DCBX_PFC_PRI_GET_NON_PAUSE(bp, pri_join_mask)) {
 				cos_data->data[0].strict =
-					BNX2X_DCBX_COS_HIGH_STRICT;
+					BNX2X_DCBX_STRICT_COS_HIGHEST;
 				cos_data->data[1].strict =
-					BNX2X_DCBX_COS_LOW_STRICT;
+					BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(
+						BNX2X_DCBX_STRICT_COS_HIGHEST);
 			} else {
 				cos_data->data[0].strict =
-					BNX2X_DCBX_COS_LOW_STRICT;
+					BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(
+						BNX2X_DCBX_STRICT_COS_HIGHEST);
 				cos_data->data[1].strict =
-					BNX2X_DCBX_COS_HIGH_STRICT;
+					BNX2X_DCBX_STRICT_COS_HIGHEST;
 			}
 			/* Pauseable */
 			cos_data->data[0].pausable = true;
@@ -1306,13 +1297,16 @@
 			 * and that with the highest priority
 			 * gets the highest strict priority in the arbiter.
 			 */
-			cos_data->data[0].strict = BNX2X_DCBX_COS_LOW_STRICT;
-			cos_data->data[1].strict = BNX2X_DCBX_COS_HIGH_STRICT;
+			cos_data->data[0].strict =
+					BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(
+						BNX2X_DCBX_STRICT_COS_HIGHEST);
+			cos_data->data[1].strict =
+					BNX2X_DCBX_STRICT_COS_HIGHEST;
 		}
 	}
 }
 
-static void bnx2x_dcbx_two_pg_to_cos_params(
+static void bnx2x_dcbx_2cos_limit_cee_two_pg_to_cos_params(
 			    struct bnx2x		*bp,
 			    struct  pg_help_data	*pg_help_data,
 			    struct dcbx_ets_feature	*ets,
@@ -1322,7 +1316,7 @@
 			    u8				num_of_dif_pri)
 {
 	u8 i = 0;
-	u8 pg[E2_NUM_OF_COS] = {0};
+	u8 pg[DCBX_COS_MAX_NUM_E2] = { 0 };
 
 	/* If there are both pauseable and non-pauseable priorities,
 	 * the pauseable priorities go to the first queue and
@@ -1378,16 +1372,68 @@
 	}
 
 	/* There can be only one strict pg */
-	for (i = 0 ; i < E2_NUM_OF_COS; i++) {
+	for (i = 0; i < ARRAY_SIZE(pg); i++) {
 		if (pg[i] < DCBX_MAX_NUM_PG_BW_ENTRIES)
 			cos_data->data[i].cos_bw =
 				DCBX_PG_BW_GET(ets->pg_bw_tbl, pg[i]);
 		else
-			cos_data->data[i].strict = BNX2X_DCBX_COS_HIGH_STRICT;
+			cos_data->data[i].strict =
+						BNX2X_DCBX_STRICT_COS_HIGHEST;
 	}
 }
 
-static void bnx2x_dcbx_three_pg_to_cos_params(
+static int bnx2x_dcbx_join_pgs(
+			      struct bnx2x            *bp,
+			      struct dcbx_ets_feature *ets,
+			      struct pg_help_data     *pg_help_data,
+			      u8                      required_num_of_pg)
+{
+	u8 entry_joined    = pg_help_data->num_of_pg - 1;
+	u8 entry_removed   = entry_joined + 1;
+	u8 pg_joined       = 0;
+
+	if (required_num_of_pg == 0 || ARRAY_SIZE(pg_help_data->data)
+						<= pg_help_data->num_of_pg) {
+		BNX2X_ERR("invalid required_num_of_pg or num_of_pg\n");
+		return -EINVAL;
+	}
+
+	while (required_num_of_pg < pg_help_data->num_of_pg) {
+		entry_joined = pg_help_data->num_of_pg - 2;
+		entry_removed = entry_joined + 1;
+		/* protect index */
+		entry_removed %= ARRAY_SIZE(pg_help_data->data);
+
+		pg_help_data->data[entry_joined].pg_priority |=
+			pg_help_data->data[entry_removed].pg_priority;
+
+		pg_help_data->data[entry_joined].num_of_dif_pri +=
+			pg_help_data->data[entry_removed].num_of_dif_pri;
+
+		if (pg_help_data->data[entry_joined].pg == DCBX_STRICT_PRI_PG ||
+		    pg_help_data->data[entry_removed].pg == DCBX_STRICT_PRI_PG) {
+			/* The joined entry keeps the strict priority rules */
+			pg_help_data->data[entry_joined].pg =
+							DCBX_STRICT_PRI_PG;
+		} else {
+			/* Entries can be joined by summing their BW */
+			pg_joined = DCBX_PG_BW_GET(ets->pg_bw_tbl,
+					pg_help_data->data[entry_joined].pg) +
+				    DCBX_PG_BW_GET(ets->pg_bw_tbl,
+					pg_help_data->data[entry_removed].pg);
+
+			DCBX_PG_BW_SET(ets->pg_bw_tbl,
+				pg_help_data->data[entry_joined].pg, pg_joined);
+		}
+		/* Joined the entries */
+		pg_help_data->num_of_pg--;
+	}
+
+	return 0;
+}
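+
+/*
+ * Worked example (illustrative): with 4 PGs and required_num_of_pg == 3,
+ * a single pass merges the last two entries: their pg_priority masks are
+ * OR-ed, their num_of_dif_pri counts are summed, and either the strict
+ * priority rules are kept or the two BW allocations are added into the
+ * surviving entry.
+ */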
+
+static void bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params(
 			      struct bnx2x		*bp,
 			      struct pg_help_data	*pg_help_data,
 			      struct dcbx_ets_feature	*ets,
@@ -1459,24 +1505,203 @@
 				/* If we join a group and one entry is strict,
 				 * strict wins over the bw rules */
 				cos_data->data[1].strict =
-					BNX2X_DCBX_COS_HIGH_STRICT;
+					BNX2X_DCBX_STRICT_COS_HIGHEST;
 			}
 		}
 	}
 }
 
 
+static void bnx2x_dcbx_2cos_limit_cee_fill_cos_params(struct bnx2x *bp,
+				       struct pg_help_data *help_data,
+				       struct dcbx_ets_feature *ets,
+				       struct cos_help_data *cos_data,
+				       u32 *pg_pri_orginal_spread,
+				       u32 pri_join_mask,
+				       u8 num_of_dif_pri)
+{
+	/* default E2 settings */
+	cos_data->num_of_cos = DCBX_COS_MAX_NUM_E2;
+
+	switch (help_data->num_of_pg) {
+	case 1:
+		bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params(
+					       bp,
+					       help_data,
+					       cos_data,
+					       pri_join_mask,
+					       num_of_dif_pri);
+		break;
+	case 2:
+		bnx2x_dcbx_2cos_limit_cee_two_pg_to_cos_params(
+					    bp,
+					    help_data,
+					    ets,
+					    cos_data,
+					    pg_pri_orginal_spread,
+					    pri_join_mask,
+					    num_of_dif_pri);
+		break;
+
+	case 3:
+		bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params(
+					      bp,
+					      help_data,
+					      ets,
+					      cos_data,
+					      pg_pri_orginal_spread,
+					      pri_join_mask,
+					      num_of_dif_pri);
+		break;
+	default:
+		BNX2X_ERR("Wrong pg_help_data.num_of_pg\n");
+		bnx2x_dcbx_ets_disabled_entry_data(bp,
+						   cos_data, pri_join_mask);
+	}
+}
+
+static int bnx2x_dcbx_spread_strict_pri(struct bnx2x *bp,
+					struct cos_help_data *cos_data,
+					u8 entry,
+					u8 num_spread_of_entries,
+					u8 strict_app_pris)
+{
+	u8 strict_pri = BNX2X_DCBX_STRICT_COS_HIGHEST;
+	u8 num_of_app_pri = MAX_PFC_PRIORITIES;
+	u8 app_pri_bit = 0;
+
+	while (num_spread_of_entries && num_of_app_pri > 0) {
+		app_pri_bit = 1 << (num_of_app_pri - 1);
+		if (app_pri_bit & strict_app_pris) {
+			struct cos_entry_help_data *data =
+						&cos_data->data[entry];
+			num_spread_of_entries--;
+			if (num_spread_of_entries == 0) {
+				/* last entry needed put all the entries left */
+				data->cos_bw = DCBX_INVALID_COS_BW;
+				data->strict = strict_pri;
+				data->pri_join_mask = strict_app_pris;
+				data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp,
+							data->pri_join_mask);
+			} else {
+				strict_app_pris &= ~app_pri_bit;
+
+				data->cos_bw = DCBX_INVALID_COS_BW;
+				data->strict = strict_pri;
+				data->pri_join_mask = app_pri_bit;
+				data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp,
+							data->pri_join_mask);
+			}
+
+			strict_pri =
+			    BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(strict_pri);
+			entry++;
+		}
+
+		num_of_app_pri--;
+	}
+
+	if (num_spread_of_entries)
+		return -EINVAL;
+
+	return 0;
+}
+
+static u8 bnx2x_dcbx_cee_fill_strict_pri(struct bnx2x *bp,
+					 struct cos_help_data *cos_data,
+					 u8 entry,
+					 u8 num_spread_of_entries,
+					 u8 strict_app_pris)
+{
+	if (bnx2x_dcbx_spread_strict_pri(bp, cos_data, entry,
+					 num_spread_of_entries,
+					 strict_app_pris)) {
+		struct cos_entry_help_data *data = &cos_data->data[entry];
+		/* Fallback: fill a single strict priority entry */
+		data->cos_bw = DCBX_INVALID_COS_BW;
+		data->strict = BNX2X_DCBX_STRICT_COS_HIGHEST;
+		data->pri_join_mask = strict_app_pris;
+		data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp,
+				 data->pri_join_mask);
+		return 1;
+	}
+
+	return num_spread_of_entries;
+}
+
+static void bnx2x_dcbx_cee_fill_cos_params(struct bnx2x *bp,
+					   struct pg_help_data *help_data,
+					   struct dcbx_ets_feature *ets,
+					   struct cos_help_data *cos_data,
+					   u32 pri_join_mask)
+{
+	u8 need_num_of_entries = 0;
+	u8 i = 0;
+	u8 entry = 0;
+
+	/*
+	 * If the number of requested PGs in CEE is greater than 3, the
+	 * result is undefined, since this violates the standard.
+	 */
+	if (help_data->num_of_pg > DCBX_COS_MAX_NUM_E3B0) {
+		if (bnx2x_dcbx_join_pgs(bp, ets, help_data,
+					DCBX_COS_MAX_NUM_E3B0)) {
+			BNX2X_ERR("Unable to reduce the number of PGs -"
+				  "we will disables ETS\n");
+			bnx2x_dcbx_ets_disabled_entry_data(bp, cos_data,
+							   pri_join_mask);
+			return;
+		}
+	}
+
+	for (i = 0; i < help_data->num_of_pg; i++) {
+		struct pg_entry_help_data *pg =  &help_data->data[i];
+		if (pg->pg < DCBX_MAX_NUM_PG_BW_ENTRIES) {
+			struct cos_entry_help_data *data =
+						&cos_data->data[entry];
+			/* Fill BW entry */
+			data->cos_bw = DCBX_PG_BW_GET(ets->pg_bw_tbl, pg->pg);
+			data->strict = BNX2X_DCBX_STRICT_INVALID;
+			data->pri_join_mask = pg->pg_priority;
+			data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp,
+						data->pri_join_mask);
+
+			entry++;
+		} else {
+			need_num_of_entries = min_t(u8,
+				(u8)pg->num_of_dif_pri,
+				(u8)DCBX_COS_MAX_NUM_E3B0 -
+						 help_data->num_of_pg + 1);
+			/*
+			 * If there are still VOQ-s which have no associated PG,
+			 * then associate these VOQ-s to PG15. These PG-s will
+			 * be used for SP between priorities on PG15.
+			 */
+			entry += bnx2x_dcbx_cee_fill_strict_pri(bp, cos_data,
+				entry, need_num_of_entries, pg->pg_priority);
+		}
+	}
+
+	/* 'entry' now equals the number of COSes actually used */
+	cos_data->num_of_cos = entry;
+}
+
 static void bnx2x_dcbx_fill_cos_params(struct bnx2x *bp,
 				       struct pg_help_data *help_data,
 				       struct dcbx_ets_feature *ets,
 				       u32 *pg_pri_orginal_spread)
 {
-	struct cos_help_data         cos_data ;
+	struct cos_help_data         cos_data;
 	u8                    i                           = 0;
 	u32                   pri_join_mask               = 0;
 	u8                    num_of_dif_pri              = 0;
 
 	memset(&cos_data, 0, sizeof(cos_data));
+
 	/* Validate the pg value */
 	for (i = 0; i < help_data->num_of_pg ; i++) {
 		if (DCBX_STRICT_PRIORITY != help_data->data[i].pg &&
@@ -1487,74 +1712,65 @@
 		num_of_dif_pri  += help_data->data[i].num_of_dif_pri;
 	}
 
-	/* default settings */
-	cos_data.num_of_cos = 2;
-	for (i = 0; i < E2_NUM_OF_COS ; i++) {
-		cos_data.data[i].pri_join_mask    = pri_join_mask;
-		cos_data.data[i].pausable         = false;
-		cos_data.data[i].strict           = BNX2X_DCBX_COS_NOT_STRICT;
-		cos_data.data[i].cos_bw           = DCBX_INVALID_COS_BW;
+	/* defaults */
+	cos_data.num_of_cos = 1;
+	for (i = 0; i < ARRAY_SIZE(cos_data.data); i++) {
+		cos_data.data[i].pri_join_mask = 0;
+		cos_data.data[i].pausable = false;
+		cos_data.data[i].strict = BNX2X_DCBX_STRICT_INVALID;
+		cos_data.data[i].cos_bw = DCBX_INVALID_COS_BW;
 	}
 
-	switch (help_data->num_of_pg) {
-	case 1:
+	if (CHIP_IS_E3B0(bp))
+		bnx2x_dcbx_cee_fill_cos_params(bp, help_data, ets,
+					       &cos_data, pri_join_mask);
+	else /* E2 + E3A0 */
+		bnx2x_dcbx_2cos_limit_cee_fill_cos_params(bp,
+							  help_data, ets,
+							  &cos_data,
+							  pg_pri_orginal_spread,
+							  pri_join_mask,
+							  num_of_dif_pri);
 
-		bxn2x_dcbx_single_pg_to_cos_params(
-					       bp,
-					       help_data,
-					       &cos_data,
-					       pri_join_mask,
-					       num_of_dif_pri);
-		break;
-	case 2:
-		bnx2x_dcbx_two_pg_to_cos_params(
-					    bp,
-					    help_data,
-					    ets,
-					    &cos_data,
-					    pg_pri_orginal_spread,
-					    pri_join_mask,
-					    num_of_dif_pri);
-		break;
-
-	case 3:
-		bnx2x_dcbx_three_pg_to_cos_params(
-					      bp,
-					      help_data,
-					      ets,
-					      &cos_data,
-					      pg_pri_orginal_spread,
-					      pri_join_mask,
-					      num_of_dif_pri);
-
-		break;
-	default:
-		BNX2X_ERR("Wrong pg_help_data.num_of_pg\n");
-		bnx2x_dcbx_ets_disabled_entry_data(bp,
-						   &cos_data, pri_join_mask);
-	}
 
 	for (i = 0; i < cos_data.num_of_cos ; i++) {
-		struct bnx2x_dcbx_cos_params *params =
+		struct bnx2x_dcbx_cos_params *p =
 			&bp->dcbx_port_params.ets.cos_params[i];
 
-		params->pauseable = cos_data.data[i].pausable;
-		params->strict = cos_data.data[i].strict;
-		params->bw_tbl = cos_data.data[i].cos_bw;
-		if (params->pauseable) {
-			params->pri_bitmask =
-			DCBX_PFC_PRI_GET_PAUSE(bp,
-					cos_data.data[i].pri_join_mask);
+		p->strict = cos_data.data[i].strict;
+		p->bw_tbl = cos_data.data[i].cos_bw;
+		p->pri_bitmask = cos_data.data[i].pri_join_mask;
+		p->pauseable = cos_data.data[i].pausable;
+
+		/* sanity */
+		if (p->bw_tbl != DCBX_INVALID_COS_BW ||
+		    p->strict != BNX2X_DCBX_STRICT_INVALID) {
+			if (p->pri_bitmask == 0)
+				BNX2X_ERR("Invalid pri_bitmask for %d\n", i);
+
+			if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp)) {
+
+				if (p->pauseable &&
+				    DCBX_PFC_PRI_GET_NON_PAUSE(bp,
+						p->pri_bitmask) != 0)
+					BNX2X_ERR("Inconsistent config for "
+						  "pausable COS %d\n", i);
+
+				if (!p->pauseable &&
+				    DCBX_PFC_PRI_GET_PAUSE(bp,
+						p->pri_bitmask) != 0)
+					BNX2X_ERR("Inconsistent config for "
+						  "nonpausable COS %d\n", i);
+			}
+		}
+
+		if (p->pauseable)
 			DP(NETIF_MSG_LINK, "COS %d PAUSABLE prijoinmask 0x%x\n",
 				  i, cos_data.data[i].pri_join_mask);
-		} else {
-			params->pri_bitmask =
-			DCBX_PFC_PRI_GET_NON_PAUSE(bp,
-					cos_data.data[i].pri_join_mask);
+		else
 			DP(NETIF_MSG_LINK, "COS %d NONPAUSABLE prijoinmask "
 					  "0x%x\n",
 				  i, cos_data.data[i].pri_join_mask);
-		}
 	}
 
 	bp->dcbx_port_params.ets.num_of_cos = cos_data.num_of_cos;
@@ -1574,7 +1790,7 @@
 	}
 }
 
-static void bnx2x_pfc_fw_struct_e2(struct bnx2x *bp)
+static void bnx2x_dcbx_fw_struct(struct bnx2x *bp)
 {
 	struct flow_control_configuration   *pfc_fw_cfg = NULL;
 	u16 pri_bit = 0;
@@ -1591,13 +1807,7 @@
 
 	/* Fw version should be incremented each update */
 	pfc_fw_cfg->dcb_version = ++bp->dcb_version;
-	pfc_fw_cfg->dcb_enabled = DCB_ENABLED;
-
-	/* Default initialization */
-	for (pri = 0; pri < MAX_PFC_TRAFFIC_TYPES ; pri++) {
-		tt2cos[pri].priority = LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED;
-		tt2cos[pri].cos = 0;
-	}
+	pfc_fw_cfg->dcb_enabled = 1;
 
 	/* Fill priority parameters */
 	for (pri = 0; pri < LLFC_DRIVER_TRAFFIC_TYPE_MAX; pri++) {
@@ -1605,14 +1815,37 @@
 		pri_bit = 1 << tt2cos[pri].priority;
 
 		/* Fill COS parameters based on COS calculated to
-		 * make it more generally for future use */
+		 * make it more general for future use */
 		for (cos = 0; cos < bp->dcbx_port_params.ets.num_of_cos; cos++)
 			if (bp->dcbx_port_params.ets.cos_params[cos].
 						pri_bitmask & pri_bit)
 					tt2cos[pri].cos = cos;
 	}
+
+	/* we never want the FW to add a 0 vlan tag */
+	pfc_fw_cfg->dont_add_pri_0_en = 1;
+
 	bnx2x_dcbx_print_cos_params(bp,	pfc_fw_cfg);
 }
+
+void bnx2x_dcbx_pmf_update(struct bnx2x *bp)
+{
+	/* If we need to synchronize the DCBX results from the previous PMF,
+	 * read them from shmem and update bp accordingly.
+	 */
+	if (SHMEM2_HAS(bp, drv_flags) &&
+	   GET_FLAGS(SHMEM2_RD(bp, drv_flags), DRV_FLAGS_DCB_CONFIGURED)) {
+		/* Read neg results if dcbx is in the FW */
+		if (bnx2x_dcbx_read_shmem_neg_results(bp))
+			return;
+
+		bnx2x_dump_dcbx_drv_param(bp, &bp->dcbx_local_feat,
+					  bp->dcbx_error);
+		bnx2x_get_dcbx_drv_param(bp, &bp->dcbx_local_feat,
+					 bp->dcbx_error);
+	}
+}
+
 /* DCB netlink */
 #ifdef BCM_DCBNL
 
@@ -1879,10 +2112,12 @@
 	if (bp->dcb_state) {
 		switch (tcid) {
 		case DCB_NUMTCS_ATTR_PG:
-			*num = E2_NUM_OF_COS;
+			*num = CHIP_IS_E3B0(bp) ? DCBX_COS_MAX_NUM_E3B0 :
+						  DCBX_COS_MAX_NUM_E2;
 			break;
 		case DCB_NUMTCS_ATTR_PFC:
-			*num = E2_NUM_OF_COS;
+			*num = CHIP_IS_E3B0(bp) ? DCBX_COS_MAX_NUM_E3B0 :
+						  DCBX_COS_MAX_NUM_E2;
 			break;
 		default:
 			rval = -EINVAL;
diff --git a/drivers/net/bnx2x/bnx2x_dcb.h b/drivers/net/bnx2x/bnx2x_dcb.h
index bed369d..2c6a3bc 100644
--- a/drivers/net/bnx2x/bnx2x_dcb.h
+++ b/drivers/net/bnx2x/bnx2x_dcb.h
@@ -27,22 +27,30 @@
 	u32 traffic_type_priority[LLFC_DRIVER_TRAFFIC_TYPE_MAX];
 };
 
-#define E2_NUM_OF_COS			2
-#define BNX2X_DCBX_COS_NOT_STRICT	0
-#define BNX2X_DCBX_COS_LOW_STRICT	1
-#define BNX2X_DCBX_COS_HIGH_STRICT	2
+#define DCBX_COS_MAX_NUM_E2	DCBX_E2E3_MAX_NUM_COS
+/*
+ * bnx2x currently limits the number of supported COSes to 3;
+ * this is to be extended to 6.
+ */
+#define BNX2X_MAX_COS_SUPPORT	3
+#define DCBX_COS_MAX_NUM_E3B0	BNX2X_MAX_COS_SUPPORT
+#define DCBX_COS_MAX_NUM	BNX2X_MAX_COS_SUPPORT
 
 struct bnx2x_dcbx_cos_params {
 	u32	bw_tbl;
 	u32	pri_bitmask;
+	/*
+	 * strict priority: valid values are 0..5; 0 is highest priority.
+	 * There can't be two COSes with the same priority.
+	 */
 	u8	strict;
+#define BNX2X_DCBX_STRICT_INVALID			DCBX_COS_MAX_NUM
+#define BNX2X_DCBX_STRICT_COS_HIGHEST			0
+#define BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(sp)	((sp) + 1)
 	u8	pauseable;
 };
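+
+/*
+ * For illustration (numbering implied by the macros above): with two
+ * strict COSes one would set, e.g.:
+ *
+ *	cos_params[0].strict = BNX2X_DCBX_STRICT_COS_HIGHEST;
+ *	cos_params[1].strict =
+ *		BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(cos_params[0].strict);
+ */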
 
 struct bnx2x_dcbx_pg_params {
 	u32 enabled;
 	u8 num_of_cos; /* valid COS entries */
-	struct bnx2x_dcbx_cos_params	cos_params[E2_NUM_OF_COS];
+	struct bnx2x_dcbx_cos_params	cos_params[DCBX_COS_MAX_NUM];
 };
 
 struct bnx2x_dcbx_pfc_params {
@@ -60,6 +68,8 @@
 #define BNX2X_DCBX_OVERWRITE_SETTINGS_DISABLE		0
 #define BNX2X_DCBX_OVERWRITE_SETTINGS_ENABLE		1
 #define BNX2X_DCBX_OVERWRITE_SETTINGS_INVALID	(BNX2X_DCBX_CONFIG_INV_VALUE)
+#define BNX2X_IS_ETS_ENABLED(bp) ((bp)->dcb_state == BNX2X_DCB_STATE_ON &&\
+				  (bp)->dcbx_port_params.ets.enabled)
 
 struct bnx2x_config_lldp_params {
 	u32 overwrite_settings;
@@ -132,7 +142,7 @@
 };
 
 struct cos_help_data {
-	struct cos_entry_help_data	data[E2_NUM_OF_COS];
+	struct cos_entry_help_data	data[DCBX_COS_MAX_NUM];
 	u8				num_of_cos;
 };
 
@@ -148,6 +158,8 @@
 				((pg_pri) & (DCBX_PFC_PRI_PAUSE_MASK(bp)))
 #define DCBX_PFC_PRI_GET_NON_PAUSE(bp, pg_pri)	\
 			(DCBX_PFC_PRI_NON_PAUSE_MASK(bp) & (pg_pri))
+#define DCBX_IS_PFC_PRI_SOME_PAUSE(bp, pg_pri)	\
+			(0 != DCBX_PFC_PRI_GET_PAUSE(bp, pg_pri))
 #define IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pg_pri)	\
 			(pg_pri == DCBX_PFC_PRI_GET_PAUSE((bp), (pg_pri)))
 #define IS_DCBX_PFC_PRI_ONLY_NON_PAUSE(bp, pg_pri)\
@@ -170,22 +182,18 @@
 
 /* forward DCB/PFC related declarations */
 struct bnx2x;
-void bnx2x_dcb_init_intmem_pfc(struct bnx2x *bp);
 void bnx2x_dcbx_update(struct work_struct *work);
 void bnx2x_dcbx_init_params(struct bnx2x *bp);
 void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled);
 
 enum {
 	BNX2X_DCBX_STATE_NEG_RECEIVED = 0x1,
-#ifdef BCM_CNIC
-	BNX2X_DCBX_STATE_ISCSI_STOPPED,
-#endif
 	BNX2X_DCBX_STATE_TX_PAUSED,
 	BNX2X_DCBX_STATE_TX_RELEASED
 };
 
 void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state);
-
+void bnx2x_dcbx_pmf_update(struct bnx2x *bp);
 /* DCB netlink */
 #ifdef BCM_DCBNL
 extern const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops;
diff --git a/drivers/net/bnx2x/bnx2x_dump.h b/drivers/net/bnx2x/bnx2x_dump.h
index fb3ff7c..407531a 100644
--- a/drivers/net/bnx2x/bnx2x_dump.h
+++ b/drivers/net/bnx2x/bnx2x_dump.h
@@ -25,34 +25,55 @@
 
 
 /*definitions */
-#define XSTORM_WAITP_ADDR    0x2b8a80
-#define TSTORM_WAITP_ADDR    0x1b8a80
-#define USTORM_WAITP_ADDR    0x338a80
-#define CSTORM_WAITP_ADDR    0x238a80
-#define TSTORM_CAM_MODE         0x1B1440
+#define XSTORM_WAITP_ADDR	0x2b8a80
+#define TSTORM_WAITP_ADDR	0x1b8a80
+#define USTORM_WAITP_ADDR	0x338a80
+#define CSTORM_WAITP_ADDR	0x238a80
+#define TSTORM_CAM_MODE		0x1B1440
 
-#define MAX_TIMER_PENDING      200
-#define TIMER_SCAN_DONT_CARE   0xFF
-#define RI_E1			0x1
-#define RI_E1H			0x2
-#define RI_E2			0x4
-#define RI_ONLINE		0x100
-#define RI_PATH0_DUMP		0x200
-#define RI_PATH1_DUMP		0x400
-#define RI_E1_OFFLINE		(RI_E1)
-#define RI_E1_ONLINE		(RI_E1 | RI_ONLINE)
-#define RI_E1H_OFFLINE		(RI_E1H)
-#define RI_E1H_ONLINE		(RI_E1H | RI_ONLINE)
-#define RI_E2_OFFLINE		(RI_E2)
-#define RI_E2_ONLINE		(RI_E2 | RI_ONLINE)
-#define RI_E1E1H_OFFLINE	(RI_E1 | RI_E1H)
-#define RI_E1E1H_ONLINE		(RI_E1 | RI_E1H | RI_ONLINE)
-#define RI_E1HE2_OFFLINE	(RI_E2 | RI_E1H)
-#define RI_E1HE2_ONLINE		(RI_E2 | RI_E1H | RI_ONLINE)
-#define RI_E1E2_OFFLINE		(RI_E2 | RI_E1)
-#define RI_E1E2_ONLINE		(RI_E2 | RI_E1 | RI_ONLINE)
-#define RI_ALL_OFFLINE         (RI_E1 | RI_E1H | RI_E2)
-#define RI_ALL_ONLINE          (RI_E1 | RI_E1H | RI_E2 | RI_ONLINE)
+#define MAX_TIMER_PENDING	200
+#define TIMER_SCAN_DONT_CARE	0xFF
+#define RI_E1				0x1
+#define RI_E1H				0x2
+#define RI_E2				0x4
+#define RI_E3				0x8
+#define RI_ONLINE			0x100
+#define RI_PATH0_DUMP			0x200
+#define RI_PATH1_DUMP			0x400
+#define RI_E1_OFFLINE			(RI_E1)
+#define RI_E1_ONLINE			(RI_E1 | RI_ONLINE)
+#define RI_E1H_OFFLINE			(RI_E1H)
+#define RI_E1H_ONLINE			(RI_E1H | RI_ONLINE)
+#define RI_E2_OFFLINE			(RI_E2)
+#define RI_E2_ONLINE			(RI_E2 | RI_ONLINE)
+#define RI_E3_OFFLINE			(RI_E3)
+#define RI_E3_ONLINE			(RI_E3 | RI_ONLINE)
+#define RI_E1E1H_OFFLINE		(RI_E1 | RI_E1H)
+#define RI_E1E1H_ONLINE			(RI_E1 | RI_E1H | RI_ONLINE)
+#define RI_E1E1HE2_OFFLINE		(RI_E1 | RI_E1H | RI_E2)
+#define RI_E1E1HE2_ONLINE		(RI_E1 | RI_E1H | RI_E2 | RI_ONLINE)
+#define RI_E1HE2_OFFLINE		(RI_E2 | RI_E1H)
+#define RI_E1HE2_ONLINE			(RI_E2 | RI_E1H | RI_ONLINE)
+#define RI_E1E2_OFFLINE			(RI_E2 | RI_E1)
+#define RI_E1E2_ONLINE			(RI_E2 | RI_E1 | RI_ONLINE)
+#define RI_E1E3_OFFLINE			(RI_E1 | RI_E3)
+#define RI_E1E3_ONLINE			(RI_E1 | RI_E3 | RI_ONLINE)
+#define RI_E1HE3_OFFLINE		(RI_E1H | RI_E3)
+#define RI_E1HE3_ONLINE			(RI_E1H | RI_E3 | RI_ONLINE)
+#define RI_E2E3_OFFLINE			(RI_E2 | RI_E3)
+#define RI_E2E3_ONLINE			(RI_E2 | RI_E3 | RI_ONLINE)
+#define RI_E1E1HE3_OFFLINE		(RI_E1 | RI_E1H | RI_E3)
+#define RI_E1E1HE3_ONLINE		(RI_E1 | RI_E1H | RI_E3 | RI_ONLINE)
+#define RI_E1HE2E3_OFFLINE		(RI_E2 | RI_E1H | RI_E3)
+#define RI_E1HE2E3_ONLINE		(RI_E2 | RI_E1H | RI_E3 | RI_ONLINE)
+#define RI_E1E2E3_OFFLINE		(RI_E2 | RI_E1 | RI_E3)
+#define RI_E1E2E3_ONLINE		(RI_E2 | RI_E1 | RI_E3 | RI_ONLINE)
+#define RI_ALL_OFFLINE			(RI_E1 | RI_E1H | RI_E2 | RI_E3)
+#define RI_ALL_ONLINE		(RI_E1 | RI_E1H | RI_E2 | RI_E3 | RI_ONLINE)
+
+#define DBG_DMP_TRACE_BUFFER_SIZE	0x800
+#define DBG_DMP_TRACE_BUFFER_OFFSET(shmem0_offset) \
+	((shmem0_offset) - DBG_DMP_TRACE_BUFFER_SIZE)
 
 struct dump_sign {
 	u32 time_stamp;
@@ -86,185 +107,255 @@
 	u16 info;
 };
 
-#define REGS_COUNT			834
-static const struct reg_addr reg_addrs[REGS_COUNT] = {
+static const struct reg_addr reg_addrs[] = {
 	{ 0x2000, 341, RI_ALL_ONLINE }, { 0x2800, 103, RI_ALL_ONLINE },
 	{ 0x3000, 287, RI_ALL_ONLINE }, { 0x3800, 331, RI_ALL_ONLINE },
-	{ 0x8800, 6, RI_ALL_ONLINE }, { 0x8818, 1, RI_E1HE2_ONLINE },
-	{ 0x9000, 164, RI_E2_ONLINE }, { 0x9400, 33, RI_E2_ONLINE },
-	{ 0xa000, 27, RI_ALL_ONLINE }, { 0xa06c, 1, RI_E1E1H_ONLINE },
-	{ 0xa070, 71, RI_ALL_ONLINE }, { 0xa18c, 4, RI_E1E1H_ONLINE },
-	{ 0xa19c, 62, RI_ALL_ONLINE }, { 0xa294, 2, RI_E1E1H_ONLINE },
-	{ 0xa29c, 56, RI_ALL_ONLINE }, { 0xa39c, 7, RI_E1HE2_ONLINE },
-	{ 0xa3c0, 3, RI_E1HE2_ONLINE }, { 0xa3d0, 1, RI_E1HE2_ONLINE },
-	{ 0xa3d8, 1, RI_E1HE2_ONLINE }, { 0xa3e0, 1, RI_E1HE2_ONLINE },
-	{ 0xa3e8, 1, RI_E1HE2_ONLINE }, { 0xa3f0, 1, RI_E1HE2_ONLINE },
-	{ 0xa3f8, 1, RI_E1HE2_ONLINE }, { 0xa400, 43, RI_ALL_ONLINE },
-	{ 0xa4ac, 2, RI_E1E1H_ONLINE }, { 0xa4b4, 1, RI_ALL_ONLINE },
+	{ 0x8800, 6, RI_ALL_ONLINE }, { 0x8818, 1, RI_E1HE2E3_ONLINE },
+	{ 0x9000, 147, RI_E2E3_ONLINE }, { 0x924c, 1, RI_E2_ONLINE },
+	{ 0x9250, 16, RI_E2E3_ONLINE }, { 0x9400, 33, RI_E2E3_ONLINE },
+	{ 0x9484, 5, RI_E3_ONLINE }, { 0xa000, 27, RI_ALL_ONLINE },
+	{ 0xa06c, 1, RI_E1E1H_ONLINE }, { 0xa070, 71, RI_ALL_ONLINE },
+	{ 0xa18c, 4, RI_E1E1H_ONLINE }, { 0xa19c, 62, RI_ALL_ONLINE },
+	{ 0xa294, 2, RI_E1E1H_ONLINE }, { 0xa29c, 2, RI_ALL_ONLINE },
+	{ 0xa2a4, 2, RI_E1E1HE2_ONLINE }, { 0xa2ac, 52, RI_ALL_ONLINE },
+	{ 0xa39c, 7, RI_E1HE2E3_ONLINE }, { 0xa3b8, 2, RI_E3_ONLINE },
+	{ 0xa3c0, 3, RI_E1HE2E3_ONLINE }, { 0xa3d0, 1, RI_E1HE2E3_ONLINE },
+	{ 0xa3d8, 1, RI_E1HE2E3_ONLINE }, { 0xa3e0, 1, RI_E1HE2E3_ONLINE },
+	{ 0xa3e8, 1, RI_E1HE2E3_ONLINE }, { 0xa3f0, 1, RI_E1HE2E3_ONLINE },
+	{ 0xa3f8, 1, RI_E1HE2E3_ONLINE }, { 0xa400, 40, RI_ALL_ONLINE },
+	{ 0xa4a0, 1, RI_E1E1HE2_ONLINE }, { 0xa4a4, 2, RI_ALL_ONLINE },
+	{ 0xa4ac, 2, RI_E1E1H_ONLINE }, { 0xa4b4, 1, RI_E1E1HE2_ONLINE },
 	{ 0xa4b8, 2, RI_E1E1H_ONLINE }, { 0xa4c0, 3, RI_ALL_ONLINE },
-	{ 0xa4cc, 5, RI_E1E1H_ONLINE }, { 0xa4e0, 9, RI_ALL_ONLINE },
-	{ 0xa504, 1, RI_E1E1H_ONLINE }, { 0xa508, 3, RI_ALL_ONLINE },
-	{ 0xa518, 1, RI_ALL_ONLINE }, { 0xa520, 1, RI_ALL_ONLINE },
-	{ 0xa528, 1, RI_ALL_ONLINE }, { 0xa530, 1, RI_ALL_ONLINE },
-	{ 0xa538, 1, RI_ALL_ONLINE }, { 0xa540, 1, RI_ALL_ONLINE },
-	{ 0xa548, 1, RI_E1E1H_ONLINE }, { 0xa550, 1, RI_E1E1H_ONLINE },
-	{ 0xa558, 1, RI_E1E1H_ONLINE }, { 0xa560, 1, RI_E1E1H_ONLINE },
-	{ 0xa568, 1, RI_E1E1H_ONLINE }, { 0xa570, 1, RI_ALL_ONLINE },
-	{ 0xa580, 1, RI_ALL_ONLINE }, { 0xa590, 1, RI_ALL_ONLINE },
-	{ 0xa5a0, 1, RI_ALL_ONLINE }, { 0xa5c0, 1, RI_ALL_ONLINE },
-	{ 0xa5e0, 1, RI_E1HE2_ONLINE }, { 0xa5e8, 1, RI_E1HE2_ONLINE },
-	{ 0xa5f0, 1, RI_E1HE2_ONLINE }, { 0xa5f8, 10, RI_E1HE2_ONLINE },
-	{ 0xa620, 111, RI_E2_ONLINE }, { 0xa800, 51, RI_E2_ONLINE },
-	{ 0xa8d4, 4, RI_E2_ONLINE }, { 0xa8e8, 1, RI_E2_ONLINE },
-	{ 0xa8f0, 1, RI_E2_ONLINE }, { 0x10000, 236, RI_ALL_ONLINE },
-	{ 0x10400, 57, RI_ALL_ONLINE }, { 0x104e8, 2, RI_ALL_ONLINE },
+	{ 0xa4cc, 5, RI_E1E1H_ONLINE }, { 0xa4e0, 3, RI_ALL_ONLINE },
+	{ 0xa4fc, 2, RI_ALL_ONLINE }, { 0xa504, 1, RI_E1E1H_ONLINE },
+	{ 0xa508, 3, RI_ALL_ONLINE }, { 0xa518, 1, RI_ALL_ONLINE },
+	{ 0xa520, 1, RI_ALL_ONLINE }, { 0xa528, 1, RI_ALL_ONLINE },
+	{ 0xa530, 1, RI_ALL_ONLINE }, { 0xa538, 1, RI_ALL_ONLINE },
+	{ 0xa540, 1, RI_ALL_ONLINE }, { 0xa548, 1, RI_E1E1H_ONLINE },
+	{ 0xa550, 1, RI_E1E1H_ONLINE }, { 0xa558, 1, RI_E1E1H_ONLINE },
+	{ 0xa560, 1, RI_E1E1H_ONLINE }, { 0xa568, 1, RI_E1E1H_ONLINE },
+	{ 0xa570, 1, RI_ALL_ONLINE }, { 0xa580, 1, RI_ALL_ONLINE },
+	{ 0xa590, 1, RI_ALL_ONLINE }, { 0xa5a0, 1, RI_E1E1HE2_ONLINE },
+	{ 0xa5c0, 1, RI_ALL_ONLINE }, { 0xa5e0, 1, RI_E1HE2E3_ONLINE },
+	{ 0xa5e8, 1, RI_E1HE2E3_ONLINE }, { 0xa5f0, 1, RI_E1HE2E3_ONLINE },
+	{ 0xa5f8, 1, RI_E1HE2_ONLINE }, { 0xa5fc, 9, RI_E1HE2E3_ONLINE },
+	{ 0xa620, 6, RI_E2E3_ONLINE }, { 0xa638, 20, RI_E2_ONLINE },
+	{ 0xa688, 42, RI_E2E3_ONLINE }, { 0xa730, 1, RI_E2_ONLINE },
+	{ 0xa734, 2, RI_E2E3_ONLINE }, { 0xa73c, 4, RI_E2_ONLINE },
+	{ 0xa74c, 5, RI_E2E3_ONLINE }, { 0xa760, 5, RI_E2_ONLINE },
+	{ 0xa774, 7, RI_E2E3_ONLINE }, { 0xa790, 15, RI_E2_ONLINE },
+	{ 0xa7cc, 4, RI_E2E3_ONLINE }, { 0xa7e0, 6, RI_E3_ONLINE },
+	{ 0xa800, 18, RI_E2_ONLINE }, { 0xa848, 33, RI_E2E3_ONLINE },
+	{ 0xa8cc, 2, RI_E3_ONLINE }, { 0xa8d4, 4, RI_E2E3_ONLINE },
+	{ 0xa8e4, 1, RI_E3_ONLINE }, { 0xa8e8, 1, RI_E2E3_ONLINE },
+	{ 0xa8f0, 1, RI_E2E3_ONLINE }, { 0xa8f8, 30, RI_E3_ONLINE },
+	{ 0xa974, 73, RI_E3_ONLINE }, { 0xac30, 1, RI_E3_ONLINE },
+	{ 0xac40, 1, RI_E3_ONLINE }, { 0xac50, 1, RI_E3_ONLINE },
+	{ 0x10000, 9, RI_ALL_ONLINE }, { 0x10024, 1, RI_E1E1HE2_ONLINE },
+	{ 0x10028, 5, RI_ALL_ONLINE }, { 0x1003c, 6, RI_E1E1HE2_ONLINE },
+	{ 0x10054, 20, RI_ALL_ONLINE }, { 0x100a4, 4, RI_E1E1HE2_ONLINE },
+	{ 0x100b4, 11, RI_ALL_ONLINE }, { 0x100e0, 4, RI_E1E1HE2_ONLINE },
+	{ 0x100f0, 8, RI_ALL_ONLINE }, { 0x10110, 6, RI_E1E1HE2_ONLINE },
+	{ 0x10128, 110, RI_ALL_ONLINE }, { 0x102e0, 4, RI_E1E1HE2_ONLINE },
+	{ 0x102f0, 18, RI_ALL_ONLINE }, { 0x10338, 20, RI_E1E1HE2_ONLINE },
+	{ 0x10388, 10, RI_ALL_ONLINE }, { 0x10400, 6, RI_E1E1HE2_ONLINE },
+	{ 0x10418, 6, RI_ALL_ONLINE }, { 0x10430, 10, RI_E1E1HE2_ONLINE },
+	{ 0x10458, 22, RI_ALL_ONLINE }, { 0x104b0, 12, RI_E1E1HE2_ONLINE },
+	{ 0x104e0, 1, RI_ALL_ONLINE }, { 0x104e8, 2, RI_ALL_ONLINE },
 	{ 0x104f4, 2, RI_ALL_ONLINE }, { 0x10500, 146, RI_ALL_ONLINE },
-	{ 0x10750, 2, RI_ALL_ONLINE }, { 0x10760, 2, RI_ALL_ONLINE },
-	{ 0x10770, 2, RI_ALL_ONLINE }, { 0x10780, 2, RI_ALL_ONLINE },
-	{ 0x10790, 2, RI_ALL_ONLINE }, { 0x107a0, 2, RI_ALL_ONLINE },
-	{ 0x107b0, 2, RI_ALL_ONLINE }, { 0x107c0, 2, RI_ALL_ONLINE },
-	{ 0x107d0, 2, RI_ALL_ONLINE }, { 0x107e0, 2, RI_ALL_ONLINE },
+	{ 0x10750, 2, RI_E1E1HE2_ONLINE }, { 0x10760, 2, RI_E1E1HE2_ONLINE },
+	{ 0x10770, 2, RI_E1E1HE2_ONLINE }, { 0x10780, 2, RI_E1E1HE2_ONLINE },
+	{ 0x10790, 2, RI_ALL_ONLINE }, { 0x107a0, 2, RI_E1E1HE2_ONLINE },
+	{ 0x107b0, 2, RI_E1E1HE2_ONLINE }, { 0x107c0, 2, RI_E1E1HE2_ONLINE },
+	{ 0x107d0, 2, RI_E1E1HE2_ONLINE }, { 0x107e0, 2, RI_ALL_ONLINE },
 	{ 0x10880, 2, RI_ALL_ONLINE }, { 0x10900, 2, RI_ALL_ONLINE },
-	{ 0x16000, 26, RI_E1HE2_ONLINE }, { 0x16070, 18, RI_E1HE2_ONLINE },
-	{ 0x160c0, 27, RI_E1HE2_ONLINE }, { 0x16140, 1, RI_E1HE2_ONLINE },
-	{ 0x16160, 1, RI_E1HE2_ONLINE }, { 0x16180, 2, RI_E1HE2_ONLINE },
-	{ 0x161c0, 2, RI_E1HE2_ONLINE }, { 0x16204, 5, RI_E1HE2_ONLINE },
-	{ 0x18000, 1, RI_E1HE2_ONLINE }, { 0x18008, 1, RI_E1HE2_ONLINE },
-	{ 0x18010, 35, RI_E2_ONLINE }, { 0x180a4, 2, RI_E2_ONLINE },
-	{ 0x180c0, 191, RI_E2_ONLINE }, { 0x18440, 1, RI_E2_ONLINE },
-	{ 0x18460, 1, RI_E2_ONLINE }, { 0x18480, 2, RI_E2_ONLINE },
-	{ 0x184c0, 2, RI_E2_ONLINE }, { 0x18500, 15, RI_E2_ONLINE },
-	{ 0x20000, 24, RI_ALL_ONLINE }, { 0x20060, 8, RI_ALL_ONLINE },
-	{ 0x20080, 94, RI_ALL_ONLINE }, { 0x201f8, 1, RI_E1E1H_ONLINE },
-	{ 0x201fc, 1, RI_ALL_ONLINE }, { 0x20200, 1, RI_E1E1H_ONLINE },
-	{ 0x20204, 1, RI_ALL_ONLINE }, { 0x20208, 1, RI_E1E1H_ONLINE },
-	{ 0x2020c, 39, RI_ALL_ONLINE }, { 0x202c8, 1, RI_E2_ONLINE },
-	{ 0x202d8, 4, RI_E2_ONLINE }, { 0x20400, 2, RI_ALL_ONLINE },
-	{ 0x2040c, 8, RI_ALL_ONLINE }, { 0x2042c, 18, RI_E1HE2_ONLINE },
-	{ 0x20480, 1, RI_ALL_ONLINE }, { 0x20500, 1, RI_ALL_ONLINE },
-	{ 0x20600, 1, RI_ALL_ONLINE }, { 0x28000, 1, RI_ALL_ONLINE },
-	{ 0x28004, 8191, RI_ALL_OFFLINE }, { 0x30000, 1, RI_ALL_ONLINE },
-	{ 0x30004, 16383, RI_ALL_OFFLINE }, { 0x40000, 98, RI_ALL_ONLINE },
-	{ 0x401a8, 8, RI_E1HE2_ONLINE }, { 0x401c8, 1, RI_E1H_ONLINE },
-	{ 0x401cc, 2, RI_E1HE2_ONLINE }, { 0x401d4, 2, RI_E2_ONLINE },
-	{ 0x40200, 4, RI_ALL_ONLINE }, { 0x40220, 18, RI_E2_ONLINE },
-	{ 0x40400, 43, RI_ALL_ONLINE }, { 0x404cc, 3, RI_E1HE2_ONLINE },
-	{ 0x404e0, 1, RI_E2_ONLINE }, { 0x40500, 2, RI_ALL_ONLINE },
+	{ 0x16000, 1, RI_E1HE2_ONLINE }, { 0x16004, 25, RI_E1HE2E3_ONLINE },
+	{ 0x16070, 18, RI_E1HE2E3_ONLINE }, { 0x160c0, 7, RI_E1HE2E3_ONLINE },
+	{ 0x160dc, 2, RI_E1HE2_ONLINE }, { 0x160e4, 10, RI_E1HE2E3_ONLINE },
+	{ 0x1610c, 2, RI_E1HE2_ONLINE }, { 0x16114, 6, RI_E1HE2E3_ONLINE },
+	{ 0x16140, 48, RI_E1HE2E3_ONLINE }, { 0x16204, 5, RI_E1HE2E3_ONLINE },
+	{ 0x18000, 1, RI_E1HE2E3_ONLINE }, { 0x18008, 1, RI_E1HE2E3_ONLINE },
+	{ 0x18010, 35, RI_E2E3_ONLINE }, { 0x180a4, 2, RI_E2E3_ONLINE },
+	{ 0x180c0, 109, RI_E2E3_ONLINE }, { 0x18274, 1, RI_E2_ONLINE },
+	{ 0x18278, 81, RI_E2E3_ONLINE }, { 0x18440, 63, RI_E2E3_ONLINE },
+	{ 0x18570, 42, RI_E3_ONLINE }, { 0x20000, 24, RI_ALL_ONLINE },
+	{ 0x20060, 8, RI_ALL_ONLINE }, { 0x20080, 94, RI_ALL_ONLINE },
+	{ 0x201f8, 1, RI_E1E1H_ONLINE }, { 0x201fc, 1, RI_ALL_ONLINE },
+	{ 0x20200, 1, RI_E1E1H_ONLINE }, { 0x20204, 1, RI_ALL_ONLINE },
+	{ 0x20208, 1, RI_E1E1H_ONLINE }, { 0x2020c, 39, RI_ALL_ONLINE },
+	{ 0x202c8, 1, RI_E2E3_ONLINE }, { 0x202d8, 4, RI_E2E3_ONLINE },
+	{ 0x20400, 2, RI_ALL_ONLINE }, { 0x2040c, 8, RI_ALL_ONLINE },
+	{ 0x2042c, 18, RI_E1HE2E3_ONLINE }, { 0x20480, 1, RI_ALL_ONLINE },
+	{ 0x20500, 1, RI_ALL_ONLINE }, { 0x20600, 1, RI_ALL_ONLINE },
+	{ 0x28000, 1, RI_ALL_ONLINE }, { 0x28004, 8191, RI_ALL_OFFLINE },
+	{ 0x30000, 1, RI_ALL_ONLINE }, { 0x30004, 16383, RI_ALL_OFFLINE },
+	{ 0x40000, 98, RI_ALL_ONLINE }, { 0x401a8, 8, RI_E1HE2E3_ONLINE },
+	{ 0x401c8, 1, RI_E1H_ONLINE }, { 0x401cc, 2, RI_E1HE2E3_ONLINE },
+	{ 0x401d4, 2, RI_E2E3_ONLINE }, { 0x40200, 4, RI_ALL_ONLINE },
+	{ 0x40220, 18, RI_E2E3_ONLINE }, { 0x40268, 2, RI_E3_ONLINE },
+	{ 0x40400, 43, RI_ALL_ONLINE }, { 0x404cc, 3, RI_E1HE2E3_ONLINE },
+	{ 0x404e0, 1, RI_E2E3_ONLINE }, { 0x40500, 2, RI_ALL_ONLINE },
 	{ 0x40510, 2, RI_ALL_ONLINE }, { 0x40520, 2, RI_ALL_ONLINE },
 	{ 0x40530, 2, RI_ALL_ONLINE }, { 0x40540, 2, RI_ALL_ONLINE },
-	{ 0x40550, 10, RI_E2_ONLINE }, { 0x40610, 2, RI_E2_ONLINE },
-	{ 0x42000, 164, RI_ALL_ONLINE }, { 0x422c0, 4, RI_E2_ONLINE },
-	{ 0x422d4, 5, RI_E1HE2_ONLINE }, { 0x422e8, 1, RI_E2_ONLINE },
+	{ 0x40550, 10, RI_E2E3_ONLINE }, { 0x40610, 2, RI_E2E3_ONLINE },
+	{ 0x42000, 164, RI_ALL_ONLINE }, { 0x422c0, 4, RI_E2E3_ONLINE },
+	{ 0x422d4, 5, RI_E1HE2E3_ONLINE }, { 0x422e8, 1, RI_E2E3_ONLINE },
 	{ 0x42400, 49, RI_ALL_ONLINE }, { 0x424c8, 38, RI_ALL_ONLINE },
-	{ 0x42568, 2, RI_ALL_ONLINE }, { 0x42640, 5, RI_E2_ONLINE },
+	{ 0x42568, 2, RI_ALL_ONLINE }, { 0x42640, 5, RI_E2E3_ONLINE },
 	{ 0x42800, 1, RI_ALL_ONLINE }, { 0x50000, 1, RI_ALL_ONLINE },
 	{ 0x50004, 19, RI_ALL_ONLINE }, { 0x50050, 8, RI_ALL_ONLINE },
-	{ 0x50070, 88, RI_ALL_ONLINE }, { 0x501f0, 4, RI_E1HE2_ONLINE },
+	{ 0x50070, 88, RI_ALL_ONLINE }, { 0x501f0, 4, RI_E1HE2E3_ONLINE },
 	{ 0x50200, 2, RI_ALL_ONLINE }, { 0x5020c, 7, RI_ALL_ONLINE },
-	{ 0x50228, 6, RI_E1HE2_ONLINE }, { 0x50240, 1, RI_ALL_ONLINE },
-	{ 0x50280, 1, RI_ALL_ONLINE }, { 0x50300, 1, RI_E2_ONLINE },
-	{ 0x5030c, 1, RI_E2_ONLINE }, { 0x50318, 1, RI_E2_ONLINE },
-	{ 0x5031c, 1, RI_E2_ONLINE }, { 0x50320, 2, RI_E2_ONLINE },
+	{ 0x50228, 6, RI_E1HE2E3_ONLINE }, { 0x50240, 1, RI_ALL_ONLINE },
+	{ 0x50280, 1, RI_ALL_ONLINE }, { 0x50300, 1, RI_E2E3_ONLINE },
+	{ 0x5030c, 1, RI_E2E3_ONLINE }, { 0x50318, 1, RI_E2E3_ONLINE },
+	{ 0x5031c, 1, RI_E2E3_ONLINE }, { 0x50320, 2, RI_E2E3_ONLINE },
 	{ 0x52000, 1, RI_ALL_ONLINE }, { 0x54000, 1, RI_ALL_ONLINE },
 	{ 0x54004, 3327, RI_ALL_OFFLINE }, { 0x58000, 1, RI_ALL_ONLINE },
 	{ 0x58004, 8191, RI_E1E1H_OFFLINE }, { 0x60000, 26, RI_ALL_ONLINE },
 	{ 0x60068, 8, RI_E1E1H_ONLINE }, { 0x60088, 12, RI_ALL_ONLINE },
 	{ 0x600b8, 9, RI_E1E1H_ONLINE }, { 0x600dc, 1, RI_ALL_ONLINE },
-	{ 0x600e0, 5, RI_E1E1H_ONLINE }, { 0x600f4, 1, RI_ALL_ONLINE },
+	{ 0x600e0, 5, RI_E1E1H_ONLINE }, { 0x600f4, 1, RI_E1E1HE2_ONLINE },
 	{ 0x600f8, 1, RI_E1E1H_ONLINE }, { 0x600fc, 8, RI_ALL_ONLINE },
-	{ 0x6013c, 24, RI_E1H_ONLINE }, { 0x6019c, 2, RI_E2_ONLINE },
-	{ 0x601ac, 18, RI_E2_ONLINE }, { 0x60200, 1, RI_ALL_ONLINE },
-	{ 0x60204, 2, RI_ALL_OFFLINE }, { 0x60210, 13, RI_E2_ONLINE },
+	{ 0x6013c, 24, RI_E1H_ONLINE }, { 0x6019c, 2, RI_E2E3_ONLINE },
+	{ 0x601ac, 18, RI_E2E3_ONLINE }, { 0x60200, 1, RI_ALL_ONLINE },
+	{ 0x60204, 2, RI_ALL_OFFLINE }, { 0x60210, 13, RI_E2E3_ONLINE },
 	{ 0x61000, 1, RI_ALL_ONLINE }, { 0x61004, 511, RI_ALL_OFFLINE },
-	{ 0x70000, 8, RI_ALL_ONLINE }, { 0x70020, 8184, RI_ALL_OFFLINE },
+	{ 0x61800, 512, RI_E3_OFFLINE }, { 0x70000, 8, RI_ALL_ONLINE },
+	{ 0x70020, 8184, RI_ALL_OFFLINE }, { 0x78000, 8192, RI_E3_OFFLINE },
 	{ 0x85000, 3, RI_ALL_ONLINE }, { 0x8501c, 7, RI_ALL_ONLINE },
 	{ 0x85048, 1, RI_ALL_ONLINE }, { 0x85200, 32, RI_ALL_ONLINE },
-	{ 0xc1000, 7, RI_ALL_ONLINE }, { 0xc103c, 2, RI_E2_ONLINE },
+	{ 0xb0000, 16384, RI_E1H_ONLINE },
+	{ 0xc1000, 7, RI_ALL_ONLINE }, { 0xc103c, 2, RI_E2E3_ONLINE },
 	{ 0xc1800, 2, RI_ALL_ONLINE }, { 0xc2000, 164, RI_ALL_ONLINE },
-	{ 0xc22c0, 5, RI_E2_ONLINE }, { 0xc22d8, 4, RI_E2_ONLINE },
+	{ 0xc22c0, 5, RI_E2E3_ONLINE }, { 0xc22d8, 4, RI_E2E3_ONLINE },
 	{ 0xc2400, 49, RI_ALL_ONLINE }, { 0xc24c8, 38, RI_ALL_ONLINE },
 	{ 0xc2568, 2, RI_ALL_ONLINE }, { 0xc2600, 1, RI_ALL_ONLINE },
-	{ 0xc4000, 165, RI_ALL_ONLINE }, { 0xc42d8, 2, RI_E2_ONLINE },
-	{ 0xc42e0, 7, RI_E1HE2_ONLINE }, { 0xc42fc, 1, RI_E2_ONLINE },
+	{ 0xc4000, 165, RI_ALL_ONLINE }, { 0xc42d8, 2, RI_E2E3_ONLINE },
+	{ 0xc42e0, 7, RI_E1HE2E3_ONLINE }, { 0xc42fc, 1, RI_E2E3_ONLINE },
 	{ 0xc4400, 51, RI_ALL_ONLINE }, { 0xc44d0, 38, RI_ALL_ONLINE },
-	{ 0xc4570, 2, RI_ALL_ONLINE }, { 0xc4578, 5, RI_E2_ONLINE },
+	{ 0xc4570, 2, RI_ALL_ONLINE }, { 0xc4578, 5, RI_E2E3_ONLINE },
 	{ 0xc4600, 1, RI_ALL_ONLINE }, { 0xd0000, 19, RI_ALL_ONLINE },
 	{ 0xd004c, 8, RI_ALL_ONLINE }, { 0xd006c, 91, RI_ALL_ONLINE },
-	{ 0xd01fc, 1, RI_E2_ONLINE }, { 0xd0200, 2, RI_ALL_ONLINE },
-	{ 0xd020c, 7, RI_ALL_ONLINE }, { 0xd0228, 18, RI_E1HE2_ONLINE },
+	{ 0xd01fc, 1, RI_E2E3_ONLINE }, { 0xd0200, 2, RI_ALL_ONLINE },
+	{ 0xd020c, 7, RI_ALL_ONLINE }, { 0xd0228, 18, RI_E1HE2E3_ONLINE },
 	{ 0xd0280, 1, RI_ALL_ONLINE }, { 0xd0300, 1, RI_ALL_ONLINE },
 	{ 0xd0400, 1, RI_ALL_ONLINE }, { 0xd4000, 1, RI_ALL_ONLINE },
 	{ 0xd4004, 2559, RI_ALL_OFFLINE }, { 0xd8000, 1, RI_ALL_ONLINE },
 	{ 0xd8004, 8191, RI_ALL_OFFLINE }, { 0xe0000, 21, RI_ALL_ONLINE },
 	{ 0xe0054, 8, RI_ALL_ONLINE }, { 0xe0074, 49, RI_ALL_ONLINE },
 	{ 0xe0138, 1, RI_E1E1H_ONLINE }, { 0xe013c, 35, RI_ALL_ONLINE },
-	{ 0xe01f4, 2, RI_E2_ONLINE }, { 0xe0200, 2, RI_ALL_ONLINE },
-	{ 0xe020c, 8, RI_ALL_ONLINE }, { 0xe022c, 18, RI_E1HE2_ONLINE },
-	{ 0xe0280, 1, RI_ALL_ONLINE }, { 0xe0300, 1, RI_ALL_ONLINE },
-	{ 0xe1000, 1, RI_ALL_ONLINE }, { 0xe2000, 1, RI_ALL_ONLINE },
-	{ 0xe2004, 2047, RI_ALL_OFFLINE }, { 0xf0000, 1, RI_ALL_ONLINE },
-	{ 0xf0004, 16383, RI_ALL_OFFLINE }, { 0x101000, 12, RI_ALL_ONLINE },
-	{ 0x101050, 1, RI_E1HE2_ONLINE }, { 0x101054, 3, RI_E2_ONLINE },
-	{ 0x101100, 1, RI_ALL_ONLINE }, { 0x101800, 8, RI_ALL_ONLINE },
-	{ 0x102000, 18, RI_ALL_ONLINE }, { 0x102068, 6, RI_E2_ONLINE },
-	{ 0x102080, 17, RI_ALL_ONLINE }, { 0x1020c8, 8, RI_E1H_ONLINE },
-	{ 0x1020e8, 9, RI_E2_ONLINE }, { 0x102400, 1, RI_ALL_ONLINE },
-	{ 0x103000, 26, RI_ALL_ONLINE }, { 0x103098, 5, RI_E1HE2_ONLINE },
-	{ 0x1030ac, 10, RI_E2_ONLINE }, { 0x1030d8, 8, RI_E2_ONLINE },
-	{ 0x103400, 1, RI_E2_ONLINE }, { 0x103404, 135, RI_E2_OFFLINE },
-	{ 0x103800, 8, RI_ALL_ONLINE }, { 0x104000, 63, RI_ALL_ONLINE },
-	{ 0x10411c, 16, RI_E2_ONLINE }, { 0x104200, 17, RI_ALL_ONLINE },
-	{ 0x104400, 64, RI_ALL_ONLINE }, { 0x104500, 192, RI_ALL_OFFLINE },
-	{ 0x104800, 64, RI_ALL_ONLINE }, { 0x104900, 192, RI_ALL_OFFLINE },
-	{ 0x105000, 256, RI_ALL_ONLINE }, { 0x105400, 768, RI_ALL_OFFLINE },
-	{ 0x107000, 7, RI_E2_ONLINE }, { 0x108000, 33, RI_E1E1H_ONLINE },
+	{ 0xe01f4, 1, RI_E2_ONLINE }, { 0xe01f8, 1, RI_E2E3_ONLINE },
+	{ 0xe0200, 2, RI_ALL_ONLINE }, { 0xe020c, 8, RI_ALL_ONLINE },
+	{ 0xe022c, 18, RI_E1HE2E3_ONLINE }, { 0xe0280, 1, RI_ALL_ONLINE },
+	{ 0xe0300, 1, RI_ALL_ONLINE }, { 0xe1000, 1, RI_ALL_ONLINE },
+	{ 0xe2000, 1, RI_ALL_ONLINE }, { 0xe2004, 2047, RI_ALL_OFFLINE },
+	{ 0xf0000, 1, RI_ALL_ONLINE }, { 0xf0004, 16383, RI_ALL_OFFLINE },
+	{ 0x101000, 12, RI_ALL_ONLINE }, { 0x101050, 1, RI_E1HE2E3_ONLINE },
+	{ 0x101054, 3, RI_E2E3_ONLINE }, { 0x101100, 1, RI_ALL_ONLINE },
+	{ 0x101800, 8, RI_ALL_ONLINE }, { 0x102000, 18, RI_ALL_ONLINE },
+	{ 0x102068, 6, RI_E2E3_ONLINE }, { 0x102080, 17, RI_ALL_ONLINE },
+	{ 0x1020c8, 8, RI_E1H_ONLINE }, { 0x1020e8, 9, RI_E2E3_ONLINE },
+	{ 0x102400, 1, RI_ALL_ONLINE }, { 0x103000, 26, RI_ALL_ONLINE },
+	{ 0x103098, 5, RI_E1HE2E3_ONLINE }, { 0x1030ac, 2, RI_E2E3_ONLINE },
+	{ 0x1030b4, 1, RI_E2_ONLINE }, { 0x1030b8, 7, RI_E2E3_ONLINE },
+	{ 0x1030d8, 8, RI_E2E3_ONLINE }, { 0x103400, 1, RI_E2E3_ONLINE },
+	{ 0x103404, 135, RI_E2E3_OFFLINE }, { 0x103800, 8, RI_ALL_ONLINE },
+	{ 0x104000, 63, RI_ALL_ONLINE }, { 0x10411c, 16, RI_E2E3_ONLINE },
+	{ 0x104200, 17, RI_ALL_ONLINE }, { 0x104400, 64, RI_ALL_ONLINE },
+	{ 0x104500, 192, RI_ALL_OFFLINE }, { 0x104800, 64, RI_ALL_ONLINE },
+	{ 0x104900, 192, RI_ALL_OFFLINE }, { 0x105000, 256, RI_ALL_ONLINE },
+	{ 0x105400, 768, RI_ALL_OFFLINE }, { 0x107000, 7, RI_E2E3_ONLINE },
+	{ 0x10701c, 1, RI_E3_ONLINE }, { 0x108000, 33, RI_E1E1H_ONLINE },
 	{ 0x1080ac, 5, RI_E1H_ONLINE }, { 0x108100, 5, RI_E1E1H_ONLINE },
 	{ 0x108120, 5, RI_E1E1H_ONLINE }, { 0x108200, 74, RI_E1E1H_ONLINE },
 	{ 0x108400, 74, RI_E1E1H_ONLINE }, { 0x108800, 152, RI_E1E1H_ONLINE },
-	{ 0x110000, 111, RI_E2_ONLINE }, { 0x110200, 4, RI_E2_ONLINE },
-	{ 0x120000, 2, RI_ALL_ONLINE }, { 0x120008, 4, RI_ALL_ONLINE },
-	{ 0x120018, 3, RI_ALL_ONLINE }, { 0x120024, 4, RI_ALL_ONLINE },
-	{ 0x120034, 3, RI_ALL_ONLINE }, { 0x120040, 4, RI_ALL_ONLINE },
-	{ 0x120050, 3, RI_ALL_ONLINE }, { 0x12005c, 4, RI_ALL_ONLINE },
-	{ 0x12006c, 3, RI_ALL_ONLINE }, { 0x120078, 4, RI_ALL_ONLINE },
-	{ 0x120088, 3, RI_ALL_ONLINE }, { 0x120094, 4, RI_ALL_ONLINE },
-	{ 0x1200a4, 3, RI_ALL_ONLINE }, { 0x1200b0, 4, RI_ALL_ONLINE },
-	{ 0x1200c0, 3, RI_ALL_ONLINE }, { 0x1200cc, 4, RI_ALL_ONLINE },
-	{ 0x1200dc, 3, RI_ALL_ONLINE }, { 0x1200e8, 4, RI_ALL_ONLINE },
-	{ 0x1200f8, 3, RI_ALL_ONLINE }, { 0x120104, 4, RI_ALL_ONLINE },
-	{ 0x120114, 1, RI_ALL_ONLINE }, { 0x120118, 22, RI_ALL_ONLINE },
-	{ 0x120170, 2, RI_E1E1H_ONLINE }, { 0x120178, 243, RI_ALL_ONLINE },
-	{ 0x120544, 4, RI_E1E1H_ONLINE }, { 0x120554, 7, RI_ALL_ONLINE },
-	{ 0x12059c, 6, RI_E1HE2_ONLINE }, { 0x1205b4, 1, RI_E1HE2_ONLINE },
-	{ 0x1205b8, 16, RI_E1HE2_ONLINE }, { 0x1205f8, 4, RI_E2_ONLINE },
-	{ 0x120618, 1, RI_E2_ONLINE }, { 0x12061c, 20, RI_E1HE2_ONLINE },
-	{ 0x12066c, 11, RI_E1HE2_ONLINE }, { 0x120698, 5, RI_E2_ONLINE },
-	{ 0x1206b0, 76, RI_E2_ONLINE }, { 0x1207fc, 1, RI_E2_ONLINE },
-	{ 0x120808, 66, RI_ALL_ONLINE }, { 0x120910, 7, RI_E2_ONLINE },
-	{ 0x120930, 9, RI_E2_ONLINE }, { 0x120a00, 2, RI_ALL_ONLINE },
-	{ 0x122000, 2, RI_ALL_ONLINE }, { 0x122008, 2046, RI_E1_OFFLINE },
-	{ 0x128000, 2, RI_E1HE2_ONLINE }, { 0x128008, 6142, RI_E1HE2_OFFLINE },
-	{ 0x130000, 35, RI_E2_ONLINE }, { 0x130100, 29, RI_E2_ONLINE },
-	{ 0x130180, 1, RI_E2_ONLINE }, { 0x130200, 1, RI_E2_ONLINE },
-	{ 0x130280, 1, RI_E2_ONLINE }, { 0x130300, 5, RI_E2_ONLINE },
-	{ 0x130380, 1, RI_E2_ONLINE }, { 0x130400, 1, RI_E2_ONLINE },
-	{ 0x130480, 5, RI_E2_ONLINE }, { 0x130800, 72, RI_E2_ONLINE },
-	{ 0x131000, 136, RI_E2_ONLINE }, { 0x132000, 148, RI_E2_ONLINE },
-	{ 0x134000, 544, RI_E2_ONLINE }, { 0x140000, 64, RI_ALL_ONLINE },
-	{ 0x140100, 5, RI_E1E1H_ONLINE }, { 0x140114, 45, RI_ALL_ONLINE },
-	{ 0x140200, 6, RI_ALL_ONLINE }, { 0x140220, 4, RI_E2_ONLINE },
-	{ 0x140240, 4, RI_E2_ONLINE }, { 0x140260, 4, RI_E2_ONLINE },
-	{ 0x140280, 4, RI_E2_ONLINE }, { 0x1402a0, 4, RI_E2_ONLINE },
-	{ 0x1402c0, 4, RI_E2_ONLINE }, { 0x1402e0, 13, RI_E2_ONLINE },
-	{ 0x144000, 4, RI_E1E1H_ONLINE }, { 0x148000, 4, RI_E1E1H_ONLINE },
-	{ 0x14c000, 4, RI_E1E1H_ONLINE }, { 0x150000, 4, RI_E1E1H_ONLINE },
-	{ 0x154000, 4, RI_E1E1H_ONLINE }, { 0x158000, 4, RI_E1E1H_ONLINE },
-	{ 0x15c000, 2, RI_E1HE2_ONLINE }, { 0x15c008, 5, RI_E1H_ONLINE },
-	{ 0x15c020, 27, RI_E2_ONLINE }, { 0x15c090, 13, RI_E2_ONLINE },
-	{ 0x15c0c8, 34, RI_E2_ONLINE }, { 0x161000, 7, RI_ALL_ONLINE },
-	{ 0x16103c, 2, RI_E2_ONLINE }, { 0x161800, 2, RI_ALL_ONLINE },
-	{ 0x164000, 60, RI_ALL_ONLINE }, { 0x164110, 2, RI_E1HE2_ONLINE },
-	{ 0x164118, 15, RI_E2_ONLINE }, { 0x164200, 1, RI_ALL_ONLINE },
+	{ 0x110000, 111, RI_E2E3_ONLINE }, { 0x1101dc, 1, RI_E3_ONLINE },
+	{ 0x110200, 4, RI_E2E3_ONLINE }, { 0x120000, 2, RI_ALL_ONLINE },
+	{ 0x120008, 4, RI_ALL_ONLINE }, { 0x120018, 3, RI_ALL_ONLINE },
+	{ 0x120024, 4, RI_ALL_ONLINE }, { 0x120034, 3, RI_ALL_ONLINE },
+	{ 0x120040, 4, RI_ALL_ONLINE }, { 0x120050, 3, RI_ALL_ONLINE },
+	{ 0x12005c, 4, RI_ALL_ONLINE }, { 0x12006c, 3, RI_ALL_ONLINE },
+	{ 0x120078, 4, RI_ALL_ONLINE }, { 0x120088, 3, RI_ALL_ONLINE },
+	{ 0x120094, 4, RI_ALL_ONLINE }, { 0x1200a4, 3, RI_ALL_ONLINE },
+	{ 0x1200b0, 4, RI_ALL_ONLINE }, { 0x1200c0, 3, RI_ALL_ONLINE },
+	{ 0x1200cc, 4, RI_ALL_ONLINE }, { 0x1200dc, 3, RI_ALL_ONLINE },
+	{ 0x1200e8, 4, RI_ALL_ONLINE }, { 0x1200f8, 3, RI_ALL_ONLINE },
+	{ 0x120104, 4, RI_ALL_ONLINE }, { 0x120114, 1, RI_ALL_ONLINE },
+	{ 0x120118, 22, RI_ALL_ONLINE }, { 0x120170, 2, RI_E1E1H_ONLINE },
+	{ 0x120178, 243, RI_ALL_ONLINE }, { 0x120544, 4, RI_E1E1H_ONLINE },
+	{ 0x120554, 6, RI_ALL_ONLINE }, { 0x12059c, 6, RI_E1HE2E3_ONLINE },
+	{ 0x1205b4, 1, RI_E1HE2E3_ONLINE }, { 0x1205b8, 15, RI_E1HE2E3_ONLINE },
+	{ 0x1205f4, 1, RI_E1HE2_ONLINE }, { 0x1205f8, 4, RI_E2E3_ONLINE },
+	{ 0x120618, 1, RI_E2E3_ONLINE }, { 0x12061c, 20, RI_E1HE2E3_ONLINE },
+	{ 0x12066c, 11, RI_E1HE2E3_ONLINE }, { 0x120698, 3, RI_E2E3_ONLINE },
+	{ 0x1206a4, 1, RI_E2_ONLINE }, { 0x1206a8, 1, RI_E2E3_ONLINE },
+	{ 0x1206b0, 75, RI_E2E3_ONLINE }, { 0x1207dc, 1, RI_E2_ONLINE },
+	{ 0x1207fc, 1, RI_E2E3_ONLINE }, { 0x12080c, 65, RI_ALL_ONLINE },
+	{ 0x120910, 7, RI_E2E3_ONLINE }, { 0x120930, 9, RI_E2E3_ONLINE },
+	{ 0x12095c, 37, RI_E3_ONLINE }, { 0x120a00, 2, RI_E1E1HE2_ONLINE },
+	{ 0x120b00, 1, RI_E3_ONLINE }, { 0x122000, 2, RI_ALL_ONLINE },
+	{ 0x122008, 2046, RI_E1_OFFLINE }, { 0x128000, 2, RI_E1HE2E3_ONLINE },
+	{ 0x128008, 6142, RI_E1HE2E3_OFFLINE },
+	{ 0x130000, 35, RI_E2E3_ONLINE },
+	{ 0x130100, 29, RI_E2E3_ONLINE }, { 0x130180, 1, RI_E2E3_ONLINE },
+	{ 0x130200, 1, RI_E2E3_ONLINE }, { 0x130280, 1, RI_E2E3_ONLINE },
+	{ 0x130300, 5, RI_E2E3_ONLINE }, { 0x130380, 1, RI_E2E3_ONLINE },
+	{ 0x130400, 1, RI_E2E3_ONLINE }, { 0x130480, 5, RI_E2E3_ONLINE },
+	{ 0x130800, 72, RI_E2E3_ONLINE }, { 0x131000, 136, RI_E2E3_ONLINE },
+	{ 0x132000, 148, RI_E2E3_ONLINE }, { 0x134000, 544, RI_E2E3_ONLINE },
+	{ 0x140000, 64, RI_ALL_ONLINE }, { 0x140100, 5, RI_E1E1H_ONLINE },
+	{ 0x140114, 45, RI_ALL_ONLINE }, { 0x140200, 6, RI_ALL_ONLINE },
+	{ 0x140220, 4, RI_E2E3_ONLINE }, { 0x140240, 4, RI_E2E3_ONLINE },
+	{ 0x140260, 4, RI_E2E3_ONLINE }, { 0x140280, 4, RI_E2E3_ONLINE },
+	{ 0x1402a0, 4, RI_E2E3_ONLINE }, { 0x1402c0, 4, RI_E2E3_ONLINE },
+	{ 0x1402e0, 13, RI_E2E3_ONLINE }, { 0x144000, 4, RI_E1E1H_ONLINE },
+	{ 0x148000, 4, RI_E1E1H_ONLINE }, { 0x14c000, 4, RI_E1E1H_ONLINE },
+	{ 0x150000, 4, RI_E1E1H_ONLINE }, { 0x154000, 4, RI_E1E1H_ONLINE },
+	{ 0x158000, 4, RI_E1E1H_ONLINE }, { 0x15c000, 2, RI_E1HE2E3_ONLINE },
+	{ 0x15c008, 5, RI_E1H_ONLINE }, { 0x15c020, 27, RI_E2E3_ONLINE },
+	{ 0x15c090, 13, RI_E2E3_ONLINE }, { 0x15c0c8, 34, RI_E2E3_ONLINE },
+	{ 0x15c150, 4, RI_E3_ONLINE }, { 0x160004, 6, RI_E3_ONLINE },
+	{ 0x160040, 6, RI_E3_ONLINE }, { 0x16005c, 6, RI_E3_ONLINE },
+	{ 0x160078, 2, RI_E3_ONLINE }, { 0x160300, 8, RI_E3_ONLINE },
+	{ 0x160330, 6, RI_E3_ONLINE }, { 0x160404, 6, RI_E3_ONLINE },
+	{ 0x160440, 6, RI_E3_ONLINE }, { 0x16045c, 6, RI_E3_ONLINE },
+	{ 0x160478, 2, RI_E3_ONLINE }, { 0x160700, 8, RI_E3_ONLINE },
+	{ 0x160730, 6, RI_E3_ONLINE }, { 0x161000, 7, RI_ALL_ONLINE },
+	{ 0x16103c, 2, RI_E2E3_ONLINE }, { 0x161800, 2, RI_ALL_ONLINE },
+	{ 0x162000, 54, RI_E3_ONLINE }, { 0x162200, 60, RI_E3_ONLINE },
+	{ 0x162400, 54, RI_E3_ONLINE }, { 0x162600, 60, RI_E3_ONLINE },
+	{ 0x162800, 54, RI_E3_ONLINE }, { 0x162a00, 60, RI_E3_ONLINE },
+	{ 0x162c00, 54, RI_E3_ONLINE }, { 0x162e00, 60, RI_E3_ONLINE },
+	{ 0x163000, 1, RI_E3_ONLINE }, { 0x163008, 1, RI_E3_ONLINE },
+	{ 0x163010, 1, RI_E3_ONLINE }, { 0x163018, 1, RI_E3_ONLINE },
+	{ 0x163020, 5, RI_E3_ONLINE }, { 0x163038, 3, RI_E3_ONLINE },
+	{ 0x163048, 3, RI_E3_ONLINE }, { 0x163058, 1, RI_E3_ONLINE },
+	{ 0x163060, 1, RI_E3_ONLINE }, { 0x163068, 1, RI_E3_ONLINE },
+	{ 0x163070, 3, RI_E3_ONLINE }, { 0x163080, 1, RI_E3_ONLINE },
+	{ 0x163088, 3, RI_E3_ONLINE }, { 0x163098, 1, RI_E3_ONLINE },
+	{ 0x1630a0, 1, RI_E3_ONLINE }, { 0x1630a8, 1, RI_E3_ONLINE },
+	{ 0x1630c0, 1, RI_E3_ONLINE }, { 0x1630c8, 1, RI_E3_ONLINE },
+	{ 0x1630d0, 1, RI_E3_ONLINE }, { 0x1630d8, 1, RI_E3_ONLINE },
+	{ 0x1630e0, 2, RI_E3_ONLINE }, { 0x163110, 1, RI_E3_ONLINE },
+	{ 0x163120, 2, RI_E3_ONLINE }, { 0x163420, 4, RI_E3_ONLINE },
+	{ 0x163438, 2, RI_E3_ONLINE }, { 0x163488, 2, RI_E3_ONLINE },
+	{ 0x163520, 2, RI_E3_ONLINE }, { 0x163800, 1, RI_E3_ONLINE },
+	{ 0x163808, 1, RI_E3_ONLINE }, { 0x163810, 1, RI_E3_ONLINE },
+	{ 0x163818, 1, RI_E3_ONLINE }, { 0x163820, 5, RI_E3_ONLINE },
+	{ 0x163838, 3, RI_E3_ONLINE }, { 0x163848, 3, RI_E3_ONLINE },
+	{ 0x163858, 1, RI_E3_ONLINE }, { 0x163860, 1, RI_E3_ONLINE },
+	{ 0x163868, 1, RI_E3_ONLINE }, { 0x163870, 3, RI_E3_ONLINE },
+	{ 0x163880, 1, RI_E3_ONLINE }, { 0x163888, 3, RI_E3_ONLINE },
+	{ 0x163898, 1, RI_E3_ONLINE }, { 0x1638a0, 1, RI_E3_ONLINE },
+	{ 0x1638a8, 1, RI_E3_ONLINE }, { 0x1638c0, 1, RI_E3_ONLINE },
+	{ 0x1638c8, 1, RI_E3_ONLINE }, { 0x1638d0, 1, RI_E3_ONLINE },
+	{ 0x1638d8, 1, RI_E3_ONLINE }, { 0x1638e0, 2, RI_E3_ONLINE },
+	{ 0x163910, 1, RI_E3_ONLINE }, { 0x163920, 2, RI_E3_ONLINE },
+	{ 0x163c20, 4, RI_E3_ONLINE }, { 0x163c38, 2, RI_E3_ONLINE },
+	{ 0x163c88, 2, RI_E3_ONLINE }, { 0x163d20, 2, RI_E3_ONLINE },
+	{ 0x164000, 60, RI_ALL_ONLINE }, { 0x164110, 2, RI_E1HE2E3_ONLINE },
+	{ 0x164118, 15, RI_E2E3_ONLINE }, { 0x164200, 1, RI_ALL_ONLINE },
 	{ 0x164208, 1, RI_ALL_ONLINE }, { 0x164210, 1, RI_ALL_ONLINE },
 	{ 0x164218, 1, RI_ALL_ONLINE }, { 0x164220, 1, RI_ALL_ONLINE },
 	{ 0x164228, 1, RI_ALL_ONLINE }, { 0x164230, 1, RI_ALL_ONLINE },
@@ -273,9 +364,9 @@
 	{ 0x164258, 1, RI_ALL_ONLINE }, { 0x164260, 1, RI_ALL_ONLINE },
 	{ 0x164270, 2, RI_ALL_ONLINE }, { 0x164280, 2, RI_ALL_ONLINE },
 	{ 0x164800, 2, RI_ALL_ONLINE }, { 0x165000, 2, RI_ALL_ONLINE },
-	{ 0x166000, 164, RI_ALL_ONLINE }, { 0x1662cc, 7, RI_E2_ONLINE },
+	{ 0x166000, 164, RI_ALL_ONLINE }, { 0x1662cc, 7, RI_E2E3_ONLINE },
 	{ 0x166400, 49, RI_ALL_ONLINE }, { 0x1664c8, 38, RI_ALL_ONLINE },
-	{ 0x166568, 2, RI_ALL_ONLINE }, { 0x166570, 5, RI_E2_ONLINE },
+	{ 0x166568, 2, RI_ALL_ONLINE }, { 0x166570, 5, RI_E2E3_ONLINE },
 	{ 0x166800, 1, RI_ALL_ONLINE }, { 0x168000, 137, RI_ALL_ONLINE },
 	{ 0x168224, 2, RI_E1E1H_ONLINE }, { 0x16822c, 29, RI_ALL_ONLINE },
 	{ 0x1682a0, 12, RI_E1E1H_ONLINE }, { 0x1682d0, 12, RI_ALL_ONLINE },
@@ -285,89 +376,94 @@
 	{ 0x168a00, 128, RI_ALL_ONLINE }, { 0x16a000, 1, RI_ALL_ONLINE },
 	{ 0x16a004, 1535, RI_ALL_OFFLINE }, { 0x16c000, 1, RI_ALL_ONLINE },
 	{ 0x16c004, 1535, RI_ALL_OFFLINE }, { 0x16e000, 16, RI_E1H_ONLINE },
-	{ 0x16e040, 8, RI_E2_ONLINE }, { 0x16e100, 1, RI_E1H_ONLINE },
+	{ 0x16e040, 8, RI_E2E3_ONLINE }, { 0x16e100, 1, RI_E1H_ONLINE },
 	{ 0x16e200, 2, RI_E1H_ONLINE }, { 0x16e400, 161, RI_E1H_ONLINE },
-	{ 0x16e684, 2, RI_E1HE2_ONLINE }, { 0x16e68c, 12, RI_E1H_ONLINE },
-	{ 0x16e6bc, 4, RI_E1HE2_ONLINE }, { 0x16e6cc, 4, RI_E1H_ONLINE },
-	{ 0x16e6e0, 12, RI_E2_ONLINE }, { 0x16e768, 17, RI_E2_ONLINE },
+	{ 0x16e684, 2, RI_E1HE2E3_ONLINE }, { 0x16e68c, 12, RI_E1H_ONLINE },
+	{ 0x16e6bc, 4, RI_E1HE2E3_ONLINE }, { 0x16e6cc, 4, RI_E1H_ONLINE },
+	{ 0x16e6e0, 12, RI_E2E3_ONLINE }, { 0x16e768, 17, RI_E2E3_ONLINE },
 	{ 0x170000, 24, RI_ALL_ONLINE }, { 0x170060, 4, RI_E1E1H_ONLINE },
-	{ 0x170070, 65, RI_ALL_ONLINE }, { 0x170194, 11, RI_E2_ONLINE },
-	{ 0x1701c4, 1, RI_E2_ONLINE }, { 0x1701cc, 7, RI_E2_ONLINE },
-	{ 0x1701ec, 1, RI_E2_ONLINE }, { 0x1701f4, 1, RI_E2_ONLINE },
-	{ 0x170200, 4, RI_ALL_ONLINE }, { 0x170214, 1, RI_ALL_ONLINE },
-	{ 0x170218, 77, RI_E2_ONLINE }, { 0x170400, 64, RI_E2_ONLINE },
-	{ 0x178000, 1, RI_ALL_ONLINE }, { 0x180000, 61, RI_ALL_ONLINE },
-	{ 0x18013c, 2, RI_E1HE2_ONLINE }, { 0x180200, 58, RI_ALL_ONLINE },
-	{ 0x180340, 4, RI_ALL_ONLINE }, { 0x180380, 1, RI_E2_ONLINE },
-	{ 0x180388, 1, RI_E2_ONLINE }, { 0x180390, 1, RI_E2_ONLINE },
-	{ 0x180398, 1, RI_E2_ONLINE }, { 0x1803a0, 5, RI_E2_ONLINE },
+	{ 0x170070, 65, RI_ALL_ONLINE }, { 0x170194, 11, RI_E2E3_ONLINE },
+	{ 0x1701c4, 1, RI_E2E3_ONLINE }, { 0x1701cc, 7, RI_E2E3_ONLINE },
+	{ 0x1701e8, 1, RI_E3_ONLINE }, { 0x1701ec, 1, RI_E2E3_ONLINE },
+	{ 0x1701f4, 1, RI_E2E3_ONLINE }, { 0x170200, 4, RI_ALL_ONLINE },
+	{ 0x170214, 1, RI_ALL_ONLINE }, { 0x170218, 77, RI_E2E3_ONLINE },
+	{ 0x170400, 64, RI_E2E3_ONLINE }, { 0x178000, 1, RI_ALL_ONLINE },
+	{ 0x180000, 61, RI_ALL_ONLINE }, { 0x18013c, 2, RI_E1HE2E3_ONLINE },
+	{ 0x180200, 58, RI_ALL_ONLINE }, { 0x180340, 4, RI_ALL_ONLINE },
+	{ 0x180380, 1, RI_E2E3_ONLINE }, { 0x180388, 1, RI_E2E3_ONLINE },
+	{ 0x180390, 1, RI_E2E3_ONLINE }, { 0x180398, 1, RI_E2E3_ONLINE },
+	{ 0x1803a0, 5, RI_E2E3_ONLINE }, { 0x1803b4, 2, RI_E3_ONLINE },
 	{ 0x180400, 1, RI_ALL_ONLINE }, { 0x180404, 255, RI_E1E1H_OFFLINE },
 	{ 0x181000, 4, RI_ALL_ONLINE }, { 0x181010, 1020, RI_ALL_OFFLINE },
-	{ 0x1a0000, 1, RI_ALL_ONLINE }, { 0x1a0004, 5631, RI_ALL_OFFLINE },
-	{ 0x1a5800, 2560, RI_E1HE2_OFFLINE }, { 0x1a8000, 1, RI_ALL_ONLINE },
-	{ 0x1a8004, 8191, RI_E1HE2_OFFLINE }, { 0x1b0000, 1, RI_ALL_ONLINE },
-	{ 0x1b0004, 15, RI_E1H_OFFLINE }, { 0x1b0040, 1, RI_E1HE2_ONLINE },
-	{ 0x1b0044, 239, RI_E1H_OFFLINE }, { 0x1b0400, 1, RI_ALL_ONLINE },
-	{ 0x1b0404, 255, RI_E1H_OFFLINE }, { 0x1b0800, 1, RI_ALL_ONLINE },
-	{ 0x1b0840, 1, RI_E1HE2_ONLINE }, { 0x1b0c00, 1, RI_ALL_ONLINE },
-	{ 0x1b1000, 1, RI_ALL_ONLINE }, { 0x1b1040, 1, RI_E1HE2_ONLINE },
-	{ 0x1b1400, 1, RI_ALL_ONLINE }, { 0x1b1440, 1, RI_E1HE2_ONLINE },
-	{ 0x1b1480, 1, RI_E1HE2_ONLINE }, { 0x1b14c0, 1, RI_E1HE2_ONLINE },
-	{ 0x1b1800, 128, RI_ALL_OFFLINE }, { 0x1b1c00, 128, RI_ALL_OFFLINE },
-	{ 0x1b2000, 1, RI_ALL_ONLINE }, { 0x1b2400, 1, RI_E1HE2_ONLINE },
-	{ 0x1b2404, 5631, RI_E2_OFFLINE }, { 0x1b8000, 1, RI_ALL_ONLINE },
-	{ 0x1b8040, 1, RI_ALL_ONLINE }, { 0x1b8080, 1, RI_ALL_ONLINE },
-	{ 0x1b80c0, 1, RI_ALL_ONLINE }, { 0x1b8100, 1, RI_ALL_ONLINE },
-	{ 0x1b8140, 1, RI_ALL_ONLINE }, { 0x1b8180, 1, RI_ALL_ONLINE },
-	{ 0x1b81c0, 1, RI_ALL_ONLINE }, { 0x1b8200, 1, RI_ALL_ONLINE },
-	{ 0x1b8240, 1, RI_ALL_ONLINE }, { 0x1b8280, 1, RI_ALL_ONLINE },
-	{ 0x1b82c0, 1, RI_ALL_ONLINE }, { 0x1b8300, 1, RI_ALL_ONLINE },
-	{ 0x1b8340, 1, RI_ALL_ONLINE }, { 0x1b8380, 1, RI_ALL_ONLINE },
-	{ 0x1b83c0, 1, RI_ALL_ONLINE }, { 0x1b8400, 1, RI_ALL_ONLINE },
-	{ 0x1b8440, 1, RI_ALL_ONLINE }, { 0x1b8480, 1, RI_ALL_ONLINE },
-	{ 0x1b84c0, 1, RI_ALL_ONLINE }, { 0x1b8500, 1, RI_ALL_ONLINE },
-	{ 0x1b8540, 1, RI_ALL_ONLINE }, { 0x1b8580, 1, RI_ALL_ONLINE },
-	{ 0x1b85c0, 19, RI_E2_ONLINE }, { 0x1b8800, 1, RI_ALL_ONLINE },
-	{ 0x1b8840, 1, RI_ALL_ONLINE }, { 0x1b8880, 1, RI_ALL_ONLINE },
-	{ 0x1b88c0, 1, RI_ALL_ONLINE }, { 0x1b8900, 1, RI_ALL_ONLINE },
-	{ 0x1b8940, 1, RI_ALL_ONLINE }, { 0x1b8980, 1, RI_ALL_ONLINE },
-	{ 0x1b89c0, 1, RI_ALL_ONLINE }, { 0x1b8a00, 1, RI_ALL_ONLINE },
-	{ 0x1b8a40, 1, RI_ALL_ONLINE }, { 0x1b8a80, 1, RI_ALL_ONLINE },
-	{ 0x1b8ac0, 1, RI_ALL_ONLINE }, { 0x1b8b00, 1, RI_ALL_ONLINE },
-	{ 0x1b8b40, 1, RI_ALL_ONLINE }, { 0x1b8b80, 1, RI_ALL_ONLINE },
-	{ 0x1b8bc0, 1, RI_ALL_ONLINE }, { 0x1b8c00, 1, RI_ALL_ONLINE },
-	{ 0x1b8c40, 1, RI_ALL_ONLINE }, { 0x1b8c80, 1, RI_ALL_ONLINE },
-	{ 0x1b8cc0, 1, RI_ALL_ONLINE }, { 0x1b8cc4, 1, RI_E2_ONLINE },
-	{ 0x1b8d00, 1, RI_ALL_ONLINE }, { 0x1b8d40, 1, RI_ALL_ONLINE },
-	{ 0x1b8d80, 1, RI_ALL_ONLINE }, { 0x1b8dc0, 1, RI_ALL_ONLINE },
-	{ 0x1b8e00, 1, RI_ALL_ONLINE }, { 0x1b8e40, 1, RI_ALL_ONLINE },
-	{ 0x1b8e80, 1, RI_ALL_ONLINE }, { 0x1b8e84, 1, RI_E2_ONLINE },
-	{ 0x1b8ec0, 1, RI_E1HE2_ONLINE }, { 0x1b8f00, 1, RI_E1HE2_ONLINE },
-	{ 0x1b8f40, 1, RI_E1HE2_ONLINE }, { 0x1b8f80, 1, RI_E1HE2_ONLINE },
-	{ 0x1b8fc0, 1, RI_E1HE2_ONLINE }, { 0x1b8fc4, 2, RI_E2_ONLINE },
-	{ 0x1b8fd0, 6, RI_E2_ONLINE }, { 0x1b9000, 1, RI_E2_ONLINE },
-	{ 0x1b9040, 3, RI_E2_ONLINE }, { 0x1b9400, 14, RI_E2_ONLINE },
-	{ 0x1b943c, 19, RI_E2_ONLINE }, { 0x1b9490, 10, RI_E2_ONLINE },
-	{ 0x1c0000, 2, RI_ALL_ONLINE }, { 0x200000, 65, RI_ALL_ONLINE },
-	{ 0x20014c, 2, RI_E1HE2_ONLINE }, { 0x200200, 58, RI_ALL_ONLINE },
-	{ 0x200340, 4, RI_ALL_ONLINE }, { 0x200380, 1, RI_E2_ONLINE },
-	{ 0x200388, 1, RI_E2_ONLINE }, { 0x200390, 1, RI_E2_ONLINE },
-	{ 0x200398, 1, RI_E2_ONLINE }, { 0x2003a0, 1, RI_E2_ONLINE },
-	{ 0x2003a8, 2, RI_E2_ONLINE }, { 0x200400, 1, RI_ALL_ONLINE },
-	{ 0x200404, 255, RI_E1E1H_OFFLINE }, { 0x202000, 4, RI_ALL_ONLINE },
-	{ 0x202010, 2044, RI_ALL_OFFLINE }, { 0x220000, 1, RI_ALL_ONLINE },
-	{ 0x220004, 5631, RI_ALL_OFFLINE }, { 0x225800, 2560, RI_E1HE2_OFFLINE},
-	{ 0x228000, 1, RI_ALL_ONLINE }, { 0x228004, 8191, RI_E1HE2_OFFLINE },
+	{ 0x182000, 4, RI_E3_ONLINE }, { 0x1a0000, 1, RI_ALL_ONLINE },
+	{ 0x1a0004, 5631, RI_ALL_OFFLINE },
+	{ 0x1a5800, 2560, RI_E1HE2E3_OFFLINE },
+	{ 0x1a8000, 1, RI_ALL_ONLINE }, { 0x1a8004, 8191, RI_E1HE2E3_OFFLINE },
+	{ 0x1b0000, 1, RI_ALL_ONLINE }, { 0x1b0004, 15, RI_E1H_OFFLINE },
+	{ 0x1b0040, 1, RI_E1HE2E3_ONLINE }, { 0x1b0044, 239, RI_E1H_OFFLINE },
+	{ 0x1b0400, 1, RI_ALL_ONLINE }, { 0x1b0404, 255, RI_E1H_OFFLINE },
+	{ 0x1b0800, 1, RI_ALL_ONLINE }, { 0x1b0840, 1, RI_E1HE2E3_ONLINE },
+	{ 0x1b0c00, 1, RI_ALL_ONLINE }, { 0x1b1000, 1, RI_ALL_ONLINE },
+	{ 0x1b1040, 1, RI_E1HE2E3_ONLINE }, { 0x1b1400, 1, RI_ALL_ONLINE },
+	{ 0x1b1440, 1, RI_E1HE2E3_ONLINE }, { 0x1b1480, 1, RI_E1HE2E3_ONLINE },
+	{ 0x1b14c0, 1, RI_E1HE2E3_ONLINE }, { 0x1b1800, 128, RI_ALL_OFFLINE },
+	{ 0x1b1c00, 128, RI_ALL_OFFLINE }, { 0x1b2000, 1, RI_ALL_ONLINE },
+	{ 0x1b2400, 1, RI_E1HE2E3_ONLINE }, { 0x1b2404, 5631, RI_E2E3_OFFLINE },
+	{ 0x1b8000, 1, RI_ALL_ONLINE }, { 0x1b8040, 1, RI_ALL_ONLINE },
+	{ 0x1b8080, 1, RI_ALL_ONLINE }, { 0x1b80c0, 1, RI_ALL_ONLINE },
+	{ 0x1b8100, 1, RI_ALL_ONLINE }, { 0x1b8140, 1, RI_ALL_ONLINE },
+	{ 0x1b8180, 1, RI_ALL_ONLINE }, { 0x1b81c0, 1, RI_ALL_ONLINE },
+	{ 0x1b8200, 1, RI_ALL_ONLINE }, { 0x1b8240, 1, RI_ALL_ONLINE },
+	{ 0x1b8280, 1, RI_ALL_ONLINE }, { 0x1b82c0, 1, RI_ALL_ONLINE },
+	{ 0x1b8300, 1, RI_ALL_ONLINE }, { 0x1b8340, 1, RI_ALL_ONLINE },
+	{ 0x1b8380, 1, RI_ALL_ONLINE }, { 0x1b83c0, 1, RI_ALL_ONLINE },
+	{ 0x1b8400, 1, RI_ALL_ONLINE }, { 0x1b8440, 1, RI_ALL_ONLINE },
+	{ 0x1b8480, 1, RI_ALL_ONLINE }, { 0x1b84c0, 1, RI_ALL_ONLINE },
+	{ 0x1b8500, 1, RI_ALL_ONLINE }, { 0x1b8540, 1, RI_ALL_ONLINE },
+	{ 0x1b8580, 1, RI_ALL_ONLINE }, { 0x1b85c0, 19, RI_E2E3_ONLINE },
+	{ 0x1b8800, 1, RI_ALL_ONLINE }, { 0x1b8840, 1, RI_ALL_ONLINE },
+	{ 0x1b8880, 1, RI_ALL_ONLINE }, { 0x1b88c0, 1, RI_ALL_ONLINE },
+	{ 0x1b8900, 1, RI_ALL_ONLINE }, { 0x1b8940, 1, RI_ALL_ONLINE },
+	{ 0x1b8980, 1, RI_ALL_ONLINE }, { 0x1b89c0, 1, RI_ALL_ONLINE },
+	{ 0x1b8a00, 1, RI_ALL_ONLINE }, { 0x1b8a40, 1, RI_ALL_ONLINE },
+	{ 0x1b8a80, 1, RI_ALL_ONLINE }, { 0x1b8ac0, 1, RI_ALL_ONLINE },
+	{ 0x1b8b00, 1, RI_ALL_ONLINE }, { 0x1b8b40, 1, RI_ALL_ONLINE },
+	{ 0x1b8b80, 1, RI_ALL_ONLINE }, { 0x1b8bc0, 1, RI_ALL_ONLINE },
+	{ 0x1b8c00, 1, RI_ALL_ONLINE }, { 0x1b8c40, 1, RI_ALL_ONLINE },
+	{ 0x1b8c80, 1, RI_ALL_ONLINE }, { 0x1b8cc0, 1, RI_ALL_ONLINE },
+	{ 0x1b8cc4, 1, RI_E2E3_ONLINE }, { 0x1b8d00, 1, RI_ALL_ONLINE },
+	{ 0x1b8d40, 1, RI_ALL_ONLINE }, { 0x1b8d80, 1, RI_ALL_ONLINE },
+	{ 0x1b8dc0, 1, RI_ALL_ONLINE }, { 0x1b8e00, 1, RI_ALL_ONLINE },
+	{ 0x1b8e40, 1, RI_ALL_ONLINE }, { 0x1b8e80, 1, RI_ALL_ONLINE },
+	{ 0x1b8e84, 1, RI_E2E3_ONLINE }, { 0x1b8ec0, 1, RI_E1HE2E3_ONLINE },
+	{ 0x1b8f00, 1, RI_E1HE2E3_ONLINE }, { 0x1b8f40, 1, RI_E1HE2E3_ONLINE },
+	{ 0x1b8f80, 1, RI_E1HE2E3_ONLINE }, { 0x1b8fc0, 1, RI_E1HE2E3_ONLINE },
+	{ 0x1b8fc4, 2, RI_E2E3_ONLINE }, { 0x1b8fd0, 6, RI_E2E3_ONLINE },
+	{ 0x1b8fe8, 2, RI_E3_ONLINE }, { 0x1b9000, 1, RI_E2E3_ONLINE },
+	{ 0x1b9040, 3, RI_E2E3_ONLINE }, { 0x1b905c, 1, RI_E3_ONLINE },
+	{ 0x1b9400, 14, RI_E2E3_ONLINE }, { 0x1b943c, 19, RI_E2E3_ONLINE },
+	{ 0x1b9490, 10, RI_E2E3_ONLINE }, { 0x1c0000, 2, RI_ALL_ONLINE },
+	{ 0x200000, 65, RI_ALL_ONLINE }, { 0x20014c, 2, RI_E1HE2E3_ONLINE },
+	{ 0x200200, 58, RI_ALL_ONLINE }, { 0x200340, 4, RI_ALL_ONLINE },
+	{ 0x200380, 1, RI_E2E3_ONLINE }, { 0x200388, 1, RI_E2E3_ONLINE },
+	{ 0x200390, 1, RI_E2E3_ONLINE }, { 0x200398, 1, RI_E2E3_ONLINE },
+	{ 0x2003a0, 1, RI_E2E3_ONLINE }, { 0x2003a8, 2, RI_E2E3_ONLINE },
+	{ 0x200400, 1, RI_ALL_ONLINE }, { 0x200404, 255, RI_E1E1H_OFFLINE },
+	{ 0x202000, 4, RI_ALL_ONLINE }, { 0x202010, 2044, RI_ALL_OFFLINE },
+	{ 0x204000, 4, RI_E3_ONLINE }, { 0x220000, 1, RI_ALL_ONLINE },
+	{ 0x220004, 5631, RI_ALL_OFFLINE },
+	{ 0x225800, 2560, RI_E1HE2E3_OFFLINE },
+	{ 0x228000, 1, RI_ALL_ONLINE }, { 0x228004, 8191, RI_E1HE2E3_OFFLINE },
 	{ 0x230000, 1, RI_ALL_ONLINE }, { 0x230004, 15, RI_E1H_OFFLINE },
-	{ 0x230040, 1, RI_E1HE2_ONLINE }, { 0x230044, 239, RI_E1H_OFFLINE },
+	{ 0x230040, 1, RI_E1HE2E3_ONLINE }, { 0x230044, 239, RI_E1H_OFFLINE },
 	{ 0x230400, 1, RI_ALL_ONLINE }, { 0x230404, 255, RI_E1H_OFFLINE },
-	{ 0x230800, 1, RI_ALL_ONLINE }, { 0x230840, 1, RI_E1HE2_ONLINE },
+	{ 0x230800, 1, RI_ALL_ONLINE }, { 0x230840, 1, RI_E1HE2E3_ONLINE },
 	{ 0x230c00, 1, RI_ALL_ONLINE }, { 0x231000, 1, RI_ALL_ONLINE },
-	{ 0x231040, 1, RI_E1HE2_ONLINE }, { 0x231400, 1, RI_ALL_ONLINE },
-	{ 0x231440, 1, RI_E1HE2_ONLINE }, { 0x231480, 1, RI_E1HE2_ONLINE },
-	{ 0x2314c0, 1, RI_E1HE2_ONLINE }, { 0x231800, 128, RI_ALL_OFFLINE },
+	{ 0x231040, 1, RI_E1HE2E3_ONLINE }, { 0x231400, 1, RI_ALL_ONLINE },
+	{ 0x231440, 1, RI_E1HE2E3_ONLINE }, { 0x231480, 1, RI_E1HE2E3_ONLINE },
+	{ 0x2314c0, 1, RI_E1HE2E3_ONLINE }, { 0x231800, 128, RI_ALL_OFFLINE },
 	{ 0x231c00, 128, RI_ALL_OFFLINE }, { 0x232000, 1, RI_ALL_ONLINE },
-	{ 0x232400, 1, RI_E1HE2_ONLINE }, { 0x232404, 5631, RI_E2_OFFLINE },
+	{ 0x232400, 1, RI_E1HE2E3_ONLINE }, { 0x232404, 5631, RI_E2E3_OFFLINE },
 	{ 0x238000, 1, RI_ALL_ONLINE }, { 0x238040, 1, RI_ALL_ONLINE },
 	{ 0x238080, 1, RI_ALL_ONLINE }, { 0x2380c0, 1, RI_ALL_ONLINE },
 	{ 0x238100, 1, RI_ALL_ONLINE }, { 0x238140, 1, RI_ALL_ONLINE },
@@ -379,7 +475,7 @@
 	{ 0x238400, 1, RI_ALL_ONLINE }, { 0x238440, 1, RI_ALL_ONLINE },
 	{ 0x238480, 1, RI_ALL_ONLINE }, { 0x2384c0, 1, RI_ALL_ONLINE },
 	{ 0x238500, 1, RI_ALL_ONLINE }, { 0x238540, 1, RI_ALL_ONLINE },
-	{ 0x238580, 1, RI_ALL_ONLINE }, { 0x2385c0, 19, RI_E2_ONLINE },
+	{ 0x238580, 1, RI_ALL_ONLINE }, { 0x2385c0, 19, RI_E2E3_ONLINE },
 	{ 0x238800, 1, RI_ALL_ONLINE }, { 0x238840, 1, RI_ALL_ONLINE },
 	{ 0x238880, 1, RI_ALL_ONLINE }, { 0x2388c0, 1, RI_ALL_ONLINE },
 	{ 0x238900, 1, RI_ALL_ONLINE }, { 0x238940, 1, RI_ALL_ONLINE },
@@ -390,88 +486,91 @@
 	{ 0x238b80, 1, RI_ALL_ONLINE }, { 0x238bc0, 1, RI_ALL_ONLINE },
 	{ 0x238c00, 1, RI_ALL_ONLINE }, { 0x238c40, 1, RI_ALL_ONLINE },
 	{ 0x238c80, 1, RI_ALL_ONLINE }, { 0x238cc0, 1, RI_ALL_ONLINE },
-	{ 0x238cc4, 1, RI_E2_ONLINE }, { 0x238d00, 1, RI_ALL_ONLINE },
+	{ 0x238cc4, 1, RI_E2E3_ONLINE }, { 0x238d00, 1, RI_ALL_ONLINE },
 	{ 0x238d40, 1, RI_ALL_ONLINE }, { 0x238d80, 1, RI_ALL_ONLINE },
 	{ 0x238dc0, 1, RI_ALL_ONLINE }, { 0x238e00, 1, RI_ALL_ONLINE },
 	{ 0x238e40, 1, RI_ALL_ONLINE }, { 0x238e80, 1, RI_ALL_ONLINE },
-	{ 0x238e84, 1, RI_E2_ONLINE }, { 0x238ec0, 1, RI_E1HE2_ONLINE },
-	{ 0x238f00, 1, RI_E1HE2_ONLINE }, { 0x238f40, 1, RI_E1HE2_ONLINE },
-	{ 0x238f80, 1, RI_E1HE2_ONLINE }, { 0x238fc0, 1, RI_E1HE2_ONLINE },
-	{ 0x238fc4, 2, RI_E2_ONLINE }, { 0x238fd0, 6, RI_E2_ONLINE },
-	{ 0x239000, 1, RI_E2_ONLINE }, { 0x239040, 3, RI_E2_ONLINE },
+	{ 0x238e84, 1, RI_E2E3_ONLINE }, { 0x238ec0, 1, RI_E1HE2E3_ONLINE },
+	{ 0x238f00, 1, RI_E1HE2E3_ONLINE }, { 0x238f40, 1, RI_E1HE2E3_ONLINE },
+	{ 0x238f80, 1, RI_E1HE2E3_ONLINE }, { 0x238fc0, 1, RI_E1HE2E3_ONLINE },
+	{ 0x238fc4, 2, RI_E2E3_ONLINE }, { 0x238fd0, 6, RI_E2E3_ONLINE },
+	{ 0x238fe8, 2, RI_E3_ONLINE }, { 0x239000, 1, RI_E2E3_ONLINE },
+	{ 0x239040, 3, RI_E2E3_ONLINE }, { 0x23905c, 1, RI_E3_ONLINE },
 	{ 0x240000, 2, RI_ALL_ONLINE }, { 0x280000, 65, RI_ALL_ONLINE },
-	{ 0x28014c, 2, RI_E1HE2_ONLINE }, { 0x280200, 58, RI_ALL_ONLINE },
-	{ 0x280340, 4, RI_ALL_ONLINE }, { 0x280380, 1, RI_E2_ONLINE },
-	{ 0x280388, 1, RI_E2_ONLINE }, { 0x280390, 1, RI_E2_ONLINE },
-	{ 0x280398, 1, RI_E2_ONLINE }, { 0x2803a0, 1, RI_E2_ONLINE },
-	{ 0x2803a8, 2, RI_E2_ONLINE }, { 0x280400, 1, RI_ALL_ONLINE },
+	{ 0x28014c, 2, RI_E1HE2E3_ONLINE }, { 0x280200, 58, RI_ALL_ONLINE },
+	{ 0x280340, 4, RI_ALL_ONLINE }, { 0x280380, 1, RI_E2E3_ONLINE },
+	{ 0x280388, 1, RI_E2E3_ONLINE }, { 0x280390, 1, RI_E2E3_ONLINE },
+	{ 0x280398, 1, RI_E2E3_ONLINE }, { 0x2803a0, 1, RI_E2E3_ONLINE },
+	{ 0x2803a8, 2, RI_E2E3_ONLINE }, { 0x280400, 1, RI_ALL_ONLINE },
 	{ 0x280404, 255, RI_E1E1H_OFFLINE }, { 0x282000, 4, RI_ALL_ONLINE },
-	{ 0x282010, 2044, RI_ALL_OFFLINE }, { 0x2a0000, 1, RI_ALL_ONLINE },
-	{ 0x2a0004, 5631, RI_ALL_OFFLINE }, { 0x2a5800, 2560, RI_E1HE2_OFFLINE},
-	{ 0x2a8000, 1, RI_ALL_ONLINE }, { 0x2a8004, 8191, RI_E1HE2_OFFLINE },
-	{ 0x2b0000, 1, RI_ALL_ONLINE }, { 0x2b0004, 15, RI_E1H_OFFLINE },
-	{ 0x2b0040, 1, RI_E1HE2_ONLINE }, { 0x2b0044, 239, RI_E1H_OFFLINE },
-	{ 0x2b0400, 1, RI_ALL_ONLINE }, { 0x2b0404, 255, RI_E1H_OFFLINE },
-	{ 0x2b0800, 1, RI_ALL_ONLINE }, { 0x2b0840, 1, RI_E1HE2_ONLINE },
-	{ 0x2b0c00, 1, RI_ALL_ONLINE }, { 0x2b1000, 1, RI_ALL_ONLINE },
-	{ 0x2b1040, 1, RI_E1HE2_ONLINE }, { 0x2b1400, 1, RI_ALL_ONLINE },
-	{ 0x2b1440, 1, RI_E1HE2_ONLINE }, { 0x2b1480, 1, RI_E1HE2_ONLINE },
-	{ 0x2b14c0, 1, RI_E1HE2_ONLINE }, { 0x2b1800, 128, RI_ALL_OFFLINE },
-	{ 0x2b1c00, 128, RI_ALL_OFFLINE }, { 0x2b2000, 1, RI_ALL_ONLINE },
-	{ 0x2b2400, 1, RI_E1HE2_ONLINE }, { 0x2b2404, 5631, RI_E2_OFFLINE },
-	{ 0x2b8000, 1, RI_ALL_ONLINE }, { 0x2b8040, 1, RI_ALL_ONLINE },
-	{ 0x2b8080, 1, RI_ALL_ONLINE }, { 0x2b80c0, 1, RI_ALL_ONLINE },
-	{ 0x2b8100, 1, RI_ALL_ONLINE }, { 0x2b8140, 1, RI_ALL_ONLINE },
-	{ 0x2b8180, 1, RI_ALL_ONLINE }, { 0x2b81c0, 1, RI_ALL_ONLINE },
-	{ 0x2b8200, 1, RI_ALL_ONLINE }, { 0x2b8240, 1, RI_ALL_ONLINE },
-	{ 0x2b8280, 1, RI_ALL_ONLINE }, { 0x2b82c0, 1, RI_ALL_ONLINE },
-	{ 0x2b8300, 1, RI_ALL_ONLINE }, { 0x2b8340, 1, RI_ALL_ONLINE },
-	{ 0x2b8380, 1, RI_ALL_ONLINE }, { 0x2b83c0, 1, RI_ALL_ONLINE },
-	{ 0x2b8400, 1, RI_ALL_ONLINE }, { 0x2b8440, 1, RI_ALL_ONLINE },
-	{ 0x2b8480, 1, RI_ALL_ONLINE }, { 0x2b84c0, 1, RI_ALL_ONLINE },
-	{ 0x2b8500, 1, RI_ALL_ONLINE }, { 0x2b8540, 1, RI_ALL_ONLINE },
-	{ 0x2b8580, 1, RI_ALL_ONLINE }, { 0x2b85c0, 19, RI_E2_ONLINE },
-	{ 0x2b8800, 1, RI_ALL_ONLINE }, { 0x2b8840, 1, RI_ALL_ONLINE },
-	{ 0x2b8880, 1, RI_ALL_ONLINE }, { 0x2b88c0, 1, RI_ALL_ONLINE },
-	{ 0x2b8900, 1, RI_ALL_ONLINE }, { 0x2b8940, 1, RI_ALL_ONLINE },
-	{ 0x2b8980, 1, RI_ALL_ONLINE }, { 0x2b89c0, 1, RI_ALL_ONLINE },
-	{ 0x2b8a00, 1, RI_ALL_ONLINE }, { 0x2b8a40, 1, RI_ALL_ONLINE },
-	{ 0x2b8a80, 1, RI_ALL_ONLINE }, { 0x2b8ac0, 1, RI_ALL_ONLINE },
-	{ 0x2b8b00, 1, RI_ALL_ONLINE }, { 0x2b8b40, 1, RI_ALL_ONLINE },
-	{ 0x2b8b80, 1, RI_ALL_ONLINE }, { 0x2b8bc0, 1, RI_ALL_ONLINE },
-	{ 0x2b8c00, 1, RI_ALL_ONLINE }, { 0x2b8c40, 1, RI_ALL_ONLINE },
-	{ 0x2b8c80, 1, RI_ALL_ONLINE }, { 0x2b8cc0, 1, RI_ALL_ONLINE },
-	{ 0x2b8cc4, 1, RI_E2_ONLINE }, { 0x2b8d00, 1, RI_ALL_ONLINE },
-	{ 0x2b8d40, 1, RI_ALL_ONLINE }, { 0x2b8d80, 1, RI_ALL_ONLINE },
-	{ 0x2b8dc0, 1, RI_ALL_ONLINE }, { 0x2b8e00, 1, RI_ALL_ONLINE },
-	{ 0x2b8e40, 1, RI_ALL_ONLINE }, { 0x2b8e80, 1, RI_ALL_ONLINE },
-	{ 0x2b8e84, 1, RI_E2_ONLINE }, { 0x2b8ec0, 1, RI_E1HE2_ONLINE },
-	{ 0x2b8f00, 1, RI_E1HE2_ONLINE }, { 0x2b8f40, 1, RI_E1HE2_ONLINE },
-	{ 0x2b8f80, 1, RI_E1HE2_ONLINE }, { 0x2b8fc0, 1, RI_E1HE2_ONLINE },
-	{ 0x2b8fc4, 2, RI_E2_ONLINE }, { 0x2b8fd0, 6, RI_E2_ONLINE },
-	{ 0x2b9000, 1, RI_E2_ONLINE }, { 0x2b9040, 3, RI_E2_ONLINE },
-	{ 0x2b9400, 14, RI_E2_ONLINE }, { 0x2b943c, 19, RI_E2_ONLINE },
-	{ 0x2b9490, 10, RI_E2_ONLINE }, { 0x2c0000, 2, RI_ALL_ONLINE },
-	{ 0x300000, 65, RI_ALL_ONLINE }, { 0x30014c, 2, RI_E1HE2_ONLINE },
-	{ 0x300200, 58, RI_ALL_ONLINE }, { 0x300340, 4, RI_ALL_ONLINE },
-	{ 0x300380, 1, RI_E2_ONLINE }, { 0x300388, 1, RI_E2_ONLINE },
-	{ 0x300390, 1, RI_E2_ONLINE }, { 0x300398, 1, RI_E2_ONLINE },
-	{ 0x3003a0, 1, RI_E2_ONLINE }, { 0x3003a8, 2, RI_E2_ONLINE },
-	{ 0x300400, 1, RI_ALL_ONLINE }, { 0x300404, 255, RI_E1E1H_OFFLINE },
-	{ 0x302000, 4, RI_ALL_ONLINE }, { 0x302010, 2044, RI_ALL_OFFLINE },
+	{ 0x282010, 2044, RI_ALL_OFFLINE }, { 0x284000, 4, RI_E3_ONLINE },
+	{ 0x2a0000, 1, RI_ALL_ONLINE }, { 0x2a0004, 5631, RI_ALL_OFFLINE },
+	{ 0x2a5800, 2560, RI_E1HE2E3_OFFLINE }, { 0x2a8000, 1, RI_ALL_ONLINE },
+	{ 0x2a8004, 8191, RI_E1HE2E3_OFFLINE }, { 0x2b0000, 1, RI_ALL_ONLINE },
+	{ 0x2b0004, 15, RI_E1H_OFFLINE }, { 0x2b0040, 1, RI_E1HE2E3_ONLINE },
+	{ 0x2b0044, 239, RI_E1H_OFFLINE }, { 0x2b0400, 1, RI_ALL_ONLINE },
+	{ 0x2b0404, 255, RI_E1H_OFFLINE }, { 0x2b0800, 1, RI_ALL_ONLINE },
+	{ 0x2b0840, 1, RI_E1HE2E3_ONLINE }, { 0x2b0c00, 1, RI_ALL_ONLINE },
+	{ 0x2b1000, 1, RI_ALL_ONLINE }, { 0x2b1040, 1, RI_E1HE2E3_ONLINE },
+	{ 0x2b1400, 1, RI_ALL_ONLINE }, { 0x2b1440, 1, RI_E1HE2E3_ONLINE },
+	{ 0x2b1480, 1, RI_E1HE2E3_ONLINE }, { 0x2b14c0, 1, RI_E1HE2E3_ONLINE },
+	{ 0x2b1800, 128, RI_ALL_OFFLINE }, { 0x2b1c00, 128, RI_ALL_OFFLINE },
+	{ 0x2b2000, 1, RI_ALL_ONLINE }, { 0x2b2400, 1, RI_E1HE2E3_ONLINE },
+	{ 0x2b2404, 5631, RI_E2E3_OFFLINE }, { 0x2b8000, 1, RI_ALL_ONLINE },
+	{ 0x2b8040, 1, RI_ALL_ONLINE }, { 0x2b8080, 1, RI_ALL_ONLINE },
+	{ 0x2b80c0, 1, RI_ALL_ONLINE }, { 0x2b8100, 1, RI_ALL_ONLINE },
+	{ 0x2b8140, 1, RI_ALL_ONLINE }, { 0x2b8180, 1, RI_ALL_ONLINE },
+	{ 0x2b81c0, 1, RI_ALL_ONLINE }, { 0x2b8200, 1, RI_ALL_ONLINE },
+	{ 0x2b8240, 1, RI_ALL_ONLINE }, { 0x2b8280, 1, RI_ALL_ONLINE },
+	{ 0x2b82c0, 1, RI_ALL_ONLINE }, { 0x2b8300, 1, RI_ALL_ONLINE },
+	{ 0x2b8340, 1, RI_ALL_ONLINE }, { 0x2b8380, 1, RI_ALL_ONLINE },
+	{ 0x2b83c0, 1, RI_ALL_ONLINE }, { 0x2b8400, 1, RI_ALL_ONLINE },
+	{ 0x2b8440, 1, RI_ALL_ONLINE }, { 0x2b8480, 1, RI_ALL_ONLINE },
+	{ 0x2b84c0, 1, RI_ALL_ONLINE }, { 0x2b8500, 1, RI_ALL_ONLINE },
+	{ 0x2b8540, 1, RI_ALL_ONLINE }, { 0x2b8580, 1, RI_ALL_ONLINE },
+	{ 0x2b85c0, 19, RI_E2E3_ONLINE }, { 0x2b8800, 1, RI_ALL_ONLINE },
+	{ 0x2b8840, 1, RI_ALL_ONLINE }, { 0x2b8880, 1, RI_ALL_ONLINE },
+	{ 0x2b88c0, 1, RI_ALL_ONLINE }, { 0x2b8900, 1, RI_ALL_ONLINE },
+	{ 0x2b8940, 1, RI_ALL_ONLINE }, { 0x2b8980, 1, RI_ALL_ONLINE },
+	{ 0x2b89c0, 1, RI_ALL_ONLINE }, { 0x2b8a00, 1, RI_ALL_ONLINE },
+	{ 0x2b8a40, 1, RI_ALL_ONLINE }, { 0x2b8a80, 1, RI_ALL_ONLINE },
+	{ 0x2b8ac0, 1, RI_ALL_ONLINE }, { 0x2b8b00, 1, RI_ALL_ONLINE },
+	{ 0x2b8b40, 1, RI_ALL_ONLINE }, { 0x2b8b80, 1, RI_ALL_ONLINE },
+	{ 0x2b8bc0, 1, RI_ALL_ONLINE }, { 0x2b8c00, 1, RI_ALL_ONLINE },
+	{ 0x2b8c40, 1, RI_ALL_ONLINE }, { 0x2b8c80, 1, RI_ALL_ONLINE },
+	{ 0x2b8cc0, 1, RI_ALL_ONLINE }, { 0x2b8cc4, 1, RI_E2E3_ONLINE },
+	{ 0x2b8d00, 1, RI_ALL_ONLINE }, { 0x2b8d40, 1, RI_ALL_ONLINE },
+	{ 0x2b8d80, 1, RI_ALL_ONLINE }, { 0x2b8dc0, 1, RI_ALL_ONLINE },
+	{ 0x2b8e00, 1, RI_ALL_ONLINE }, { 0x2b8e40, 1, RI_ALL_ONLINE },
+	{ 0x2b8e80, 1, RI_ALL_ONLINE }, { 0x2b8e84, 1, RI_E2E3_ONLINE },
+	{ 0x2b8ec0, 1, RI_E1HE2E3_ONLINE }, { 0x2b8f00, 1, RI_E1HE2E3_ONLINE },
+	{ 0x2b8f40, 1, RI_E1HE2E3_ONLINE }, { 0x2b8f80, 1, RI_E1HE2E3_ONLINE },
+	{ 0x2b8fc0, 1, RI_E1HE2E3_ONLINE }, { 0x2b8fc4, 2, RI_E2E3_ONLINE },
+	{ 0x2b8fd0, 6, RI_E2E3_ONLINE }, { 0x2b8fe8, 2, RI_E3_ONLINE },
+	{ 0x2b9000, 1, RI_E2E3_ONLINE }, { 0x2b9040, 3, RI_E2E3_ONLINE },
+	{ 0x2b905c, 1, RI_E3_ONLINE }, { 0x2b9400, 14, RI_E2E3_ONLINE },
+	{ 0x2b943c, 19, RI_E2E3_ONLINE }, { 0x2b9490, 10, RI_E2E3_ONLINE },
+	{ 0x2c0000, 2, RI_ALL_ONLINE }, { 0x300000, 65, RI_ALL_ONLINE },
+	{ 0x30014c, 2, RI_E1HE2E3_ONLINE }, { 0x300200, 58, RI_ALL_ONLINE },
+	{ 0x300340, 4, RI_ALL_ONLINE }, { 0x300380, 1, RI_E2E3_ONLINE },
+	{ 0x300388, 1, RI_E2E3_ONLINE }, { 0x300390, 1, RI_E2E3_ONLINE },
+	{ 0x300398, 1, RI_E2E3_ONLINE }, { 0x3003a0, 1, RI_E2E3_ONLINE },
+	{ 0x3003a8, 2, RI_E2E3_ONLINE }, { 0x300400, 1, RI_ALL_ONLINE },
+	{ 0x300404, 255, RI_E1E1H_OFFLINE }, { 0x302000, 4, RI_ALL_ONLINE },
+	{ 0x302010, 2044, RI_ALL_OFFLINE }, { 0x304000, 4, RI_E3_ONLINE },
 	{ 0x320000, 1, RI_ALL_ONLINE }, { 0x320004, 5631, RI_ALL_OFFLINE },
-	{ 0x325800, 2560, RI_E1HE2_OFFLINE }, { 0x328000, 1, RI_ALL_ONLINE },
-	{ 0x328004, 8191, RI_E1HE2_OFFLINE }, { 0x330000, 1, RI_ALL_ONLINE },
-	{ 0x330004, 15, RI_E1H_OFFLINE }, { 0x330040, 1, RI_E1HE2_ONLINE },
+	{ 0x325800, 2560, RI_E1HE2E3_OFFLINE }, { 0x328000, 1, RI_ALL_ONLINE },
+	{ 0x328004, 8191, RI_E1HE2E3_OFFLINE }, { 0x330000, 1, RI_ALL_ONLINE },
+	{ 0x330004, 15, RI_E1H_OFFLINE }, { 0x330040, 1, RI_E1HE2E3_ONLINE },
 	{ 0x330044, 239, RI_E1H_OFFLINE }, { 0x330400, 1, RI_ALL_ONLINE },
 	{ 0x330404, 255, RI_E1H_OFFLINE }, { 0x330800, 1, RI_ALL_ONLINE },
-	{ 0x330840, 1, RI_E1HE2_ONLINE }, { 0x330c00, 1, RI_ALL_ONLINE },
-	{ 0x331000, 1, RI_ALL_ONLINE }, { 0x331040, 1, RI_E1HE2_ONLINE },
-	{ 0x331400, 1, RI_ALL_ONLINE }, { 0x331440, 1, RI_E1HE2_ONLINE },
-	{ 0x331480, 1, RI_E1HE2_ONLINE }, { 0x3314c0, 1, RI_E1HE2_ONLINE },
+	{ 0x330840, 1, RI_E1HE2E3_ONLINE }, { 0x330c00, 1, RI_ALL_ONLINE },
+	{ 0x331000, 1, RI_ALL_ONLINE }, { 0x331040, 1, RI_E1HE2E3_ONLINE },
+	{ 0x331400, 1, RI_ALL_ONLINE }, { 0x331440, 1, RI_E1HE2E3_ONLINE },
+	{ 0x331480, 1, RI_E1HE2E3_ONLINE }, { 0x3314c0, 1, RI_E1HE2E3_ONLINE },
 	{ 0x331800, 128, RI_ALL_OFFLINE }, { 0x331c00, 128, RI_ALL_OFFLINE },
-	{ 0x332000, 1, RI_ALL_ONLINE }, { 0x332400, 1, RI_E1HE2_ONLINE },
-	{ 0x332404, 5631, RI_E2_OFFLINE }, { 0x338000, 1, RI_ALL_ONLINE },
+	{ 0x332000, 1, RI_ALL_ONLINE }, { 0x332400, 1, RI_E1HE2E3_ONLINE },
+	{ 0x332404, 5631, RI_E2E3_OFFLINE }, { 0x338000, 1, RI_ALL_ONLINE },
 	{ 0x338040, 1, RI_ALL_ONLINE }, { 0x338080, 1, RI_ALL_ONLINE },
 	{ 0x3380c0, 1, RI_ALL_ONLINE }, { 0x338100, 1, RI_ALL_ONLINE },
 	{ 0x338140, 1, RI_ALL_ONLINE }, { 0x338180, 1, RI_ALL_ONLINE },
@@ -483,7 +582,7 @@
 	{ 0x338440, 1, RI_ALL_ONLINE }, { 0x338480, 1, RI_ALL_ONLINE },
 	{ 0x3384c0, 1, RI_ALL_ONLINE }, { 0x338500, 1, RI_ALL_ONLINE },
 	{ 0x338540, 1, RI_ALL_ONLINE }, { 0x338580, 1, RI_ALL_ONLINE },
-	{ 0x3385c0, 19, RI_E2_ONLINE }, { 0x338800, 1, RI_ALL_ONLINE },
+	{ 0x3385c0, 19, RI_E2E3_ONLINE }, { 0x338800, 1, RI_ALL_ONLINE },
 	{ 0x338840, 1, RI_ALL_ONLINE }, { 0x338880, 1, RI_ALL_ONLINE },
 	{ 0x3388c0, 1, RI_ALL_ONLINE }, { 0x338900, 1, RI_ALL_ONLINE },
 	{ 0x338940, 1, RI_ALL_ONLINE }, { 0x338980, 1, RI_ALL_ONLINE },
@@ -493,35 +592,48 @@
 	{ 0x338b40, 1, RI_ALL_ONLINE }, { 0x338b80, 1, RI_ALL_ONLINE },
 	{ 0x338bc0, 1, RI_ALL_ONLINE }, { 0x338c00, 1, RI_ALL_ONLINE },
 	{ 0x338c40, 1, RI_ALL_ONLINE }, { 0x338c80, 1, RI_ALL_ONLINE },
-	{ 0x338cc0, 1, RI_ALL_ONLINE }, { 0x338cc4, 1, RI_E2_ONLINE },
+	{ 0x338cc0, 1, RI_ALL_ONLINE }, { 0x338cc4, 1, RI_E2E3_ONLINE },
 	{ 0x338d00, 1, RI_ALL_ONLINE }, { 0x338d40, 1, RI_ALL_ONLINE },
 	{ 0x338d80, 1, RI_ALL_ONLINE }, { 0x338dc0, 1, RI_ALL_ONLINE },
 	{ 0x338e00, 1, RI_ALL_ONLINE }, { 0x338e40, 1, RI_ALL_ONLINE },
-	{ 0x338e80, 1, RI_ALL_ONLINE }, { 0x338e84, 1, RI_E2_ONLINE },
-	{ 0x338ec0, 1, RI_E1HE2_ONLINE }, { 0x338f00, 1, RI_E1HE2_ONLINE },
-	{ 0x338f40, 1, RI_E1HE2_ONLINE }, { 0x338f80, 1, RI_E1HE2_ONLINE },
-	{ 0x338fc0, 1, RI_E1HE2_ONLINE }, { 0x338fc4, 2, RI_E2_ONLINE },
-	{ 0x338fd0, 6, RI_E2_ONLINE }, { 0x339000, 1, RI_E2_ONLINE },
-	{ 0x339040, 3, RI_E2_ONLINE }, { 0x340000, 2, RI_ALL_ONLINE },
+	{ 0x338e80, 1, RI_ALL_ONLINE }, { 0x338e84, 1, RI_E2E3_ONLINE },
+	{ 0x338ec0, 1, RI_E1HE2E3_ONLINE }, { 0x338f00, 1, RI_E1HE2E3_ONLINE },
+	{ 0x338f40, 1, RI_E1HE2E3_ONLINE }, { 0x338f80, 1, RI_E1HE2E3_ONLINE },
+	{ 0x338fc0, 1, RI_E1HE2E3_ONLINE }, { 0x338fc4, 2, RI_E2E3_ONLINE },
+	{ 0x338fd0, 6, RI_E2E3_ONLINE }, { 0x338fe8, 2, RI_E3_ONLINE },
+	{ 0x339000, 1, RI_E2E3_ONLINE }, { 0x339040, 3, RI_E2E3_ONLINE },
+	{ 0x33905c, 1, RI_E3_ONLINE }, { 0x340000, 2, RI_ALL_ONLINE },
 };
+#define REGS_COUNT			ARRAY_SIZE(reg_addrs)
 
-#define IDLE_REGS_COUNT			237
-static const struct reg_addr idle_addrs[IDLE_REGS_COUNT] = {
+static const struct reg_addr idle_addrs[] = {
 	{ 0x2104, 1, RI_ALL_ONLINE }, { 0x2110, 2, RI_ALL_ONLINE },
 	{ 0x211c, 8, RI_ALL_ONLINE }, { 0x2814, 1, RI_ALL_ONLINE },
 	{ 0x281c, 2, RI_ALL_ONLINE }, { 0x2854, 1, RI_ALL_ONLINE },
-	{ 0x285c, 1, RI_ALL_ONLINE }, { 0x9010, 7, RI_E2_ONLINE },
-	{ 0x9030, 1, RI_E2_ONLINE }, { 0x9068, 16, RI_E2_ONLINE },
-	{ 0x9230, 2, RI_E2_ONLINE }, { 0x9244, 1, RI_E2_ONLINE },
-	{ 0x9298, 1, RI_E2_ONLINE }, { 0x92a8, 1, RI_E2_ONLINE },
-	{ 0xa38c, 1, RI_ALL_ONLINE }, { 0xa3c4, 1, RI_E1HE2_ONLINE },
-	{ 0xa408, 1, RI_ALL_ONLINE }, { 0xa42c, 12, RI_ALL_ONLINE },
-	{ 0xa600, 5, RI_E1HE2_ONLINE }, { 0xa618, 1, RI_E1HE2_ONLINE },
-	{ 0xa714, 1, RI_E2_ONLINE }, { 0xa720, 1, RI_E2_ONLINE },
-	{ 0xa750, 1, RI_E2_ONLINE }, { 0xc09c, 1, RI_E1E1H_ONLINE },
-	{ 0x103b0, 1, RI_ALL_ONLINE }, { 0x103c0, 1, RI_ALL_ONLINE },
-	{ 0x103d0, 1, RI_E1H_ONLINE }, { 0x183bc, 1, RI_E2_ONLINE },
-	{ 0x183cc, 1, RI_E2_ONLINE }, { 0x2021c, 11, RI_ALL_ONLINE },
+	{ 0x285c, 1, RI_ALL_ONLINE }, { 0x3040, 1, RI_ALL_ONLINE },
+	{ 0x9010, 7, RI_E2E3_ONLINE }, { 0x9030, 1, RI_E2E3_ONLINE },
+	{ 0x9068, 16, RI_E2E3_ONLINE }, { 0x9230, 2, RI_E2E3_ONLINE },
+	{ 0x9244, 1, RI_E2E3_ONLINE }, { 0x9298, 1, RI_E2E3_ONLINE },
+	{ 0x92a8, 1, RI_E2E3_ONLINE }, { 0xa38c, 1, RI_ALL_ONLINE },
+	{ 0xa3c4, 1, RI_E1HE2E3_ONLINE }, { 0xa404, 3, RI_ALL_ONLINE },
+	{ 0xa42c, 12, RI_ALL_ONLINE }, { 0xa600, 5, RI_E1HE2E3_ONLINE },
+	{ 0xa618, 1, RI_E1HE2E3_ONLINE }, { 0xa714, 1, RI_E2E3_ONLINE },
+	{ 0xa720, 1, RI_E2E3_ONLINE }, { 0xa750, 1, RI_E2E3_ONLINE },
+	{ 0xc09c, 1, RI_E1E1H_ONLINE }, { 0x103b0, 1, RI_ALL_ONLINE },
+	{ 0x103c0, 1, RI_ALL_ONLINE }, { 0x103d0, 1, RI_E1H_ONLINE },
+	{ 0x10418, 1, RI_ALL_ONLINE }, { 0x10420, 1, RI_ALL_ONLINE },
+	{ 0x10428, 1, RI_ALL_ONLINE }, { 0x10460, 1, RI_ALL_ONLINE },
+	{ 0x10474, 1, RI_ALL_ONLINE }, { 0x104e0, 1, RI_ALL_ONLINE },
+	{ 0x104ec, 1, RI_ALL_ONLINE }, { 0x104f8, 1, RI_ALL_ONLINE },
+	{ 0x10508, 1, RI_ALL_ONLINE }, { 0x10530, 1, RI_ALL_ONLINE },
+	{ 0x10538, 1, RI_ALL_ONLINE }, { 0x10548, 1, RI_ALL_ONLINE },
+	{ 0x10558, 1, RI_ALL_ONLINE }, { 0x182a8, 1, RI_E2E3_ONLINE },
+	{ 0x182b8, 1, RI_E2E3_ONLINE }, { 0x18308, 1, RI_E2E3_ONLINE },
+	{ 0x18318, 1, RI_E2E3_ONLINE }, { 0x18338, 1, RI_E2E3_ONLINE },
+	{ 0x18348, 1, RI_E2E3_ONLINE }, { 0x183bc, 1, RI_E2E3_ONLINE },
+	{ 0x183cc, 1, RI_E2E3_ONLINE }, { 0x18570, 1, RI_E3_ONLINE },
+	{ 0x18578, 1, RI_E3_ONLINE }, { 0x1858c, 1, RI_E3_ONLINE },
+	{ 0x18594, 1, RI_E3_ONLINE }, { 0x2021c, 11, RI_ALL_ONLINE },
 	{ 0x202a8, 1, RI_ALL_ONLINE }, { 0x202b8, 1, RI_ALL_ONLINE },
 	{ 0x20404, 1, RI_ALL_ONLINE }, { 0x2040c, 2, RI_ALL_ONLINE },
 	{ 0x2041c, 2, RI_ALL_ONLINE }, { 0x40154, 14, RI_ALL_ONLINE },
@@ -551,8 +663,8 @@
 	{ 0x102058, 1, RI_ALL_ONLINE }, { 0x102080, 16, RI_ALL_ONLINE },
 	{ 0x103004, 2, RI_ALL_ONLINE }, { 0x103068, 1, RI_ALL_ONLINE },
 	{ 0x103078, 1, RI_ALL_ONLINE }, { 0x103088, 1, RI_ALL_ONLINE },
-	{ 0x10309c, 2, RI_E1HE2_ONLINE }, { 0x1030b8, 2, RI_E2_ONLINE },
-	{ 0x1030cc, 1, RI_E2_ONLINE }, { 0x1030e0, 1, RI_E2_ONLINE },
+	{ 0x10309c, 2, RI_E1HE2E3_ONLINE }, { 0x1030b8, 2, RI_E2E3_ONLINE },
+	{ 0x1030cc, 1, RI_E2E3_ONLINE }, { 0x1030e0, 1, RI_E2E3_ONLINE },
 	{ 0x104004, 1, RI_ALL_ONLINE }, { 0x104018, 1, RI_ALL_ONLINE },
 	{ 0x104020, 1, RI_ALL_ONLINE }, { 0x10403c, 1, RI_ALL_ONLINE },
 	{ 0x1040fc, 1, RI_ALL_ONLINE }, { 0x10410c, 1, RI_ALL_ONLINE },
@@ -563,28 +675,27 @@
 	{ 0x120414, 15, RI_ALL_ONLINE }, { 0x120478, 2, RI_ALL_ONLINE },
 	{ 0x12052c, 1, RI_ALL_ONLINE }, { 0x120564, 3, RI_ALL_ONLINE },
 	{ 0x12057c, 1, RI_ALL_ONLINE }, { 0x12058c, 1, RI_ALL_ONLINE },
-	{ 0x120608, 1, RI_E1HE2_ONLINE }, { 0x120738, 1, RI_E2_ONLINE },
-	{ 0x120778, 2, RI_E2_ONLINE }, { 0x120808, 3, RI_ALL_ONLINE },
-	{ 0x120818, 1, RI_ALL_ONLINE }, { 0x120820, 1, RI_ALL_ONLINE },
-	{ 0x120828, 1, RI_ALL_ONLINE }, { 0x120830, 1, RI_ALL_ONLINE },
-	{ 0x120838, 1, RI_ALL_ONLINE }, { 0x120840, 1, RI_ALL_ONLINE },
-	{ 0x120848, 1, RI_ALL_ONLINE }, { 0x120850, 1, RI_ALL_ONLINE },
-	{ 0x120858, 1, RI_ALL_ONLINE }, { 0x120860, 1, RI_ALL_ONLINE },
-	{ 0x120868, 1, RI_ALL_ONLINE }, { 0x120870, 1, RI_ALL_ONLINE },
-	{ 0x120878, 1, RI_ALL_ONLINE }, { 0x120880, 1, RI_ALL_ONLINE },
-	{ 0x120888, 1, RI_ALL_ONLINE }, { 0x120890, 1, RI_ALL_ONLINE },
-	{ 0x120898, 1, RI_ALL_ONLINE }, { 0x1208a0, 1, RI_ALL_ONLINE },
-	{ 0x1208a8, 1, RI_ALL_ONLINE }, { 0x1208b0, 1, RI_ALL_ONLINE },
-	{ 0x1208b8, 1, RI_ALL_ONLINE }, { 0x1208c0, 1, RI_ALL_ONLINE },
-	{ 0x1208c8, 1, RI_ALL_ONLINE }, { 0x1208d0, 1, RI_ALL_ONLINE },
-	{ 0x1208d8, 1, RI_ALL_ONLINE }, { 0x1208e0, 1, RI_ALL_ONLINE },
-	{ 0x1208e8, 1, RI_ALL_ONLINE }, { 0x1208f0, 1, RI_ALL_ONLINE },
-	{ 0x1208f8, 1, RI_ALL_ONLINE }, { 0x120900, 1, RI_ALL_ONLINE },
-	{ 0x120908, 1, RI_ALL_ONLINE }, { 0x120940, 5, RI_E2_ONLINE },
-	{ 0x130030, 1, RI_E2_ONLINE }, { 0x13004c, 3, RI_E2_ONLINE },
-	{ 0x130064, 2, RI_E2_ONLINE }, { 0x13009c, 1, RI_E2_ONLINE },
-	{ 0x130130, 1, RI_E2_ONLINE }, { 0x13016c, 1, RI_E2_ONLINE },
-	{ 0x130300, 1, RI_E2_ONLINE }, { 0x130480, 1, RI_E2_ONLINE },
+	{ 0x120608, 1, RI_E1HE2E3_ONLINE }, { 0x120778, 2, RI_E2E3_ONLINE },
+	{ 0x120808, 3, RI_ALL_ONLINE }, { 0x120818, 1, RI_ALL_ONLINE },
+	{ 0x120820, 1, RI_ALL_ONLINE }, { 0x120828, 1, RI_ALL_ONLINE },
+	{ 0x120830, 1, RI_ALL_ONLINE }, { 0x120838, 1, RI_ALL_ONLINE },
+	{ 0x120840, 1, RI_ALL_ONLINE }, { 0x120848, 1, RI_ALL_ONLINE },
+	{ 0x120850, 1, RI_ALL_ONLINE }, { 0x120858, 1, RI_ALL_ONLINE },
+	{ 0x120860, 1, RI_ALL_ONLINE }, { 0x120868, 1, RI_ALL_ONLINE },
+	{ 0x120870, 1, RI_ALL_ONLINE }, { 0x120878, 1, RI_ALL_ONLINE },
+	{ 0x120880, 1, RI_ALL_ONLINE }, { 0x120888, 1, RI_ALL_ONLINE },
+	{ 0x120890, 1, RI_ALL_ONLINE }, { 0x120898, 1, RI_ALL_ONLINE },
+	{ 0x1208a0, 1, RI_ALL_ONLINE }, { 0x1208a8, 1, RI_ALL_ONLINE },
+	{ 0x1208b0, 1, RI_ALL_ONLINE }, { 0x1208b8, 1, RI_ALL_ONLINE },
+	{ 0x1208c0, 1, RI_ALL_ONLINE }, { 0x1208c8, 1, RI_ALL_ONLINE },
+	{ 0x1208d0, 1, RI_ALL_ONLINE }, { 0x1208d8, 1, RI_ALL_ONLINE },
+	{ 0x1208e0, 1, RI_ALL_ONLINE }, { 0x1208e8, 1, RI_ALL_ONLINE },
+	{ 0x1208f0, 1, RI_ALL_ONLINE }, { 0x1208f8, 1, RI_ALL_ONLINE },
+	{ 0x120900, 1, RI_ALL_ONLINE }, { 0x120908, 1, RI_ALL_ONLINE },
+	{ 0x130030, 1, RI_E2E3_ONLINE }, { 0x13004c, 3, RI_E2E3_ONLINE },
+	{ 0x130064, 2, RI_E2E3_ONLINE }, { 0x13009c, 1, RI_E2E3_ONLINE },
+	{ 0x130130, 1, RI_E2E3_ONLINE }, { 0x13016c, 1, RI_E2E3_ONLINE },
+	{ 0x130300, 1, RI_E2E3_ONLINE }, { 0x130480, 1, RI_E2E3_ONLINE },
 	{ 0x14005c, 2, RI_ALL_ONLINE }, { 0x1400d0, 2, RI_ALL_ONLINE },
 	{ 0x1400e0, 1, RI_ALL_ONLINE }, { 0x1401c8, 1, RI_ALL_ONLINE },
 	{ 0x140200, 6, RI_ALL_ONLINE }, { 0x16101c, 1, RI_ALL_ONLINE },
@@ -602,8 +713,8 @@
 	{ 0x168438, 1, RI_ALL_ONLINE }, { 0x168448, 1, RI_ALL_ONLINE },
 	{ 0x168a00, 128, RI_ALL_ONLINE }, { 0x16e200, 128, RI_E1H_ONLINE },
 	{ 0x16e404, 2, RI_E1H_ONLINE }, { 0x16e584, 64, RI_E1H_ONLINE },
-	{ 0x16e684, 2, RI_E1HE2_ONLINE }, { 0x16e68c, 4, RI_E1H_ONLINE },
-	{ 0x16e6fc, 4, RI_E2_ONLINE }, { 0x1700a4, 1, RI_ALL_ONLINE },
+	{ 0x16e684, 2, RI_E1HE2E3_ONLINE }, { 0x16e68c, 4, RI_E1H_ONLINE },
+	{ 0x16e6fc, 4, RI_E2E3_ONLINE }, { 0x1700a4, 1, RI_ALL_ONLINE },
 	{ 0x1700ac, 2, RI_ALL_ONLINE }, { 0x1700c0, 1, RI_ALL_ONLINE },
 	{ 0x170174, 1, RI_ALL_ONLINE }, { 0x170184, 1, RI_ALL_ONLINE },
 	{ 0x1800f4, 1, RI_ALL_ONLINE }, { 0x180104, 1, RI_ALL_ONLINE },
@@ -627,51 +738,61 @@
 	{ 0x338040, 1, RI_ALL_ONLINE }, { 0x338080, 1, RI_ALL_ONLINE },
 	{ 0x3380c0, 1, RI_ALL_ONLINE }
 };
+#define IDLE_REGS_COUNT			ARRAY_SIZE(idle_addrs)
 
-#define WREGS_COUNT_E1			1
 static const u32 read_reg_e1_0[] = { 0x1b1000 };
+#define WREGS_COUNT_E1			ARRAY_SIZE(read_reg_e1_0)
 
 static const struct wreg_addr wreg_addrs_e1[WREGS_COUNT_E1] = {
 	{ 0x1b0c00, 192, 1, read_reg_e1_0, RI_E1_OFFLINE }
 };
 
-#define WREGS_COUNT_E1H			1
 static const u32 read_reg_e1h_0[] = { 0x1b1040, 0x1b1000 };
+#define WREGS_COUNT_E1H			ARRAY_SIZE(read_reg_e1h_0)
 
 static const struct wreg_addr wreg_addrs_e1h[WREGS_COUNT_E1H] = {
 	{ 0x1b0c00, 256, 2, read_reg_e1h_0, RI_E1H_OFFLINE }
 };
 
-#define WREGS_COUNT_E2			1
 static const u32 read_reg_e2_0[] = { 0x1b1040, 0x1b1000 };
+#define WREGS_COUNT_E2			ARRAY_SIZE(read_reg_e2_0)
 
 static const struct wreg_addr wreg_addrs_e2[WREGS_COUNT_E2] = {
 	{ 0x1b0c00, 128, 2, read_reg_e2_0, RI_E2_OFFLINE }
 };
 
-static const struct dump_sign dump_sign_all = { 0x4d18b0a4, 0x60010, 0x3a };
+static const u32 read_reg_e3_0[] = { 0x1b1040, 0x1b1000 };
+#define WREGS_COUNT_E3			ARRAY_SIZE(read_reg_e3_0)
 
-#define TIMER_REGS_COUNT_E1		2
+static const struct wreg_addr wreg_addrs_e3[WREGS_COUNT_E3] = {
+	{ 0x1b0c00, 128, 2, read_reg_e3_0, RI_E3_OFFLINE } };
 
-static const u32 timer_status_regs_e1[TIMER_REGS_COUNT_E1] = {
-	0x164014, 0x164018 };
+static const struct dump_sign dump_sign_all = { 0x4dbe9fca, 0x60011, 0x3a };
+
+static const u32 timer_status_regs_e1[] = { 0x164014, 0x164018 };
+#define TIMER_REGS_COUNT_E1		ARRAY_SIZE(timer_status_regs_e1)
+
 static const u32 timer_scan_regs_e1[TIMER_REGS_COUNT_E1] = {
 	0x1640d0, 0x1640d4 };
 
-#define TIMER_REGS_COUNT_E1H		2
+static const u32 timer_status_regs_e1h[] = { 0x164014, 0x164018 };
+#define TIMER_REGS_COUNT_E1H		ARRAY_SIZE(timer_status_regs_e1h)
 
-static const u32 timer_status_regs_e1h[TIMER_REGS_COUNT_E1H] = {
-	0x164014, 0x164018 };
 static const u32 timer_scan_regs_e1h[TIMER_REGS_COUNT_E1H] = {
 	0x1640d0, 0x1640d4 };
 
-#define TIMER_REGS_COUNT_E2		2
+static const u32 timer_status_regs_e2[] = { 0x164014, 0x164018 };
+#define TIMER_REGS_COUNT_E2		ARRAY_SIZE(timer_status_regs_e2)
 
-static const u32 timer_status_regs_e2[TIMER_REGS_COUNT_E2] = {
-	0x164014, 0x164018 };
 static const u32 timer_scan_regs_e2[TIMER_REGS_COUNT_E2] = {
 	0x1640d0, 0x1640d4 };
 
+static const u32 timer_status_regs_e3[] = { 0x164014, 0x164018 };
+#define TIMER_REGS_COUNT_E3		ARRAY_SIZE(timer_status_regs_e3)
+
+static const u32 timer_scan_regs_e3[TIMER_REGS_COUNT_E3] = {
+	0x1640d0, 0x1640d4 };
+
 #define PAGE_MODE_VALUES_E1 0
 
 #define PAGE_READ_REGS_E1 0
@@ -682,7 +803,8 @@
 
 static const u32 page_write_regs_e1[] = { 0 };
 
-static const struct reg_addr page_read_regs_e1[] = { { 0x0, 0, RI_E1_ONLINE } };
+static const struct reg_addr page_read_regs_e1[] = {
+	{ 0x0, 0, RI_E1_ONLINE } };
 
 #define PAGE_MODE_VALUES_E1H 0
 
@@ -697,17 +819,24 @@
 static const struct reg_addr page_read_regs_e1h[] = {
 	{ 0x0, 0, RI_E1H_ONLINE } };
 
-#define PAGE_MODE_VALUES_E2 2
+static const u32 page_vals_e2[] = { 0, 128 };
+#define PAGE_MODE_VALUES_E2		ARRAY_SIZE(page_vals_e2)
 
-#define PAGE_READ_REGS_E2 1
+static const u32 page_write_regs_e2[] = { 328476 };
+#define PAGE_WRITE_REGS_E2		ARRAY_SIZE(page_write_regs_e2)
 
-#define PAGE_WRITE_REGS_E2 1
-
-static const u32 page_vals_e2[PAGE_MODE_VALUES_E2] = { 0, 128 };
-
-static const u32 page_write_regs_e2[PAGE_WRITE_REGS_E2] = { 328476 };
-
-static const struct reg_addr page_read_regs_e2[PAGE_READ_REGS_E2] = {
+static const struct reg_addr page_read_regs_e2[] = {
 	{ 0x58000, 4608, RI_E2_ONLINE } };
+#define PAGE_READ_REGS_E2		ARRAY_SIZE(page_read_regs_e2)
+
+static const u32 page_vals_e3[] = { 0, 128 };
+#define PAGE_MODE_VALUES_E3		ARRAY_SIZE(page_vals_e3)
+
+static const u32 page_write_regs_e3[] = { 328476 };
+#define PAGE_WRITE_REGS_E3		ARRAY_SIZE(page_write_regs_e3)
+
+static const struct reg_addr page_read_regs_e3[] = {
+	{ 0x58000, 4608, RI_E3_ONLINE } };
+#define PAGE_READ_REGS_E3		ARRAY_SIZE(page_read_regs_e3)
 
 #endif /* BNX2X_DUMP_H */
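
The recurring change through bnx2x_dump.h above is one idiom applied many times: the hand-maintained element counts (REGS_COUNT, IDLE_REGS_COUNT, WREGS_COUNT_*, TIMER_REGS_COUNT_*, PAGE_*) become ARRAY_SIZE() macros derived from the array initializers themselves, so a count can no longer drift from its data. A minimal standalone sketch of the idiom follows; all names in it are illustrative, not from the driver, and the kernel's ARRAY_SIZE() additionally carries a must-be-array type check that this sketch omits.

#include <stdio.h>

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

struct reg_range {
	unsigned int addr;	/* first register address */
	unsigned int size;	/* number of 32-bit registers */
};

/* Adding or removing an entry updates the count automatically. */
static const struct reg_range ranges[] = {
	{ 0x2104, 1 },
	{ 0x2110, 2 },
	{ 0x211c, 8 },
};
#define RANGES_COUNT	ARRAY_SIZE(ranges)

int main(void)
{
	unsigned int i;

	for (i = 0; i < RANGES_COUNT; i++)
		printf("0x%x: %u regs\n", ranges[i].addr, ranges[i].size);
	return 0;
}
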
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
index 727fe89..1a3ed41 100644
--- a/drivers/net/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/bnx2x/bnx2x_ethtool.c
@@ -25,6 +25,7 @@
 #include "bnx2x_cmn.h"
 #include "bnx2x_dump.h"
 #include "bnx2x_init.h"
+#include "bnx2x_sp.h"
 
 /* Note: in the format strings below %s is replaced by the queue-name which is
  * either its index or 'fcoe' for the fcoe queue. Make sure the format string
@@ -37,8 +38,6 @@
 	char string[ETH_GSTRING_LEN];
 } bnx2x_q_stats_arr[] = {
 /* 1 */	{ Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%s]: rx_bytes" },
-	{ Q_STATS_OFFSET32(error_bytes_received_hi),
-						8, "[%s]: rx_error_bytes" },
 	{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
 						8, "[%s]: rx_ucast_packets" },
 	{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
@@ -52,13 +51,18 @@
 					 4, "[%s]: rx_skb_alloc_discard" },
 	{ Q_STATS_OFFSET32(hw_csum_err), 4, "[%s]: rx_csum_offload_errors" },
 
-/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi),	8, "[%s]: tx_bytes" },
-	{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
+	{ Q_STATS_OFFSET32(total_bytes_transmitted_hi),	8, "[%s]: tx_bytes" },
+/* 10 */{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
 						8, "[%s]: tx_ucast_packets" },
 	{ Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
 						8, "[%s]: tx_mcast_packets" },
 	{ Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
-						8, "[%s]: tx_bcast_packets" }
+						8, "[%s]: tx_bcast_packets" },
+	{ Q_STATS_OFFSET32(total_tpa_aggregations_hi),
+						8, "[%s]: tpa_aggregations" },
+	{ Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi),
+					8, "[%s]: tpa_aggregated_frames"},
+	{ Q_STATS_OFFSET32(total_tpa_bytes_hi),	8, "[%s]: tpa_bytes"}
 };
 
 #define BNX2X_NUM_Q_STATS ARRAY_SIZE(bnx2x_q_stats_arr)
@@ -98,8 +102,8 @@
 				8, STATS_FLAGS_BOTH, "rx_discards" },
 	{ STATS_OFFSET32(mac_filter_discard),
 				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
-	{ STATS_OFFSET32(xxoverflow_discard),
-				4, STATS_FLAGS_PORT, "rx_fw_discards" },
+	{ STATS_OFFSET32(mf_tag_discard),
+				4, STATS_FLAGS_PORT, "rx_mf_tag_discard" },
 	{ STATS_OFFSET32(brb_drop_hi),
 				8, STATS_FLAGS_PORT, "rx_brb_discard" },
 	{ STATS_OFFSET32(brb_truncate_hi),
@@ -158,10 +162,43 @@
 	{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
 			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
 	{ STATS_OFFSET32(pause_frames_sent_hi),
-				8, STATS_FLAGS_PORT, "tx_pause_frames" }
+				8, STATS_FLAGS_PORT, "tx_pause_frames" },
+	{ STATS_OFFSET32(total_tpa_aggregations_hi),
+			8, STATS_FLAGS_FUNC, "tpa_aggregations" },
+	{ STATS_OFFSET32(total_tpa_aggregated_frames_hi),
+			8, STATS_FLAGS_FUNC, "tpa_aggregated_frames" },
+	{ STATS_OFFSET32(total_tpa_bytes_hi),
+			8, STATS_FLAGS_FUNC, "tpa_bytes" }
 };
 
 #define BNX2X_NUM_STATS		ARRAY_SIZE(bnx2x_stats_arr)
+
+static int bnx2x_get_port_type(struct bnx2x *bp)
+{
+	int port_type;
+	u32 phy_idx = bnx2x_get_cur_phy_idx(bp);
+	switch (bp->link_params.phy[phy_idx].media_type) {
+	case ETH_PHY_SFP_FIBER:
+	case ETH_PHY_XFP_FIBER:
+	case ETH_PHY_KR:
+	case ETH_PHY_CX4:
+		port_type = PORT_FIBRE;
+		break;
+	case ETH_PHY_DA_TWINAX:
+		port_type = PORT_DA;
+		break;
+	case ETH_PHY_BASE_T:
+		port_type = PORT_TP;
+		break;
+	case ETH_PHY_NOT_PRESENT:
+		port_type = PORT_NONE;
+		break;
+	case ETH_PHY_UNSPECIFIED:
+	default:
+		port_type = PORT_OTHER;
+		break;
+	}
+	return port_type;
+}
 
 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
@@ -188,12 +225,7 @@
 	if (IS_MF(bp))
 		ethtool_cmd_speed_set(cmd, bnx2x_get_mf_speed(bp));
 
-	if (bp->port.supported[cfg_idx] & SUPPORTED_TP)
-		cmd->port = PORT_TP;
-	else if (bp->port.supported[cfg_idx] & SUPPORTED_FIBRE)
-		cmd->port = PORT_FIBRE;
-	else
-		BNX2X_ERR("XGXS PHY Failure detected\n");
+	cmd->port = bnx2x_get_port_type(bp);
 
 	cmd->phy_address = bp->mdio.prtad;
 	cmd->transceiver = XCVR_INTERNAL;
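
The new bnx2x_get_port_type() above maps the active PHY's media type onto the generic ethtool PORT_* codes that bnx2x_get_settings() reports, replacing the old SUPPORTED_TP/SUPPORTED_FIBRE guess. Userspace sees the result through the legacy ETHTOOL_GSET ioctl that this era of driver implements; a minimal sketch of that query follows ("eth0" is an example interface name, and error handling is trimmed).

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/types.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* example name */
	ifr.ifr_data = (char *)&ecmd;

	/* The driver's .get_settings handler (bnx2x_get_settings) fills
	 * ecmd, including the port type from bnx2x_get_port_type(). */
	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("port %d (TP=%d FIBRE=%d DA=%d NONE=%d OTHER=%d)\n",
		       ecmd.port, PORT_TP, PORT_FIBRE, PORT_DA,
		       PORT_NONE, PORT_OTHER);

	close(fd);
	return 0;
}
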
@@ -494,7 +526,7 @@
 			if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
 				regdump_len += wreg_addrs_e1h[i].size *
 					(1 + wreg_addrs_e1h[i].read_regs_count);
-	} else if (CHIP_IS_E2(bp)) {
+	} else if (!CHIP_IS_E1x(bp)) {
 		for (i = 0; i < REGS_COUNT; i++)
 			if (IS_E2_ONLINE(reg_addrs[i].info))
 				regdump_len += reg_addrs[i].size;
@@ -566,7 +598,7 @@
 		dump_hdr.info = RI_E1_ONLINE;
 	else if (CHIP_IS_E1H(bp))
 		dump_hdr.info = RI_E1H_ONLINE;
-	else if (CHIP_IS_E2(bp))
+	else if (!CHIP_IS_E1x(bp))
 		dump_hdr.info = RI_E2_ONLINE |
 		(BP_PATH(bp) ? RI_PATH1_DUMP : RI_PATH0_DUMP);
 
@@ -587,23 +619,24 @@
 					*p++ = REG_RD(bp,
 						      reg_addrs[i].addr + j*4);
 
-	} else if (CHIP_IS_E2(bp)) {
+	} else if (!CHIP_IS_E1x(bp)) {
 		for (i = 0; i < REGS_COUNT; i++)
 			if (IS_E2_ONLINE(reg_addrs[i].info))
 				for (j = 0; j < reg_addrs[i].size; j++)
 					*p++ = REG_RD(bp,
 					      reg_addrs[i].addr + j*4);
 
-		bnx2x_read_pages_regs_e2(bp, p);
+		if (CHIP_IS_E2(bp))
+			bnx2x_read_pages_regs_e2(bp, p);
+		else
+			/* reading E3 paged registers is not yet implemented */
+			WARN_ON(1);
 	}
 	/* Re-enable parity attentions */
 	bnx2x_clear_blocks_parity(bp);
-	if (CHIP_PARITY_ENABLED(bp))
-		bnx2x_enable_blocks_parity(bp);
+	bnx2x_enable_blocks_parity(bp);
 }
 
-#define PHY_FW_VER_LEN			20
-
 static void bnx2x_get_drvinfo(struct net_device *dev,
 			      struct ethtool_drvinfo *info)
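
For E2 (and, once implemented, E3), part of the register space is paged: the dump path writes each value from page_vals_e2 into the page-select register listed in page_write_regs_e2, then reads the window described by page_read_regs_e2, as the hunk above does via bnx2x_read_pages_regs_e2(). A self-contained simulation of that mechanism follows; every address, bank value, and name in it is invented for illustration.

#include <stdio.h>

#define PAGE_SELECT_REG	0x100
#define WINDOW_ADDR	0x200
#define WINDOW_SIZE	4

static unsigned int page_select;		/* current page */
static unsigned int banks[2][WINDOW_SIZE] = {	/* two fake banks */
	{ 0x10, 0x11, 0x12, 0x13 },
	{ 0x20, 0x21, 0x22, 0x23 },
};

/* Writing the page-select register changes which bank the fixed
 * window of addresses maps to. */
static void reg_wr(unsigned int addr, unsigned int val)
{
	if (addr == PAGE_SELECT_REG)
		page_select = val ? 1 : 0;	/* e.g. values 0 and 128 */
}

static unsigned int reg_rd(unsigned int addr)
{
	return banks[page_select][(addr - WINDOW_ADDR) / 4];
}

int main(void)
{
	static const unsigned int page_vals[] = { 0, 128 };
	unsigned int p, j;

	for (p = 0; p < 2; p++) {
		reg_wr(PAGE_SELECT_REG, page_vals[p]);	/* select page */
		for (j = 0; j < WINDOW_SIZE; j++)	/* dump the window */
			printf("page %u reg 0x%x = 0x%x\n", page_vals[p],
			       WINDOW_ADDR + j * 4,
			       reg_rd(WINDOW_ADDR + j * 4));
	}
	return 0;
}
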
 {
@@ -682,8 +715,12 @@
 {
 	struct bnx2x *bp = netdev_priv(dev);
 
-	if (capable(CAP_NET_ADMIN))
+	if (capable(CAP_NET_ADMIN)) {
+		/* dump MCP trace */
+		if (level & BNX2X_MSG_MCP)
+			bnx2x_fw_dump_lvl(bp, KERN_INFO);
 		bp->msg_enable = level;
+	}
 }
 
 static int bnx2x_nway_reset(struct net_device *dev)
@@ -725,7 +762,7 @@
 	u32 val = 0;
 
 	/* adjust timeout for emulation/FPGA */
-	count = NVRAM_TIMEOUT_COUNT;
+	count = BNX2X_NVRAM_TIMEOUT_COUNT;
 	if (CHIP_REV_IS_SLOW(bp))
 		count *= 100;
 
@@ -756,7 +793,7 @@
 	u32 val = 0;
 
 	/* adjust timeout for emulation/FPGA */
-	count = NVRAM_TIMEOUT_COUNT;
+	count = BNX2X_NVRAM_TIMEOUT_COUNT;
 	if (CHIP_REV_IS_SLOW(bp))
 		count *= 100;
 
@@ -824,7 +861,7 @@
 	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
 
 	/* adjust timeout for emulation/FPGA */
-	count = NVRAM_TIMEOUT_COUNT;
+	count = BNX2X_NVRAM_TIMEOUT_COUNT;
 	if (CHIP_REV_IS_SLOW(bp))
 		count *= 100;
 
@@ -947,7 +984,7 @@
 	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
 
 	/* adjust timeout for emulation/FPGA */
-	count = NVRAM_TIMEOUT_COUNT;
+	count = BNX2X_NVRAM_TIMEOUT_COUNT;
 	if (CHIP_REV_IS_SLOW(bp))
 		count *= 100;
 
@@ -1051,9 +1088,9 @@
 	while ((written_so_far < buf_size) && (rc == 0)) {
 		if (written_so_far == (buf_size - sizeof(u32)))
 			cmd_flags |= MCPR_NVM_COMMAND_LAST;
-		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
+		else if (((offset + 4) % BNX2X_NVRAM_PAGE_SIZE) == 0)
 			cmd_flags |= MCPR_NVM_COMMAND_LAST;
-		else if ((offset % NVRAM_PAGE_SIZE) == 0)
+		else if ((offset % BNX2X_NVRAM_PAGE_SIZE) == 0)
 			cmd_flags |= MCPR_NVM_COMMAND_FIRST;
 
 		memcpy(&val, data_buf, 4);
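
The NVRAM write loop above issues 4-byte commands and must mark page boundaries: the final word of the buffer or of a BNX2X_NVRAM_PAGE_SIZE page gets MCPR_NVM_COMMAND_LAST, and a word that starts a page gets MCPR_NVM_COMMAND_FIRST. A standalone sketch of just that flag arithmetic follows; the 256-byte page size and the flag values are assumed for illustration, as the real constants live elsewhere in the driver.

#include <stdio.h>

#define NVRAM_PAGE_SIZE	256	/* assumed page size for illustration */
#define CMD_FIRST	0x01	/* stand-ins for MCPR_NVM_COMMAND_* */
#define CMD_LAST	0x02

/* Mirrors the else-if chain above: the last word of the buffer or
 * of a page gets LAST; otherwise a word starting a page gets FIRST. */
static unsigned int nvram_cmd_flags(unsigned int offset,
				    unsigned int written_so_far,
				    unsigned int buf_size)
{
	unsigned int flags = 0;

	if (written_so_far == buf_size - 4)
		flags |= CMD_LAST;
	else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
		flags |= CMD_LAST;
	else if ((offset % NVRAM_PAGE_SIZE) == 0)
		flags |= CMD_FIRST;
	return flags;
}

int main(void)
{
	unsigned int off, written = 0;

	/* 4-byte words crossing the page boundary at offset 256 */
	for (off = 248; off < 268; off += 4, written += 4)
		printf("offset %3u -> flags 0x%x\n", off,
		       nvram_cmd_flags(off, written, 1024));
	return 0;
}
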
@@ -1212,7 +1249,6 @@
 			       struct ethtool_ringparam *ering)
 {
 	struct bnx2x *bp = netdev_priv(dev);
-	int rc = 0;
 
 	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
 		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
@@ -1229,12 +1265,7 @@
 	bp->rx_ring_size = ering->rx_pending;
 	bp->tx_ring_size = ering->tx_pending;
 
-	if (netif_running(dev)) {
-		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
-		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
-	}
-
-	return rc;
+	return bnx2x_reload_if_running(dev);
 }
 
 static void bnx2x_get_pauseparam(struct net_device *dev,
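
The ring-parameter path above now delegates to bnx2x_reload_if_running(), whose body is outside this hunk. Judging from the inline code it replaces, it presumably looks like the following sketch; this is an inference from the removed lines, not the driver's verbatim implementation.

static int bnx2x_reload_if_running(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* bounce the NIC only when the interface is actually up */
	if (!netif_running(dev))
		return 0;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
	return bnx2x_nic_load(bp, LOAD_NORMAL);
}
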
@@ -1313,60 +1344,129 @@
 	{ "idle check (online)" }
 };
 
+enum {
+	BNX2X_CHIP_E1_OFST = 0,
+	BNX2X_CHIP_E1H_OFST,
+	BNX2X_CHIP_E2_OFST,
+	BNX2X_CHIP_E3_OFST,
+	BNX2X_CHIP_E3B0_OFST,
+	BNX2X_CHIP_MAX_OFST
+};
+
+#define BNX2X_CHIP_MASK_E1	(1 << BNX2X_CHIP_E1_OFST)
+#define BNX2X_CHIP_MASK_E1H	(1 << BNX2X_CHIP_E1H_OFST)
+#define BNX2X_CHIP_MASK_E2	(1 << BNX2X_CHIP_E2_OFST)
+#define BNX2X_CHIP_MASK_E3	(1 << BNX2X_CHIP_E3_OFST)
+#define BNX2X_CHIP_MASK_E3B0	(1 << BNX2X_CHIP_E3B0_OFST)
+
+#define BNX2X_CHIP_MASK_ALL	((1 << BNX2X_CHIP_MAX_OFST) - 1)
+#define BNX2X_CHIP_MASK_E1X	(BNX2X_CHIP_MASK_E1 | BNX2X_CHIP_MASK_E1H)
+
 static int bnx2x_test_registers(struct bnx2x *bp)
 {
 	int idx, i, rc = -ENODEV;
-	u32 wr_val = 0;
+	u32 wr_val = 0, hw;
 	int port = BP_PORT(bp);
 	static const struct {
+		u32 hw;
 		u32 offset0;
 		u32 offset1;
 		u32 mask;
 	} reg_tbl[] = {
-/* 0 */		{ BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
-		{ DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
-		{ HC_REG_AGG_INT_0,                    4, 0x000003ff },
-		{ PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
-		{ PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
-		{ PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
-		{ PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
-		{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
-		{ PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
-		{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
-/* 10 */	{ PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
-		{ QM_REG_CONNNUM_0,                    4, 0x000fffff },
-		{ TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
-		{ SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
-		{ SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
-		{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
-		{ XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
-		{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
-		{ NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
-		{ NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
-/* 20 */	{ NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
-		{ NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
-		{ NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
-		{ NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
-		{ NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
-		{ NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
-		{ NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
-		{ NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
-		{ NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
-		{ NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
-/* 30 */	{ NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
-		{ NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
-		{ NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
-		{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
-		{ NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
-		{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
-		{ NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },
+/* 0 */		{ BNX2X_CHIP_MASK_ALL,
+			BRB1_REG_PAUSE_LOW_THRESHOLD_0,	4, 0x000003ff },
+		{ BNX2X_CHIP_MASK_ALL,
+			DORQ_REG_DB_ADDR0,		4, 0xffffffff },
+		{ BNX2X_CHIP_MASK_E1X,
+			HC_REG_AGG_INT_0,		4, 0x000003ff },
+		{ BNX2X_CHIP_MASK_ALL,
+			PBF_REG_MAC_IF0_ENABLE,		4, 0x00000001 },
+		{ BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2 | BNX2X_CHIP_MASK_E3,
+			PBF_REG_P0_INIT_CRD,		4, 0x000007ff },
+		{ BNX2X_CHIP_MASK_E3B0,
+			PBF_REG_INIT_CRD_Q0,		4, 0x000007ff },
+		{ BNX2X_CHIP_MASK_ALL,
+			PRS_REG_CID_PORT_0,		4, 0x00ffffff },
+		{ BNX2X_CHIP_MASK_ALL,
+			PXP2_REG_PSWRQ_CDU0_L2P,	4, 0x000fffff },
+		{ BNX2X_CHIP_MASK_ALL,
+			PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
+		{ BNX2X_CHIP_MASK_ALL,
+			PXP2_REG_PSWRQ_TM0_L2P,		4, 0x000fffff },
+/* 10 */	{ BNX2X_CHIP_MASK_ALL,
+			PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
+		{ BNX2X_CHIP_MASK_ALL,
+			PXP2_REG_PSWRQ_TSDM0_L2P,	4, 0x000fffff },
+		{ BNX2X_CHIP_MASK_ALL,
+			QM_REG_CONNNUM_0,		4, 0x000fffff },
+		{ BNX2X_CHIP_MASK_ALL,
+			TM_REG_LIN0_MAX_ACTIVE_CID,	4, 0x0003ffff },
+		{ BNX2X_CHIP_MASK_ALL,
+			SRC_REG_KEYRSS0_0,		40, 0xffffffff },
+		{ BNX2X_CHIP_MASK_ALL,
+			SRC_REG_KEYRSS0_7,		40, 0xffffffff },
+		{ BNX2X_CHIP_MASK_ALL,
+			XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
+		{ BNX2X_CHIP_MASK_ALL,
+			XCM_REG_WU_DA_CNT_CMD00,	4, 0x00000003 },
+		{ BNX2X_CHIP_MASK_ALL,
+			XCM_REG_GLB_DEL_ACK_MAX_CNT_0,	4, 0x000000ff },
+		{ BNX2X_CHIP_MASK_ALL,
+			NIG_REG_LLH0_T_BIT,		4, 0x00000001 },
+/* 20 */	{ BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
+			NIG_REG_EMAC0_IN_EN,		4, 0x00000001 },
+		{ BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
+			NIG_REG_BMAC0_IN_EN,		4, 0x00000001 },
+		{ BNX2X_CHIP_MASK_ALL,
+			NIG_REG_XCM0_OUT_EN,		4, 0x00000001 },
+		{ BNX2X_CHIP_MASK_ALL,
+			NIG_REG_BRB0_OUT_EN,		4, 0x00000001 },
+		{ BNX2X_CHIP_MASK_ALL,
+			NIG_REG_LLH0_XCM_MASK,		4, 0x00000007 },
+		{ BNX2X_CHIP_MASK_ALL,
+			NIG_REG_LLH0_ACPI_PAT_6_LEN,	68, 0x000000ff },
+		{ BNX2X_CHIP_MASK_ALL,
+			NIG_REG_LLH0_ACPI_PAT_0_CRC,	68, 0xffffffff },
+		{ BNX2X_CHIP_MASK_ALL,
+			NIG_REG_LLH0_DEST_MAC_0_0,	160, 0xffffffff },
+		{ BNX2X_CHIP_MASK_ALL,
+			NIG_REG_LLH0_DEST_IP_0_1,	160, 0xffffffff },
+		{ BNX2X_CHIP_MASK_ALL,
+			NIG_REG_LLH0_IPV4_IPV6_0,	160, 0x00000001 },
+/* 30 */	{ BNX2X_CHIP_MASK_ALL,
+			NIG_REG_LLH0_DEST_UDP_0,	160, 0x0000ffff },
+		{ BNX2X_CHIP_MASK_ALL,
+			NIG_REG_LLH0_DEST_TCP_0,	160, 0x0000ffff },
+		{ BNX2X_CHIP_MASK_ALL,
+			NIG_REG_LLH0_VLAN_ID_0,	160, 0x00000fff },
+		{ BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
+			NIG_REG_XGXS_SERDES0_MODE_SEL,	4, 0x00000001 },
+		{ BNX2X_CHIP_MASK_ALL,
+			NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001},
+		{ BNX2X_CHIP_MASK_ALL,
+			NIG_REG_STATUS_INTERRUPT_PORT0,	4, 0x07ffffff },
+		{ BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
+			NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
+		{ BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
+			NIG_REG_SERDES0_CTRL_PHY_ADDR,	16, 0x0000001f },
 
-		{ 0xffffffff, 0, 0x00000000 }
+		{ BNX2X_CHIP_MASK_ALL, 0xffffffff, 0, 0x00000000 }
 	};
 
 	if (!netif_running(bp->dev))
 		return rc;
 
+	if (CHIP_IS_E1(bp))
+		hw = BNX2X_CHIP_MASK_E1;
+	else if (CHIP_IS_E1H(bp))
+		hw = BNX2X_CHIP_MASK_E1H;
+	else if (CHIP_IS_E2(bp))
+		hw = BNX2X_CHIP_MASK_E2;
+	else if (CHIP_IS_E3B0(bp))
+		hw = BNX2X_CHIP_MASK_E3B0;
+	else /* e3 A0 */
+		hw = BNX2X_CHIP_MASK_E3;
+
 	/* Repeat the test twice:
 	   First by writing 0x00000000, second by writing 0xffffffff */
 	for (idx = 0; idx < 2; idx++) {
@@ -1382,8 +1482,7 @@
 
 		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
 			u32 offset, mask, save_val, val;
-			if (CHIP_IS_E2(bp) &&
-			    reg_tbl[i].offset0 == HC_REG_AGG_INT_0)
+			if (!(hw & reg_tbl[i].hw))
 				continue;
 
 			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
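The hunk above replaces the single hard-coded E2 exception with a generic per-chip gate: every reg_tbl entry now carries a chip mask, and entries whose mask does not include the running chip are skipped. A minimal standalone sketch of the same pattern (simplified names, made-up offsets; not the driver's actual table):

#include <stdio.h>

enum { CHIP_E1, CHIP_E1H, CHIP_E2, CHIP_E3, CHIP_E3B0, CHIP_MAX };

#define MASK(c)		(1u << (c))
#define MASK_ALL	((1u << CHIP_MAX) - 1)
#define MASK_E1X	(MASK(CHIP_E1) | MASK(CHIP_E1H))

struct reg_test {
	unsigned int hw;	/* chips this entry applies to */
	unsigned int offset;
	unsigned int mask;
};

static const struct reg_test reg_tbl[] = {
	{ MASK_ALL,        0x1000, 0x000003ff },	/* every chip */
	{ MASK_E1X,        0x2000, 0x000003ff },	/* E1/E1H only */
	{ MASK(CHIP_E3B0), 0x3000, 0x000007ff },	/* E3 B0 only */
};

int main(void)
{
	unsigned int hw = MASK(CHIP_E2);	/* as if CHIP_IS_E2() held */
	size_t i;

	for (i = 0; i < sizeof(reg_tbl) / sizeof(reg_tbl[0]); i++) {
		if (!(hw & reg_tbl[i].hw))
			continue;	/* entry not valid on this chip */
		printf("would test offset 0x%x mask 0x%x\n",
		       reg_tbl[i].offset, reg_tbl[i].mask);
	}
	return 0;
}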
@@ -1400,7 +1499,7 @@
 
 			/* verify value is as expected */
 			if ((val & mask) != (wr_val & mask)) {
-				DP(NETIF_MSG_PROBE,
+				DP(NETIF_MSG_HW,
 				   "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n",
 				   offset, val, wr_val, mask);
 				goto test_reg_exit;
@@ -1417,7 +1516,7 @@
 static int bnx2x_test_memory(struct bnx2x *bp)
 {
 	int i, j, rc = -ENODEV;
-	u32 val;
+	u32 val, index;
 	static const struct {
 		u32 offset;
 		int size;
@@ -1432,32 +1531,44 @@
 
 		{ 0xffffffff, 0 }
 	};
+
 	static const struct {
 		char *name;
 		u32 offset;
-		u32 e1_mask;
-		u32 e1h_mask;
-		u32 e2_mask;
+		u32 hw_mask[BNX2X_CHIP_MAX_OFST];
 	} prty_tbl[] = {
-		{ "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0,   0 },
-		{ "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2, 0 },
-		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0,   0 },
-		{ "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0,   0 },
-		{ "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0,   0 },
-		{ "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0,   0 },
+		{ "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,
+			{0x3ffc0, 0,   0, 0} },
+		{ "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,
+			{0x2,     0x2, 0, 0} },
+		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS,
+			{0,       0,   0, 0} },
+		{ "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,
+			{0x3ffc0, 0,   0, 0} },
+		{ "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,
+			{0x3ffc0, 0,   0, 0} },
+		{ "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,
+			{0x3ffc1, 0,   0, 0} },
 
-		{ NULL, 0xffffffff, 0, 0, 0 }
+		{ NULL, 0xffffffff, {0, 0, 0, 0} }
 	};
 
 	if (!netif_running(bp->dev))
 		return rc;
 
+	if (CHIP_IS_E1(bp))
+		index = BNX2X_CHIP_E1_OFST;
+	else if (CHIP_IS_E1H(bp))
+		index = BNX2X_CHIP_E1H_OFST;
+	else if (CHIP_IS_E2(bp))
+		index = BNX2X_CHIP_E2_OFST;
+	else /* e3 */
+		index = BNX2X_CHIP_E3_OFST;
+
 	/* pre-Check the parity status */
 	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
 		val = REG_RD(bp, prty_tbl[i].offset);
-		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
-		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask))) ||
-		    (CHIP_IS_E2(bp) && (val & ~(prty_tbl[i].e2_mask)))) {
+		if (val & ~(prty_tbl[i].hw_mask[index])) {
 			DP(NETIF_MSG_HW,
 			   "%s is 0x%x\n", prty_tbl[i].name, val);
 			goto test_mem_exit;
@@ -1472,9 +1583,7 @@
 	/* Check the parity status */
 	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
 		val = REG_RD(bp, prty_tbl[i].offset);
-		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
-		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask))) ||
-		    (CHIP_IS_E2(bp) && (val & ~(prty_tbl[i].e2_mask)))) {
+		if (val & ~(prty_tbl[i].hw_mask[index])) {
 			DP(NETIF_MSG_HW,
 			   "%s is 0x%x\n", prty_tbl[i].name, val);
 			goto test_mem_exit;
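The parity tables get the same treatment: the three per-chip mask fields and the chain of CHIP_IS_*() checks per row collapse into a hw_mask[] array indexed once by the chip's offset. A toy version, with an invented register readout:

#include <stdio.h>

enum { E1_OFST, E1H_OFST, E2_OFST, E3_OFST, CHIP_MAX_OFST };

struct prty {
	const char *name;
	unsigned int hw_mask[CHIP_MAX_OFST];	/* ignorable bits per chip */
};

static const struct prty prty_tbl[] = {
	{ "CCM_PRTY_STS", { 0x3ffc0, 0,   0, 0 } },
	{ "CFC_PRTY_STS", { 0x2,     0x2, 0, 0 } },
};

int main(void)
{
	unsigned int index = E1_OFST;	/* chosen once, as if CHIP_IS_E1() */
	unsigned int val = 0x3ffc4;	/* pretend parity-status readout */
	size_t i;

	/* with these values both rows report: 0x4 and 0x3ffc4 survive
	 * their respective masks */
	for (i = 0; i < sizeof(prty_tbl) / sizeof(prty_tbl[0]); i++)
		if (val & ~prty_tbl[i].hw_mask[index])
			printf("%s is 0x%x\n", prty_tbl[i].name, val);
	return 0;
}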
@@ -1491,12 +1600,16 @@
 {
 	int cnt = 1400;
 
-	if (link_up)
+	if (link_up) {
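+		/* up to 1400 polls of 20 ms each: roughly a 28 second timeout */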
 		while (bnx2x_link_test(bp, is_serdes) && cnt--)
-			msleep(10);
+			msleep(20);
+
+		if (cnt <= 0 && bnx2x_link_test(bp, is_serdes))
+			DP(NETIF_MSG_LINK, "Timeout waiting for link up\n");
+	}
 }
 
-static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
+static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
 {
 	unsigned int pkt_size, num_pkts, i;
 	struct sk_buff *skb;
@@ -1505,14 +1618,14 @@
 	struct bnx2x_fastpath *fp_tx = &bp->fp[0];
 	u16 tx_start_idx, tx_idx;
 	u16 rx_start_idx, rx_idx;
-	u16 pkt_prod, bd_prod;
+	u16 pkt_prod, bd_prod, rx_comp_cons;
 	struct sw_tx_bd *tx_buf;
 	struct eth_tx_start_bd *tx_start_bd;
 	struct eth_tx_parse_bd_e1x  *pbd_e1x = NULL;
 	struct eth_tx_parse_bd_e2  *pbd_e2 = NULL;
 	dma_addr_t mapping;
 	union eth_rx_cqe *cqe;
-	u8 cqe_fp_flags;
+	u8 cqe_fp_flags, cqe_fp_type;
 	struct sw_rx_bd *rx_buf;
 	u16 len;
 	int rc = -ENODEV;
@@ -1524,7 +1637,8 @@
 			return -EINVAL;
 		break;
 	case BNX2X_MAC_LOOPBACK:
-		bp->link_params.loopback_mode = LOOPBACK_BMAC;
+		bp->link_params.loopback_mode = CHIP_IS_E3(bp) ?
+						LOOPBACK_XMAC : LOOPBACK_BMAC;
 		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
 		break;
 	default:
@@ -1545,6 +1659,14 @@
 	memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
 	for (i = ETH_HLEN; i < pkt_size; i++)
 		packet[i] = (unsigned char) (i & 0xff);
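+	/* map the buffer before touching the Tx ring, so a mapping failure
+	 * can bail out without leaving a half-built BD behind */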
+	mapping = dma_map_single(&bp->pdev->dev, skb->data,
+				 skb_headlen(skb), DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+		rc = -ENOMEM;
+		dev_kfree_skb(skb);
+		BNX2X_ERR("Unable to map SKB\n");
+		goto test_loopback_exit;
+	}
 
 	/* send the loopback packet */
 	num_pkts = 0;
@@ -1559,8 +1681,6 @@
 
 	bd_prod = TX_BD(fp_tx->tx_bd_prod);
 	tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
-	mapping = dma_map_single(&bp->pdev->dev, skb->data,
-				 skb_headlen(skb), DMA_TO_DEVICE);
 	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
 	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
 	tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
@@ -1590,6 +1710,7 @@
 	DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);
 
 	mmiowb();
+	barrier();
 
 	num_pkts++;
 	fp_tx->tx_bd_prod += 2; /* start + pbd */
@@ -1618,9 +1739,11 @@
 	if (rx_idx != rx_start_idx + num_pkts)
 		goto test_loopback_exit;
 
-	cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
+	rx_comp_cons = le16_to_cpu(fp_rx->rx_comp_cons);
+	cqe = &fp_rx->rx_comp_ring[RCQ_BD(rx_comp_cons)];
 	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
-	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
+	cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
+	if (!CQE_TYPE_FAST(cqe_fp_type) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
 		goto test_loopback_rx_exit;
 
 	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
@@ -1628,6 +1751,9 @@
 		goto test_loopback_rx_exit;
 
 	rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
+	dma_sync_single_for_device(&bp->pdev->dev,
+				   dma_unmap_addr(rx_buf, mapping),
+				   fp_rx->rx_buf_size, DMA_FROM_DEVICE);
 	skb = rx_buf->skb;
 	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
 	for (i = ETH_HLEN; i < pkt_size; i++)
@@ -1653,7 +1779,7 @@
 	return rc;
 }
 
-static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
+static int bnx2x_test_loopback(struct bnx2x *bp)
 {
 	int rc = 0, res;
 
@@ -1666,13 +1792,13 @@
 	bnx2x_netif_stop(bp, 1);
 	bnx2x_acquire_phy_lock(bp);
 
-	res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
+	res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK);
 	if (res) {
 		DP(NETIF_MSG_PROBE, "  PHY loopback failed  (res %d)\n", res);
 		rc |= BNX2X_PHY_LOOPBACK_FAILED;
 	}
 
-	res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
+	res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK);
 	if (res) {
 		DP(NETIF_MSG_PROBE, "  MAC loopback failed  (res %d)\n", res);
 		rc |= BNX2X_MAC_LOOPBACK_FAILED;
@@ -1744,39 +1870,20 @@
 	return rc;
 }
 
+/* Send an EMPTY ramrod on the first queue */
 static int bnx2x_test_intr(struct bnx2x *bp)
 {
-	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
-	int i, rc;
+	struct bnx2x_queue_state_params params = {0};
 
 	if (!netif_running(bp->dev))
 		return -ENODEV;
 
-	config->hdr.length = 0;
-	if (CHIP_IS_E1(bp))
-		config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
-	else
-		config->hdr.offset = BP_FUNC(bp);
-	config->hdr.client_id = bp->fp->cl_id;
-	config->hdr.reserved1 = 0;
+	params.q_obj = &bp->fp->q_obj;
+	params.cmd = BNX2X_Q_CMD_EMPTY;
 
-	bp->set_mac_pending = 1;
-	smp_wmb();
-	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
-			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
-			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);
-	if (rc == 0) {
-		for (i = 0; i < 10; i++) {
-			if (!bp->set_mac_pending)
-				break;
-			smp_rmb();
-			msleep_interruptible(10);
-		}
-		if (i == 10)
-			rc = -ENODEV;
-	}
+	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
 
-	return rc;
+	return bnx2x_queue_state_change(bp, &params);
 }
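For reference, the rewritten interrupt test leans on the queue state machine instead of hand-posting a SET_MAC ramrod and polling a pending flag. The calling pattern, mocked up with simplified stand-in types (the real q_obj, flag names and bnx2x_queue_state_change() live in the objects framework and are not reproduced here):

#include <stdio.h>
#include <string.h>

#define RAMROD_COMP_WAIT	0	/* bit number in ramrod_flags */

enum { Q_CMD_EMPTY = 1 };	/* stand-in for BNX2X_Q_CMD_EMPTY */

struct queue_state_params {
	void *q_obj;
	int cmd;
	unsigned long ramrod_flags;
};

static int queue_state_change(const struct queue_state_params *p)
{
	/* the real helper posts a ramrod; with RAMROD_COMP_WAIT set it
	 * also sleeps until the firmware completion arrives */
	printf("posting cmd %d, wait=%lu\n", p->cmd,
	       (p->ramrod_flags >> RAMROD_COMP_WAIT) & 1UL);
	return 0;
}

int main(void)
{
	struct queue_state_params params;

	memset(&params, 0, sizeof(params));
	params.q_obj = NULL;	/* would be &bp->fp->q_obj */
	params.cmd = Q_CMD_EMPTY;
	params.ramrod_flags |= 1UL << RAMROD_COMP_WAIT;

	return queue_state_change(&params);
}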
 
 static void bnx2x_self_test(struct net_device *dev,
@@ -1815,7 +1922,7 @@
 		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
 		bnx2x_nic_load(bp, LOAD_DIAG);
 		/* wait until link state is restored */
-		bnx2x_wait_for_link(bp, link_up, is_serdes);
+		bnx2x_wait_for_link(bp, 1, is_serdes);
 
 		if (bnx2x_test_registers(bp) != 0) {
 			buf[0] = 1;
@@ -1826,7 +1933,7 @@
 			etest->flags |= ETH_TEST_FL_FAILED;
 		}
 
-		buf[2] = bnx2x_test_loopback(bp, link_up);
+		buf[2] = bnx2x_test_loopback(bp);
 		if (buf[2] != 0)
 			etest->flags |= ETH_TEST_FL_FAILED;
 
@@ -1864,6 +1971,14 @@
 #define IS_MF_MODE_STAT(bp) \
 			(IS_MF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))
 
+/* ethtool statistics are displayed for the regular ethernet queues only;
+ * the FCoE L2 queue is not included
+ */
+static inline int bnx2x_num_stat_queues(struct bnx2x *bp)
+{
+	return BNX2X_NUM_ETH_QUEUES(bp);
+}
+
 static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
 {
 	struct bnx2x *bp = netdev_priv(dev);
@@ -1872,7 +1987,7 @@
 	switch (stringset) {
 	case ETH_SS_STATS:
 		if (is_multi(bp)) {
-			num_stats = BNX2X_NUM_STAT_QUEUES(bp) *
+			num_stats = bnx2x_num_stat_queues(bp) *
 				BNX2X_NUM_Q_STATS;
 			if (!IS_MF_MODE_STAT(bp))
 				num_stats += BNX2X_NUM_STATS;
@@ -1905,14 +2020,9 @@
 	case ETH_SS_STATS:
 		if (is_multi(bp)) {
 			k = 0;
-			for_each_napi_queue(bp, i) {
+			for_each_eth_queue(bp, i) {
 				memset(queue_name, 0, sizeof(queue_name));
-
-				if (IS_FCOE_IDX(i))
-					sprintf(queue_name, "fcoe");
-				else
-					sprintf(queue_name, "%d", i);
-
+				sprintf(queue_name, "%d", i);
 				for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
 					snprintf(buf + (k + j)*ETH_GSTRING_LEN,
 						ETH_GSTRING_LEN,
@@ -1951,7 +2061,7 @@
 
 	if (is_multi(bp)) {
 		k = 0;
-		for_each_napi_queue(bp, i) {
+		for_each_eth_queue(bp, i) {
 			hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
 			for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
 				if (bnx2x_q_stats_arr[j].size == 0) {
@@ -2069,14 +2179,30 @@
 {
 	struct bnx2x *bp = netdev_priv(dev);
 	size_t copy_size =
-		min_t(size_t, indir->size, TSTORM_INDIRECTION_TABLE_SIZE);
+		min_t(size_t, indir->size, T_ETH_INDIRECTION_TABLE_SIZE);
+	u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
+	size_t i;
 
 	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
 		return -EOPNOTSUPP;
 
-	indir->size = TSTORM_INDIRECTION_TABLE_SIZE;
-	memcpy(indir->ring_index, bp->rx_indir_table,
-	       copy_size * sizeof(bp->rx_indir_table[0]));
+	/* Get the current configuration of the RSS indirection table */
+	bnx2x_get_rss_ind_table(&bp->rss_conf_obj, ind_table);
+
+	/*
+	 * We can't use a memcpy() because the internal storage of the
+	 * indirection table is a u8 array while indir->ring_index
+	 * points to an array of u32.
+	 *
+	 * The indirection table contains the FW Client IDs, so we need
+	 * to align the returned table to the Client ID of the leading
+	 * RSS queue.
+	 */
+	for (i = 0; i < copy_size; i++)
+		indir->ring_index[i] = ind_table[i] - bp->fp->cl_id;
+
+	indir->size = T_ETH_INDIRECTION_TABLE_SIZE;
+
 	return 0;
 }
 
@@ -2085,21 +2211,33 @@
 {
 	struct bnx2x *bp = netdev_priv(dev);
 	size_t i;
+	u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
+	u32 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
 
 	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
 		return -EOPNOTSUPP;
 
-	/* Validate size and indices */
-	if (indir->size != TSTORM_INDIRECTION_TABLE_SIZE)
+	/* validate the size */
+	if (indir->size != T_ETH_INDIRECTION_TABLE_SIZE)
 		return -EINVAL;
-	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
-		if (indir->ring_index[i] >= BNX2X_NUM_ETH_QUEUES(bp))
-			return -EINVAL;
 
-	memcpy(bp->rx_indir_table, indir->ring_index,
-	       indir->size * sizeof(bp->rx_indir_table[0]));
-	bnx2x_push_indir_table(bp);
-	return 0;
+	for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
+		/* validate the indices */
+		if (indir->ring_index[i] >= num_eth_queues)
+			return -EINVAL;
+		/*
+		 * As in bnx2x_get_rxfh_indir: we can't use a memcpy() because
+		 * the internal storage of the indirection table is a u8 array
+		 * while indir->ring_index points to an array of u32.
+		 *
+		 * The indirection table contains the FW Client IDs, so we need
+		 * to align the received table to the Client ID of the leading
+		 * RSS queue.
+		 */
+		ind_table[i] = indir->ring_index[i] + bp->fp->cl_id;
+	}
+
+	return bnx2x_config_rss_pf(bp, ind_table, false);
 }
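Both rxfh handlers now translate element-wise between ethtool's u32 ring indices and the firmware's u8 client IDs, offset by the leading RSS queue's client ID; that is also why a plain memcpy() is ruled out. A toy round trip of the translation:

#include <stdio.h>

#define TABLE_SIZE 8	/* the real table is 128 entries */

int main(void)
{
	unsigned int ring_index[TABLE_SIZE] = { 0, 1, 2, 3, 0, 1, 2, 3 };
	unsigned char ind_table[TABLE_SIZE];
	unsigned char leading_cl_id = 17;	/* pretend bp->fp->cl_id */
	int i;

	/* set path: queue index -> FW client ID (element-wise, u32 to u8) */
	for (i = 0; i < TABLE_SIZE; i++)
		ind_table[i] = ring_index[i] + leading_cl_id;

	/* get path: FW client ID -> queue index, recovering the input */
	for (i = 0; i < TABLE_SIZE; i++)
		printf("%d ", ind_table[i] - leading_cl_id);
	printf("\n");
	return 0;
}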
 
 static const struct ethtool_ops bnx2x_ethtool_ops = {
diff --git a/drivers/net/bnx2x/bnx2x_fw_defs.h b/drivers/net/bnx2x/bnx2x_fw_defs.h
index 9fe3678..998652a 100644
--- a/drivers/net/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/bnx2x/bnx2x_fw_defs.h
@@ -10,249 +10,221 @@
 #ifndef BNX2X_FW_DEFS_H
 #define BNX2X_FW_DEFS_H
 
-#define CSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[142].base)
+#define CSTORM_ASSERT_LIST_INDEX_OFFSET	(IRO[148].base)
 #define CSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
-	(IRO[141].base + ((assertListEntry) * IRO[141].m1))
-#define CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(pfId) \
-	(IRO[144].base + ((pfId) * IRO[144].m1))
+	(IRO[147].base + ((assertListEntry) * IRO[147].m1))
 #define CSTORM_EVENT_RING_DATA_OFFSET(pfId) \
-	(IRO[149].base + (((pfId)>>1) * IRO[149].m1) + (((pfId)&1) * \
-	IRO[149].m2))
+	(IRO[153].base + (((pfId)>>1) * IRO[153].m1) + (((pfId)&1) * \
+	IRO[153].m2))
 #define CSTORM_EVENT_RING_PROD_OFFSET(pfId) \
-	(IRO[150].base + (((pfId)>>1) * IRO[150].m1) + (((pfId)&1) * \
-	IRO[150].m2))
+	(IRO[154].base + (((pfId)>>1) * IRO[154].m1) + (((pfId)&1) * \
+	IRO[154].m2))
 #define CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(funcId) \
-	(IRO[156].base + ((funcId) * IRO[156].m1))
+	(IRO[159].base + ((funcId) * IRO[159].m1))
 #define CSTORM_FUNC_EN_OFFSET(funcId) \
-	(IRO[146].base + ((funcId) * IRO[146].m1))
-#define CSTORM_FUNCTION_MODE_OFFSET (IRO[153].base)
-#define CSTORM_IGU_MODE_OFFSET (IRO[154].base)
+	(IRO[149].base + ((funcId) * IRO[149].m1))
+#define CSTORM_IGU_MODE_OFFSET (IRO[157].base)
 #define CSTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
-	(IRO[311].base + ((pfId) * IRO[311].m1))
+	(IRO[315].base + ((pfId) * IRO[315].m1))
 #define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
-	(IRO[312].base + ((pfId) * IRO[312].m1))
-	#define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \
-	(IRO[304].base + ((pfId) * IRO[304].m1) + ((iscsiEqId) * \
-	IRO[304].m2))
-	#define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \
-	(IRO[306].base + ((pfId) * IRO[306].m1) + ((iscsiEqId) * \
-	IRO[306].m2))
-	#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \
-	(IRO[305].base + ((pfId) * IRO[305].m1) + ((iscsiEqId) * \
-	IRO[305].m2))
-	#define \
-	CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \
-	(IRO[307].base + ((pfId) * IRO[307].m1) + ((iscsiEqId) * \
-	IRO[307].m2))
-	#define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \
-	(IRO[303].base + ((pfId) * IRO[303].m1) + ((iscsiEqId) * \
-	IRO[303].m2))
-	#define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \
-	(IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * \
-	IRO[309].m2))
-	#define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \
-	(IRO[308].base + ((pfId) * IRO[308].m1) + ((iscsiEqId) * \
-	IRO[308].m2))
+	(IRO[316].base + ((pfId) * IRO[316].m1))
+#define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \
+	(IRO[308].base + ((pfId) * IRO[308].m1) + ((iscsiEqId) * IRO[308].m2))
+#define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \
+	(IRO[310].base + ((pfId) * IRO[310].m1) + ((iscsiEqId) * IRO[310].m2))
+#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \
+	(IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * IRO[309].m2))
+#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \
+	(IRO[311].base + ((pfId) * IRO[311].m1) + ((iscsiEqId) * IRO[311].m2))
+#define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \
+	(IRO[307].base + ((pfId) * IRO[307].m1) + ((iscsiEqId) * IRO[307].m2))
+#define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \
+	(IRO[313].base + ((pfId) * IRO[313].m1) + ((iscsiEqId) * IRO[313].m2))
+#define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \
+	(IRO[312].base + ((pfId) * IRO[312].m1) + ((iscsiEqId) * IRO[312].m2))
 #define CSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
-	(IRO[310].base + ((pfId) * IRO[310].m1))
+	(IRO[314].base + ((pfId) * IRO[314].m1))
 #define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
-	(IRO[302].base + ((pfId) * IRO[302].m1))
+	(IRO[306].base + ((pfId) * IRO[306].m1))
 #define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
-	(IRO[301].base + ((pfId) * IRO[301].m1))
+	(IRO[305].base + ((pfId) * IRO[305].m1))
 #define CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
-	(IRO[300].base + ((pfId) * IRO[300].m1))
-#define CSTORM_PATH_ID_OFFSET (IRO[159].base)
+	(IRO[304].base + ((pfId) * IRO[304].m1))
+#define CSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
+	(IRO[151].base + ((funcId) * IRO[151].m1))
 #define CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(pfId) \
-	(IRO[137].base + ((pfId) * IRO[137].m1))
-#define CSTORM_SP_STATUS_BLOCK_OFFSET(pfId) \
-	(IRO[136].base + ((pfId) * IRO[136].m1))
-#define CSTORM_SP_STATUS_BLOCK_SIZE (IRO[136].size)
-#define CSTORM_SP_SYNC_BLOCK_OFFSET(pfId) \
-	(IRO[138].base + ((pfId) * IRO[138].m1))
-#define CSTORM_SP_SYNC_BLOCK_SIZE (IRO[138].size)
-#define CSTORM_STATS_FLAGS_OFFSET(pfId) \
+	(IRO[142].base + ((pfId) * IRO[142].m1))
+#define CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(pfId) \
 	(IRO[143].base + ((pfId) * IRO[143].m1))
+#define CSTORM_SP_STATUS_BLOCK_OFFSET(pfId) \
+	(IRO[141].base + ((pfId) * IRO[141].m1))
+#define CSTORM_SP_STATUS_BLOCK_SIZE (IRO[141].size)
+#define CSTORM_SP_SYNC_BLOCK_OFFSET(pfId) \
+	(IRO[144].base + ((pfId) * IRO[144].m1))
+#define CSTORM_SP_SYNC_BLOCK_SIZE (IRO[144].size)
+#define CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(sbId, hcIndex) \
+	(IRO[136].base + ((sbId) * IRO[136].m1) + ((hcIndex) * IRO[136].m2))
 #define CSTORM_STATUS_BLOCK_DATA_OFFSET(sbId) \
-	(IRO[129].base + ((sbId) * IRO[129].m1))
+	(IRO[133].base + ((sbId) * IRO[133].m1))
+#define CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(sbId) \
+	(IRO[134].base + ((sbId) * IRO[134].m1))
+#define CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(sbId, hcIndex) \
+	(IRO[135].base + ((sbId) * IRO[135].m1) + ((hcIndex) * IRO[135].m2))
 #define CSTORM_STATUS_BLOCK_OFFSET(sbId) \
-	(IRO[128].base + ((sbId) * IRO[128].m1))
-#define CSTORM_STATUS_BLOCK_SIZE (IRO[128].size)
-#define CSTORM_SYNC_BLOCK_OFFSET(sbId) \
 	(IRO[132].base + ((sbId) * IRO[132].m1))
-#define CSTORM_SYNC_BLOCK_SIZE (IRO[132].size)
+#define CSTORM_STATUS_BLOCK_SIZE (IRO[132].size)
+#define CSTORM_SYNC_BLOCK_OFFSET(sbId) \
+	(IRO[137].base + ((sbId) * IRO[137].m1))
+#define CSTORM_SYNC_BLOCK_SIZE (IRO[137].size)
 #define CSTORM_VF_PF_CHANNEL_STATE_OFFSET(vfId) \
-	(IRO[151].base + ((vfId) * IRO[151].m1))
+	(IRO[155].base + ((vfId) * IRO[155].m1))
 #define CSTORM_VF_PF_CHANNEL_VALID_OFFSET(vfId) \
-	(IRO[152].base + ((vfId) * IRO[152].m1))
+	(IRO[156].base + ((vfId) * IRO[156].m1))
 #define CSTORM_VF_TO_PF_OFFSET(funcId) \
-	(IRO[147].base + ((funcId) * IRO[147].m1))
-#define TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET (IRO[199].base)
+	(IRO[150].base + ((funcId) * IRO[150].m1))
+#define TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET (IRO[204].base)
 #define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(pfId) \
-	(IRO[198].base + ((pfId) * IRO[198].m1))
-#define TSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[99].base)
+	(IRO[203].base + ((pfId) * IRO[203].m1))
+#define TSTORM_ASSERT_LIST_INDEX_OFFSET	(IRO[102].base)
 #define TSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
-	(IRO[98].base + ((assertListEntry) * IRO[98].m1))
-	#define TSTORM_CLIENT_CONFIG_OFFSET(portId, clientId) \
-	(IRO[197].base + ((portId) * IRO[197].m1) + ((clientId) * \
-	IRO[197].m2))
-#define TSTORM_COMMON_SAFC_WORKAROUND_ENABLE_OFFSET (IRO[104].base)
+	(IRO[101].base + ((assertListEntry) * IRO[101].m1))
+#define TSTORM_COMMON_SAFC_WORKAROUND_ENABLE_OFFSET (IRO[107].base)
 #define TSTORM_COMMON_SAFC_WORKAROUND_TIMEOUT_10USEC_OFFSET \
-	(IRO[105].base)
-#define TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(pfId) \
-	(IRO[96].base + ((pfId) * IRO[96].m1))
-#define TSTORM_FUNC_EN_OFFSET(funcId) \
-	(IRO[101].base + ((funcId) * IRO[101].m1))
+	(IRO[108].base)
 #define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(pfId) \
-	(IRO[195].base + ((pfId) * IRO[195].m1))
-#define TSTORM_FUNCTION_MODE_OFFSET (IRO[103].base)
-#define TSTORM_INDIRECTION_TABLE_OFFSET(pfId) \
-	(IRO[91].base + ((pfId) * IRO[91].m1))
-#define TSTORM_INDIRECTION_TABLE_SIZE (IRO[91].size)
-	#define \
-	TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(pfId, iscsiConBufPblEntry) \
-	(IRO[260].base + ((pfId) * IRO[260].m1) + ((iscsiConBufPblEntry) \
-	* IRO[260].m2))
+	(IRO[201].base + ((pfId) * IRO[201].m1))
+#define TSTORM_FUNC_EN_OFFSET(funcId) \
+	(IRO[103].base + ((funcId) * IRO[103].m1))
 #define TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
-	(IRO[264].base + ((pfId) * IRO[264].m1))
+	(IRO[271].base + ((pfId) * IRO[271].m1))
 #define TSTORM_ISCSI_L2_ISCSI_OOO_CID_TABLE_OFFSET(pfId) \
-	(IRO[265].base + ((pfId) * IRO[265].m1))
+	(IRO[272].base + ((pfId) * IRO[272].m1))
 #define TSTORM_ISCSI_L2_ISCSI_OOO_CLIENT_ID_TABLE_OFFSET(pfId) \
-	(IRO[266].base + ((pfId) * IRO[266].m1))
-#define TSTORM_ISCSI_L2_ISCSI_OOO_PROD_OFFSET(pfId) \
-	(IRO[267].base + ((pfId) * IRO[267].m1))
-#define TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
-	(IRO[263].base + ((pfId) * IRO[263].m1))
-#define TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
-	(IRO[262].base + ((pfId) * IRO[262].m1))
-#define TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
-	(IRO[261].base + ((pfId) * IRO[261].m1))
-#define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
-	(IRO[259].base + ((pfId) * IRO[259].m1))
-#define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \
-	(IRO[269].base + ((pfId) * IRO[269].m1))
-#define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
-	(IRO[256].base + ((pfId) * IRO[256].m1))
-#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
-	(IRO[257].base + ((pfId) * IRO[257].m1))
-#define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
-	(IRO[258].base + ((pfId) * IRO[258].m1))
-#define TSTORM_MAC_FILTER_CONFIG_OFFSET(pfId) \
-	(IRO[196].base + ((pfId) * IRO[196].m1))
-	#define TSTORM_PER_COUNTER_ID_STATS_OFFSET(portId, tStatCntId) \
-	(IRO[100].base + ((portId) * IRO[100].m1) + ((tStatCntId) * \
-	IRO[100].m2))
-#define TSTORM_STATS_FLAGS_OFFSET(pfId) \
-	(IRO[95].base + ((pfId) * IRO[95].m1))
-#define TSTORM_TCP_MAX_CWND_OFFSET(pfId) \
-	(IRO[211].base + ((pfId) * IRO[211].m1))
-#define TSTORM_VF_TO_PF_OFFSET(funcId) \
-	(IRO[102].base + ((funcId) * IRO[102].m1))
-#define USTORM_AGG_DATA_OFFSET (IRO[201].base)
-#define USTORM_AGG_DATA_SIZE (IRO[201].size)
-#define USTORM_ASSERT_LIST_INDEX_OFFSET (IRO[170].base)
-#define USTORM_ASSERT_LIST_OFFSET(assertListEntry) \
-	(IRO[169].base + ((assertListEntry) * IRO[169].m1))
-#define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \
-	(IRO[178].base + ((portId) * IRO[178].m1))
-#define USTORM_ETH_STATS_QUERY_ADDR_OFFSET(pfId) \
-	(IRO[172].base + ((pfId) * IRO[172].m1))
-#define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \
-	(IRO[313].base + ((pfId) * IRO[313].m1))
-#define USTORM_FUNC_EN_OFFSET(funcId) \
-	(IRO[174].base + ((funcId) * IRO[174].m1))
-#define USTORM_FUNCTION_MODE_OFFSET (IRO[177].base)
-#define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
-	(IRO[277].base + ((pfId) * IRO[277].m1))
-#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
-	(IRO[278].base + ((pfId) * IRO[278].m1))
-#define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
-	(IRO[282].base + ((pfId) * IRO[282].m1))
-#define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \
-	(IRO[279].base + ((pfId) * IRO[279].m1))
-#define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
-	(IRO[275].base + ((pfId) * IRO[275].m1))
-#define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
-	(IRO[274].base + ((pfId) * IRO[274].m1))
-#define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
 	(IRO[273].base + ((pfId) * IRO[273].m1))
-#define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
+#define TSTORM_ISCSI_L2_ISCSI_OOO_PROD_OFFSET(pfId) \
+	(IRO[274].base + ((pfId) * IRO[274].m1))
+#define TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
+	(IRO[270].base + ((pfId) * IRO[270].m1))
+#define TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
+	(IRO[269].base + ((pfId) * IRO[269].m1))
+#define TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
+	(IRO[268].base + ((pfId) * IRO[268].m1))
+#define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
+	(IRO[267].base + ((pfId) * IRO[267].m1))
+#define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \
 	(IRO[276].base + ((pfId) * IRO[276].m1))
-#define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \
-	(IRO[280].base + ((pfId) * IRO[280].m1))
-#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
+#define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
+	(IRO[263].base + ((pfId) * IRO[263].m1))
+#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
+	(IRO[264].base + ((pfId) * IRO[264].m1))
+#define TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfId) \
+	(IRO[265].base + ((pfId) * IRO[265].m1))
+#define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
+	(IRO[266].base + ((pfId) * IRO[266].m1))
+#define TSTORM_MAC_FILTER_CONFIG_OFFSET(pfId) \
+	(IRO[202].base + ((pfId) * IRO[202].m1))
+#define TSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
+	(IRO[105].base + ((funcId) * IRO[105].m1))
+#define TSTORM_TCP_MAX_CWND_OFFSET(pfId) \
+	(IRO[216].base + ((pfId) * IRO[216].m1))
+#define TSTORM_VF_TO_PF_OFFSET(funcId) \
+	(IRO[104].base + ((funcId) * IRO[104].m1))
+#define USTORM_AGG_DATA_OFFSET (IRO[206].base)
+#define USTORM_AGG_DATA_SIZE (IRO[206].size)
+#define USTORM_ASSERT_LIST_INDEX_OFFSET	(IRO[177].base)
+#define USTORM_ASSERT_LIST_OFFSET(assertListEntry) \
+	(IRO[176].base + ((assertListEntry) * IRO[176].m1))
+#define USTORM_CQE_PAGE_NEXT_OFFSET(portId, clientId) \
+	(IRO[205].base + ((portId) * IRO[205].m1) + ((clientId) * \
+	IRO[205].m2))
+#define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \
+	(IRO[183].base + ((portId) * IRO[183].m1))
+#define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \
+	(IRO[317].base + ((pfId) * IRO[317].m1))
+#define USTORM_FUNC_EN_OFFSET(funcId) \
+	(IRO[178].base + ((funcId) * IRO[178].m1))
+#define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
 	(IRO[281].base + ((pfId) * IRO[281].m1))
-#define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(pfId) \
-	(IRO[176].base + ((pfId) * IRO[176].m1))
-	#define USTORM_PER_COUNTER_ID_STATS_OFFSET(portId, uStatCntId) \
-	(IRO[173].base + ((portId) * IRO[173].m1) + ((uStatCntId) * \
-	IRO[173].m2))
-	#define USTORM_RX_PRODS_E1X_OFFSET(portId, clientId) \
-	(IRO[204].base + ((portId) * IRO[204].m1) + ((clientId) * \
-	IRO[204].m2))
-#define USTORM_RX_PRODS_E2_OFFSET(qzoneId) \
-	(IRO[205].base + ((qzoneId) * IRO[205].m1))
-#define USTORM_STATS_FLAGS_OFFSET(pfId) \
-	(IRO[171].base + ((pfId) * IRO[171].m1))
-#define USTORM_TPA_BTR_OFFSET (IRO[202].base)
-#define USTORM_TPA_BTR_SIZE (IRO[202].size)
-#define USTORM_VF_TO_PF_OFFSET(funcId) \
-	(IRO[175].base + ((funcId) * IRO[175].m1))
-#define XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE (IRO[59].base)
-#define XSTORM_AGG_INT_FINAL_CLEANUP_INDEX (IRO[58].base)
-#define XSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[54].base)
-#define XSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
-	(IRO[53].base + ((assertListEntry) * IRO[53].m1))
-#define XSTORM_CMNG_PER_PORT_VARS_OFFSET(portId) \
-	(IRO[47].base + ((portId) * IRO[47].m1))
-#define XSTORM_E1HOV_OFFSET(pfId) \
-	(IRO[55].base + ((pfId) * IRO[55].m1))
-#define XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(pfId) \
-	(IRO[45].base + ((pfId) * IRO[45].m1))
-#define XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(pfId) \
-	(IRO[49].base + ((pfId) * IRO[49].m1))
-#define XSTORM_FUNC_EN_OFFSET(funcId) \
-	(IRO[51].base + ((funcId) * IRO[51].m1))
-#define XSTORM_FUNCTION_MODE_OFFSET (IRO[56].base)
-#define XSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
-	(IRO[290].base + ((pfId) * IRO[290].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \
-	(IRO[293].base + ((pfId) * IRO[293].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \
-	(IRO[294].base + ((pfId) * IRO[294].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \
-	(IRO[295].base + ((pfId) * IRO[295].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \
-	(IRO[296].base + ((pfId) * IRO[296].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \
-	(IRO[297].base + ((pfId) * IRO[297].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \
-	(IRO[298].base + ((pfId) * IRO[298].m1))
-#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \
-	(IRO[299].base + ((pfId) * IRO[299].m1))
-#define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
-	(IRO[289].base + ((pfId) * IRO[289].m1))
-#define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
-	(IRO[288].base + ((pfId) * IRO[288].m1))
-#define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
-	(IRO[287].base + ((pfId) * IRO[287].m1))
-#define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
-	(IRO[292].base + ((pfId) * IRO[292].m1))
-#define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \
-	(IRO[291].base + ((pfId) * IRO[291].m1))
-#define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfId) \
+#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
+	(IRO[282].base + ((pfId) * IRO[282].m1))
+#define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
 	(IRO[286].base + ((pfId) * IRO[286].m1))
-#define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
-	(IRO[285].base + ((pfId) * IRO[285].m1))
-#define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfId) \
-	(IRO[284].base + ((pfId) * IRO[284].m1))
-#define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfId) \
+#define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \
 	(IRO[283].base + ((pfId) * IRO[283].m1))
-#define XSTORM_PATH_ID_OFFSET (IRO[65].base)
-	#define XSTORM_PER_COUNTER_ID_STATS_OFFSET(portId, xStatCntId) \
-	(IRO[50].base + ((portId) * IRO[50].m1) + ((xStatCntId) * \
-	IRO[50].m2))
+#define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
+	(IRO[279].base + ((pfId) * IRO[279].m1))
+#define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
+	(IRO[278].base + ((pfId) * IRO[278].m1))
+#define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
+	(IRO[277].base + ((pfId) * IRO[277].m1))
+#define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
+	(IRO[280].base + ((pfId) * IRO[280].m1))
+#define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \
+	(IRO[284].base + ((pfId) * IRO[284].m1))
+#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
+	(IRO[285].base + ((pfId) * IRO[285].m1))
+#define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(pfId) \
+	(IRO[182].base + ((pfId) * IRO[182].m1))
+#define USTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
+	(IRO[180].base + ((funcId) * IRO[180].m1))
+#define USTORM_RX_PRODS_E1X_OFFSET(portId, clientId) \
+	(IRO[209].base + ((portId) * IRO[209].m1) + ((clientId) * \
+	IRO[209].m2))
+#define USTORM_RX_PRODS_E2_OFFSET(qzoneId) \
+	(IRO[210].base + ((qzoneId) * IRO[210].m1))
+#define USTORM_TPA_BTR_OFFSET (IRO[207].base)
+#define USTORM_TPA_BTR_SIZE (IRO[207].size)
+#define USTORM_VF_TO_PF_OFFSET(funcId) \
+	(IRO[179].base + ((funcId) * IRO[179].m1))
+#define XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE (IRO[67].base)
+#define XSTORM_AGG_INT_FINAL_CLEANUP_INDEX (IRO[66].base)
+#define XSTORM_ASSERT_LIST_INDEX_OFFSET	(IRO[51].base)
+#define XSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
+	(IRO[50].base + ((assertListEntry) * IRO[50].m1))
+#define XSTORM_CMNG_PER_PORT_VARS_OFFSET(portId) \
+	(IRO[43].base + ((portId) * IRO[43].m1))
+#define XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(pfId) \
+	(IRO[45].base + ((pfId) * IRO[45].m1))
+#define XSTORM_FUNC_EN_OFFSET(funcId) \
+	(IRO[47].base + ((funcId) * IRO[47].m1))
+#define XSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
+	(IRO[294].base + ((pfId) * IRO[294].m1))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \
+	(IRO[297].base + ((pfId) * IRO[297].m1))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \
+	(IRO[298].base + ((pfId) * IRO[298].m1))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \
+	(IRO[299].base + ((pfId) * IRO[299].m1))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \
+	(IRO[300].base + ((pfId) * IRO[300].m1))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \
+	(IRO[301].base + ((pfId) * IRO[301].m1))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \
+	(IRO[302].base + ((pfId) * IRO[302].m1))
+#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \
+	(IRO[303].base + ((pfId) * IRO[303].m1))
+#define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
+	(IRO[293].base + ((pfId) * IRO[293].m1))
+#define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
+	(IRO[292].base + ((pfId) * IRO[292].m1))
+#define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
+	(IRO[291].base + ((pfId) * IRO[291].m1))
+#define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
+	(IRO[296].base + ((pfId) * IRO[296].m1))
+#define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \
+	(IRO[295].base + ((pfId) * IRO[295].m1))
+#define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfId) \
+	(IRO[290].base + ((pfId) * IRO[290].m1))
+#define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
+	(IRO[289].base + ((pfId) * IRO[289].m1))
+#define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfId) \
+	(IRO[288].base + ((pfId) * IRO[288].m1))
+#define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfId) \
+	(IRO[287].base + ((pfId) * IRO[287].m1))
 #define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(pfId) \
-	(IRO[48].base + ((pfId) * IRO[48].m1))
+	(IRO[44].base + ((pfId) * IRO[44].m1))
+#define XSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
+	(IRO[49].base + ((funcId) * IRO[49].m1))
 #define XSTORM_SPQ_DATA_OFFSET(funcId) \
 	(IRO[32].base + ((funcId) * IRO[32].m1))
 #define XSTORM_SPQ_DATA_SIZE (IRO[32].size)
@@ -260,42 +232,37 @@
 	(IRO[30].base + ((funcId) * IRO[30].m1))
 #define XSTORM_SPQ_PROD_OFFSET(funcId) \
 	(IRO[31].base + ((funcId) * IRO[31].m1))
-#define XSTORM_STATS_FLAGS_OFFSET(pfId) \
-	(IRO[43].base + ((pfId) * IRO[43].m1))
 #define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(portId) \
-	(IRO[206].base + ((portId) * IRO[206].m1))
+	(IRO[211].base + ((portId) * IRO[211].m1))
 #define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(portId) \
-	(IRO[207].base + ((portId) * IRO[207].m1))
+	(IRO[212].base + ((portId) * IRO[212].m1))
 #define XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfId) \
-	(IRO[209].base + (((pfId)>>1) * IRO[209].m1) + (((pfId)&1) * \
-	IRO[209].m2))
+	(IRO[214].base + (((pfId)>>1) * IRO[214].m1) + (((pfId)&1) * \
+	IRO[214].m2))
 #define XSTORM_VF_TO_PF_OFFSET(funcId) \
-	(IRO[52].base + ((funcId) * IRO[52].m1))
+	(IRO[48].base + ((funcId) * IRO[48].m1))
 #define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0
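All of the *_OFFSET macros above share one shape: a firmware-generated IRO[] entry supplies a base plus one stride per index. The struct layout sketched below is an assumption (only the base/m1/m2/size fields are visible in this header) and the numbers are invented:

#include <stdio.h>

/* assumed shape of an IRO entry; the real definition lives elsewhere */
struct iro {
	unsigned int base, m1, m2, size;
};

static const struct iro IRO[] = {
	{ 0x4000, 0x80, 0x8, 0x40 },	/* made-up values */
};

/* same shape as e.g. CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) */
#define TWO_DIM_OFFSET(n, a, b) \
	(IRO[n].base + ((a) * IRO[n].m1) + ((b) * IRO[n].m2))

int main(void)
{
	/* 0x4000 + 2 * 0x80 + 3 * 0x8 == 0x4118 */
	printf("0x%x\n", TWO_DIM_OFFSET(0, 2, 3));
	return 0;
}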
 
-/* RSS hash types */
-#define DEFAULT_HASH_TYPE 0
-#define IPV4_HASH_TYPE 1
-#define TCP_IPV4_HASH_TYPE 2
-#define IPV6_HASH_TYPE 3
-#define TCP_IPV6_HASH_TYPE 4
-#define VLAN_PRI_HASH_TYPE 5
-#define E1HOV_PRI_HASH_TYPE 6
-#define DSCP_HASH_TYPE 7
+/**
+ * This file defines HSI constants for the ETH flow
+ */
+#ifdef _EVEREST_MICROCODE
+#include "Microcode\Generated\DataTypes\eth_rx_bd.h"
+#include "Microcode\Generated\DataTypes\eth_tx_bd.h"
+#include "Microcode\Generated\DataTypes\eth_rx_cqe.h"
+#include "Microcode\Generated\DataTypes\eth_rx_sge.h"
+#include "Microcode\Generated\DataTypes\eth_rx_cqe_next_page.h"
+#endif
 
 
 /* Ethernet Ring parameters */
 #define X_ETH_LOCAL_RING_SIZE 13
-#define FIRST_BD_IN_PKT 0
+#define FIRST_BD_IN_PKT	0
 #define PARSE_BD_INDEX 1
 #define NUM_OF_ETH_BDS_IN_PAGE ((PAGE_SIZE)/(STRUCT_SIZE(eth_tx_bd)/8))
 #define U_ETH_NUM_OF_SGES_TO_FETCH 8
 #define U_ETH_MAX_SGES_FOR_PACKET 3
 
-/*Tx params*/
-#define X_ETH_NO_VLAN 0
-#define X_ETH_OUTBAND_VLAN 1
-#define X_ETH_INBAND_VLAN 2
 /* Rx ring params */
 #define U_ETH_LOCAL_BD_RING_SIZE 8
 #define U_ETH_LOCAL_SGE_RING_SIZE 10
@@ -311,79 +278,64 @@
 #define U_ETH_BDS_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_bd)/8))
 #define U_ETH_SGES_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_sge)/8))
 
-#define U_ETH_BDS_PER_PAGE_MASK (U_ETH_BDS_PER_PAGE-1)
-#define U_ETH_CQE_PER_PAGE_MASK (TU_ETH_CQES_PER_PAGE-1)
+#define U_ETH_BDS_PER_PAGE_MASK	(U_ETH_BDS_PER_PAGE-1)
+#define U_ETH_CQE_PER_PAGE_MASK	(TU_ETH_CQES_PER_PAGE-1)
 #define U_ETH_SGES_PER_PAGE_MASK (U_ETH_SGES_PER_PAGE-1)
 
 #define U_ETH_UNDEFINED_Q 0xFF
 
-/* values of command IDs in the ramrod message */
-#define RAMROD_CMD_ID_ETH_UNUSED 0
-#define RAMROD_CMD_ID_ETH_CLIENT_SETUP 1
-#define RAMROD_CMD_ID_ETH_UPDATE 2
-#define RAMROD_CMD_ID_ETH_HALT 3
-#define RAMROD_CMD_ID_ETH_FORWARD_SETUP 4
-#define RAMROD_CMD_ID_ETH_ACTIVATE 5
-#define RAMROD_CMD_ID_ETH_DEACTIVATE 6
-#define RAMROD_CMD_ID_ETH_EMPTY 7
-#define RAMROD_CMD_ID_ETH_TERMINATE 8
-
-/* command values for set mac command */
-#define T_ETH_MAC_COMMAND_SET 0
-#define T_ETH_MAC_COMMAND_INVALIDATE 1
-
 #define T_ETH_INDIRECTION_TABLE_SIZE 128
+#define T_ETH_RSS_KEY 10
+#define ETH_NUM_OF_RSS_ENGINES_E2 72
+
+#define FILTER_RULES_COUNT 16
+#define MULTICAST_RULES_COUNT 16
+#define CLASSIFY_RULES_COUNT 16
 
 /*The CRC32 seed, that is used for the hash(reduction) multicast address */
-#define T_ETH_CRC32_HASH_SEED 0x00000000
+#define ETH_CRC32_HASH_SEED 0x00000000
+
+#define ETH_CRC32_HASH_BIT_SIZE	(8)
+#define ETH_CRC32_HASH_MASK EVAL((1<<ETH_CRC32_HASH_BIT_SIZE)-1)
 
 /* Maximal L2 clients supported */
 #define ETH_MAX_RX_CLIENTS_E1 18
 #define ETH_MAX_RX_CLIENTS_E1H 28
+#define ETH_MAX_RX_CLIENTS_E2 152
 
-#define MAX_STAT_COUNTER_ID ETH_MAX_RX_CLIENTS_E1H
+/* Maximal statistics client Ids */
+#define MAX_STAT_COUNTER_ID_E1 36
+#define MAX_STAT_COUNTER_ID_E1H	56
+#define MAX_STAT_COUNTER_ID_E2 140
+
+#define MAX_MAC_CREDIT_E1 192 /* Per Chip */
+#define MAX_MAC_CREDIT_E1H 256 /* Per Chip */
+#define MAX_MAC_CREDIT_E2 272 /* Per Path */
+#define MAX_VLAN_CREDIT_E1 0 /* Per Chip */
+#define MAX_VLAN_CREDIT_E1H 0 /* Per Chip */
+#define MAX_VLAN_CREDIT_E2 272 /* Per Path */
+
 
 /* Maximal aggregation queues supported */
 #define ETH_MAX_AGGREGATION_QUEUES_E1 32
-#define ETH_MAX_AGGREGATION_QUEUES_E1H 64
-
-/* ETH RSS modes */
-#define ETH_RSS_MODE_DISABLED 0
-#define ETH_RSS_MODE_REGULAR 1
-#define ETH_RSS_MODE_VLAN_PRI 2
-#define ETH_RSS_MODE_E1HOV_PRI 3
-#define ETH_RSS_MODE_IP_DSCP 4
-#define ETH_RSS_MODE_E2_INTEG 5
+#define ETH_MAX_AGGREGATION_QUEUES_E1H_E2 64
 
 
-/* ETH vlan filtering modes */
-#define ETH_VLAN_FILTER_ANY_VLAN 0 /* Don't filter by vlan */
-#define ETH_VLAN_FILTER_SPECIFIC_VLAN \
-	1 /* Only the vlan_id is allowed */
-#define ETH_VLAN_FILTER_CLASSIFY \
-	2 /* vlan will be added to CAM for classification */
+#define ETH_NUM_OF_MCAST_BINS 256
+#define ETH_NUM_OF_MCAST_ENGINES_E2 72
 
-/* Fast path CQE selection */
-#define ETH_FP_CQE_REGULAR 0
-#define ETH_FP_CQE_SGL 1
-#define ETH_FP_CQE_RAW 2
+#define ETH_MIN_RX_CQES_WITHOUT_TPA (MAX_RAMRODS_PER_PORT + 3)
+#define ETH_MIN_RX_CQES_WITH_TPA_E1 \
+	(ETH_MAX_AGGREGATION_QUEUES_E1 + ETH_MIN_RX_CQES_WITHOUT_TPA)
+#define ETH_MIN_RX_CQES_WITH_TPA_E1H_E2 \
+	(ETH_MAX_AGGREGATION_QUEUES_E1H_E2 + ETH_MIN_RX_CQES_WITHOUT_TPA)
+
+#define DISABLE_STATISTIC_COUNTER_ID_VALUE 0
 
 
 /**
-* This file defines HSI constants common to all microcode flows
-*/
-
-/* Connection types */
-#define ETH_CONNECTION_TYPE 0
-#define TOE_CONNECTION_TYPE 1
-#define RDMA_CONNECTION_TYPE 2
-#define ISCSI_CONNECTION_TYPE 3
-#define FCOE_CONNECTION_TYPE 4
-#define RESERVED_CONNECTION_TYPE_0 5
-#define RESERVED_CONNECTION_TYPE_1 6
-#define RESERVED_CONNECTION_TYPE_2 7
-#define NONE_CONNECTION_TYPE 8
-
+ * This file defines HSI constants common to all microcode flows
+ */
 
 #define PROTOCOL_STATE_BIT_OFFSET 6
 
@@ -391,25 +343,9 @@
 #define TOE_STATE (TOE_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
 #define RDMA_STATE (RDMA_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
 
-/* values of command IDs in the ramrod message */
-#define RAMROD_CMD_ID_COMMON_FUNCTION_START 1
-#define RAMROD_CMD_ID_COMMON_FUNCTION_STOP 2
-#define RAMROD_CMD_ID_COMMON_CFC_DEL 3
-#define RAMROD_CMD_ID_COMMON_CFC_DEL_WB 4
-#define RAMROD_CMD_ID_COMMON_SET_MAC 5
-#define RAMROD_CMD_ID_COMMON_STAT_QUERY 6
-#define RAMROD_CMD_ID_COMMON_STOP_TRAFFIC 7
-#define RAMROD_CMD_ID_COMMON_START_TRAFFIC 8
-
 /* microcode fixed page page size 4K (chains and ring segments) */
 #define MC_PAGE_SIZE 4096
 
-
-/* Host coalescing constants */
-#define HC_IGU_BC_MODE 0
-#define HC_IGU_NBC_MODE 1
-/* Host coalescing constants. E1 includes E1H as well */
-
 /* Number of indices per slow-path SB */
 #define HC_SP_SB_MAX_INDICES 16
 
@@ -418,30 +354,17 @@
 #define HC_SB_MAX_INDICES_E2 8
 
 #define HC_SB_MAX_SB_E1X 32
-#define HC_SB_MAX_SB_E2 136
+#define HC_SB_MAX_SB_E2	136
 
 #define HC_SP_SB_ID 0xde
 
-#define HC_REGULAR_SEGMENT 0
-#define HC_DEFAULT_SEGMENT 1
 #define HC_SB_MAX_SM 2
 
 #define HC_SB_MAX_DYNAMIC_INDICES 4
-#define HC_FUNCTION_DISABLED 0xff
-/* used by the driver to get the SB offset */
-#define USTORM_ID 0
-#define CSTORM_ID 1
-#define XSTORM_ID 2
-#define TSTORM_ID 3
-#define ATTENTION_ID 4
 
 /* max number of slow path commands per port */
 #define MAX_RAMRODS_PER_PORT 8
 
-/* values for RX ETH CQE type field */
-#define RX_ETH_CQE_TYPE_ETH_FASTPATH 0
-#define RX_ETH_CQE_TYPE_ETH_RAMROD 1
-
 
 /**** DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/
 
@@ -451,7 +374,7 @@
 
 #define XSEMI_CLK1_RESUL_CHIP (1e-3)
 
-#define SDM_TIMER_TICK_RESUL_CHIP (4*(1e-6))
+#define SDM_TIMER_TICK_RESUL_CHIP (4 * (1e-6))
 
 /**** END DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/
 
@@ -460,72 +383,28 @@
 
 #define FW_LOG_LIST_SIZE 50
 
-#define NUM_OF_PROTOCOLS 4
 #define NUM_OF_SAFC_BITS 16
 #define MAX_COS_NUMBER 4
-
-#define FAIRNESS_COS_WRR_MODE 0
-#define FAIRNESS_COS_ETS_MODE 1
-
-
-/* Priority Flow Control (PFC) */
+#define MAX_TRAFFIC_TYPES 8
 #define MAX_PFC_PRIORITIES 8
-#define MAX_PFC_TRAFFIC_TYPES 8
-
-/* Available Traffic Types for Link Layer Flow Control */
-#define LLFC_TRAFFIC_TYPE_NW 0
-#define LLFC_TRAFFIC_TYPE_FCOE 1
-#define LLFC_TRAFFIC_TYPE_ISCSI 2
-	/***************** START OF E2 INTEGRATION \
-	CODE***************************************/
-#define LLFC_TRAFFIC_TYPE_NW_COS1_E2INTEG 3
-	/***************** END OF E2 INTEGRATION \
-	CODE***************************************/
-#define LLFC_TRAFFIC_TYPE_MAX 4
 
 	/* used by array traffic_type_to_priority[] to mark traffic type \
 	that is not mapped to priority*/
 #define LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED 0xFF
 
-#define LLFC_MODE_NONE 0
-#define LLFC_MODE_PFC 1
-#define LLFC_MODE_SAFC 2
-
-#define DCB_DISABLED 0
-#define DCB_ENABLED 1
-
-#define UNKNOWN_ADDRESS 0
-#define UNICAST_ADDRESS 1
-#define MULTICAST_ADDRESS 2
-#define BROADCAST_ADDRESS 3
-
-#define SINGLE_FUNCTION 0
-#define MULTI_FUNCTION_SD 1
-#define MULTI_FUNCTION_SI 2
-
-#define IP_V4 0
-#define IP_V6 1
-
 
 #define C_ERES_PER_PAGE \
 	(PAGE_SIZE / BITS_TO_BYTES(STRUCT_SIZE(event_ring_elem)))
 #define C_ERE_PER_PAGE_MASK (C_ERES_PER_PAGE - 1)
 
-#define EVENT_RING_OPCODE_VF_PF_CHANNEL 0
-#define EVENT_RING_OPCODE_FUNCTION_START 1
-#define EVENT_RING_OPCODE_FUNCTION_STOP 2
-#define EVENT_RING_OPCODE_CFC_DEL 3
-#define EVENT_RING_OPCODE_CFC_DEL_WB 4
-#define EVENT_RING_OPCODE_SET_MAC 5
-#define EVENT_RING_OPCODE_STAT_QUERY 6
-#define EVENT_RING_OPCODE_STOP_TRAFFIC 7
-#define EVENT_RING_OPCODE_START_TRAFFIC 8
-#define EVENT_RING_OPCODE_FORWARD_SETUP 9
+#define STATS_QUERY_CMD_COUNT 16
 
-#define VF_PF_CHANNEL_STATE_READY 0
-#define VF_PF_CHANNEL_STATE_WAITING_FOR_ACK 1
+#define NIV_LIST_TABLE_SIZE 4096
 
-#define VF_PF_CHANNEL_STATE_MAX_NUMBER 2
+#define INVALID_VNIC_ID	0xFF
+
+
+#define UNDEF_IRO 0x80000000
 
 
 #endif /* BNX2X_FW_DEFS_H */
diff --git a/drivers/net/bnx2x/bnx2x_hsi.h b/drivers/net/bnx2x/bnx2x_hsi.h
index cdf19fe..df53558 100644
--- a/drivers/net/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/bnx2x/bnx2x_hsi.h
@@ -11,7 +11,7 @@
 
 #include "bnx2x_fw_defs.h"
 
-#define FW_ENCODE_32BIT_PATTERN		0x1e1e1e1e
+#define FW_ENCODE_32BIT_PATTERN         0x1e1e1e1e
 
 struct license_key {
 	u32 reserved[6];
@@ -33,201 +33,366 @@
 	u32 reserved_b[4];
 };
 
-#define PORT_0				0
-#define PORT_1				1
-#define PORT_MAX			2
+
+#define PORT_0              0
+#define PORT_1              1
+#define PORT_MAX            2
 
 /****************************************************************************
- * Shared HW configuration						    *
+ * Shared HW configuration                                                  *
  ****************************************************************************/
-struct shared_hw_cfg {					 /* NVRAM Offset */
+#define PIN_CFG_NA                          0x00000000
+#define PIN_CFG_GPIO0_P0                    0x00000001
+#define PIN_CFG_GPIO1_P0                    0x00000002
+#define PIN_CFG_GPIO2_P0                    0x00000003
+#define PIN_CFG_GPIO3_P0                    0x00000004
+#define PIN_CFG_GPIO0_P1                    0x00000005
+#define PIN_CFG_GPIO1_P1                    0x00000006
+#define PIN_CFG_GPIO2_P1                    0x00000007
+#define PIN_CFG_GPIO3_P1                    0x00000008
+#define PIN_CFG_EPIO0                       0x00000009
+#define PIN_CFG_EPIO1                       0x0000000a
+#define PIN_CFG_EPIO2                       0x0000000b
+#define PIN_CFG_EPIO3                       0x0000000c
+#define PIN_CFG_EPIO4                       0x0000000d
+#define PIN_CFG_EPIO5                       0x0000000e
+#define PIN_CFG_EPIO6                       0x0000000f
+#define PIN_CFG_EPIO7                       0x00000010
+#define PIN_CFG_EPIO8                       0x00000011
+#define PIN_CFG_EPIO9                       0x00000012
+#define PIN_CFG_EPIO10                      0x00000013
+#define PIN_CFG_EPIO11                      0x00000014
+#define PIN_CFG_EPIO12                      0x00000015
+#define PIN_CFG_EPIO13                      0x00000016
+#define PIN_CFG_EPIO14                      0x00000017
+#define PIN_CFG_EPIO15                      0x00000018
+#define PIN_CFG_EPIO16                      0x00000019
+#define PIN_CFG_EPIO17                      0x0000001a
+#define PIN_CFG_EPIO18                      0x0000001b
+#define PIN_CFG_EPIO19                      0x0000001c
+#define PIN_CFG_EPIO20                      0x0000001d
+#define PIN_CFG_EPIO21                      0x0000001e
+#define PIN_CFG_EPIO22                      0x0000001f
+#define PIN_CFG_EPIO23                      0x00000020
+#define PIN_CFG_EPIO24                      0x00000021
+#define PIN_CFG_EPIO25                      0x00000022
+#define PIN_CFG_EPIO26                      0x00000023
+#define PIN_CFG_EPIO27                      0x00000024
+#define PIN_CFG_EPIO28                      0x00000025
+#define PIN_CFG_EPIO29                      0x00000026
+#define PIN_CFG_EPIO30                      0x00000027
+#define PIN_CFG_EPIO31                      0x00000028
+
+/* EPIO definition */
+#define EPIO_CFG_NA                         0x00000000
+#define EPIO_CFG_EPIO0                      0x00000001
+#define EPIO_CFG_EPIO1                      0x00000002
+#define EPIO_CFG_EPIO2                      0x00000003
+#define EPIO_CFG_EPIO3                      0x00000004
+#define EPIO_CFG_EPIO4                      0x00000005
+#define EPIO_CFG_EPIO5                      0x00000006
+#define EPIO_CFG_EPIO6                      0x00000007
+#define EPIO_CFG_EPIO7                      0x00000008
+#define EPIO_CFG_EPIO8                      0x00000009
+#define EPIO_CFG_EPIO9                      0x0000000a
+#define EPIO_CFG_EPIO10                     0x0000000b
+#define EPIO_CFG_EPIO11                     0x0000000c
+#define EPIO_CFG_EPIO12                     0x0000000d
+#define EPIO_CFG_EPIO13                     0x0000000e
+#define EPIO_CFG_EPIO14                     0x0000000f
+#define EPIO_CFG_EPIO15                     0x00000010
+#define EPIO_CFG_EPIO16                     0x00000011
+#define EPIO_CFG_EPIO17                     0x00000012
+#define EPIO_CFG_EPIO18                     0x00000013
+#define EPIO_CFG_EPIO19                     0x00000014
+#define EPIO_CFG_EPIO20                     0x00000015
+#define EPIO_CFG_EPIO21                     0x00000016
+#define EPIO_CFG_EPIO22                     0x00000017
+#define EPIO_CFG_EPIO23                     0x00000018
+#define EPIO_CFG_EPIO24                     0x00000019
+#define EPIO_CFG_EPIO25                     0x0000001a
+#define EPIO_CFG_EPIO26                     0x0000001b
+#define EPIO_CFG_EPIO27                     0x0000001c
+#define EPIO_CFG_EPIO28                     0x0000001d
+#define EPIO_CFG_EPIO29                     0x0000001e
+#define EPIO_CFG_EPIO30                     0x0000001f
+#define EPIO_CFG_EPIO31                     0x00000020
+
+
+struct shared_hw_cfg {			 /* NVRAM Offset */
 	/* Up to 16 bytes of NULL-terminated string */
-	u8  part_num[16];					/* 0x104 */
+	u8  part_num[16];		    /* 0x104 */
 
-	u32 config;						/* 0x114 */
-#define SHARED_HW_CFG_MDIO_VOLTAGE_MASK 	    0x00000001
-#define SHARED_HW_CFG_MDIO_VOLTAGE_SHIFT	    0
-#define SHARED_HW_CFG_MDIO_VOLTAGE_1_2V 	    0x00000000
-#define SHARED_HW_CFG_MDIO_VOLTAGE_2_5V 	    0x00000001
-#define SHARED_HW_CFG_MCP_RST_ON_CORE_RST_EN	    0x00000002
+	u32 config;			/* 0x114 */
+	#define SHARED_HW_CFG_MDIO_VOLTAGE_MASK             0x00000001
+		#define SHARED_HW_CFG_MDIO_VOLTAGE_SHIFT             0
+		#define SHARED_HW_CFG_MDIO_VOLTAGE_1_2V              0x00000000
+		#define SHARED_HW_CFG_MDIO_VOLTAGE_2_5V              0x00000001
+	#define SHARED_HW_CFG_MCP_RST_ON_CORE_RST_EN        0x00000002
 
-#define SHARED_HW_CFG_PORT_SWAP 		    0x00000004
+	#define SHARED_HW_CFG_PORT_SWAP                     0x00000004
 
-#define SHARED_HW_CFG_BEACON_WOL_EN		    0x00000008
+	#define SHARED_HW_CFG_BEACON_WOL_EN                 0x00000008
 
-#define SHARED_HW_CFG_MFW_SELECT_MASK		    0x00000700
-#define SHARED_HW_CFG_MFW_SELECT_SHIFT		    8
+	#define SHARED_HW_CFG_PCIE_GEN3_DISABLED            0x00000000
+	#define SHARED_HW_CFG_PCIE_GEN3_ENABLED             0x00000010
+
+	#define SHARED_HW_CFG_MFW_SELECT_MASK               0x00000700
+		#define SHARED_HW_CFG_MFW_SELECT_SHIFT               8
 	/* Whatever MFW found in NVM
 	   (if multiple found, priority order is: NC-SI, UMP, IPMI) */
-#define SHARED_HW_CFG_MFW_SELECT_DEFAULT	    0x00000000
-#define SHARED_HW_CFG_MFW_SELECT_NC_SI		    0x00000100
-#define SHARED_HW_CFG_MFW_SELECT_UMP		    0x00000200
-#define SHARED_HW_CFG_MFW_SELECT_IPMI		    0x00000300
+		#define SHARED_HW_CFG_MFW_SELECT_DEFAULT             0x00000000
+		#define SHARED_HW_CFG_MFW_SELECT_NC_SI               0x00000100
+		#define SHARED_HW_CFG_MFW_SELECT_UMP                 0x00000200
+		#define SHARED_HW_CFG_MFW_SELECT_IPMI                0x00000300
 	/* Use SPIO4 as an arbiter between: 0-NC_SI, 1-IPMI
 	  (can only be used when an add-in board, not BMC, pulls-down SPIO4) */
-#define SHARED_HW_CFG_MFW_SELECT_SPIO4_NC_SI_IPMI   0x00000400
+		#define SHARED_HW_CFG_MFW_SELECT_SPIO4_NC_SI_IPMI    0x00000400
 	/* Use SPIO4 as an arbiter between: 0-UMP, 1-IPMI
 	  (can only be used when an add-in board, not BMC, pulls-down SPIO4) */
-#define SHARED_HW_CFG_MFW_SELECT_SPIO4_UMP_IPMI     0x00000500
+		#define SHARED_HW_CFG_MFW_SELECT_SPIO4_UMP_IPMI      0x00000500
 	/* Use SPIO4 as an arbiter between: 0-NC-SI, 1-UMP
 	  (can only be used when an add-in board, not BMC, pulls-down SPIO4) */
-#define SHARED_HW_CFG_MFW_SELECT_SPIO4_NC_SI_UMP    0x00000600
+		#define SHARED_HW_CFG_MFW_SELECT_SPIO4_NC_SI_UMP     0x00000600
 
-#define SHARED_HW_CFG_LED_MODE_MASK		    0x000f0000
-#define SHARED_HW_CFG_LED_MODE_SHIFT		    16
-#define SHARED_HW_CFG_LED_MAC1			    0x00000000
-#define SHARED_HW_CFG_LED_PHY1			    0x00010000
-#define SHARED_HW_CFG_LED_PHY2			    0x00020000
-#define SHARED_HW_CFG_LED_PHY3			    0x00030000
-#define SHARED_HW_CFG_LED_MAC2			    0x00040000
-#define SHARED_HW_CFG_LED_PHY4			    0x00050000
-#define SHARED_HW_CFG_LED_PHY5			    0x00060000
-#define SHARED_HW_CFG_LED_PHY6			    0x00070000
-#define SHARED_HW_CFG_LED_MAC3			    0x00080000
-#define SHARED_HW_CFG_LED_PHY7			    0x00090000
-#define SHARED_HW_CFG_LED_PHY9			    0x000a0000
-#define SHARED_HW_CFG_LED_PHY11 		    0x000b0000
-#define SHARED_HW_CFG_LED_MAC4			    0x000c0000
-#define SHARED_HW_CFG_LED_PHY8			    0x000d0000
-#define SHARED_HW_CFG_LED_EXTPHY1		    0x000e0000
+	#define SHARED_HW_CFG_LED_MODE_MASK                 0x000f0000
+		#define SHARED_HW_CFG_LED_MODE_SHIFT                 16
+		#define SHARED_HW_CFG_LED_MAC1                       0x00000000
+		#define SHARED_HW_CFG_LED_PHY1                       0x00010000
+		#define SHARED_HW_CFG_LED_PHY2                       0x00020000
+		#define SHARED_HW_CFG_LED_PHY3                       0x00030000
+		#define SHARED_HW_CFG_LED_MAC2                       0x00040000
+		#define SHARED_HW_CFG_LED_PHY4                       0x00050000
+		#define SHARED_HW_CFG_LED_PHY5                       0x00060000
+		#define SHARED_HW_CFG_LED_PHY6                       0x00070000
+		#define SHARED_HW_CFG_LED_MAC3                       0x00080000
+		#define SHARED_HW_CFG_LED_PHY7                       0x00090000
+		#define SHARED_HW_CFG_LED_PHY9                       0x000a0000
+		#define SHARED_HW_CFG_LED_PHY11                      0x000b0000
+		#define SHARED_HW_CFG_LED_MAC4                       0x000c0000
+		#define SHARED_HW_CFG_LED_PHY8                       0x000d0000
+		#define SHARED_HW_CFG_LED_EXTPHY1                    0x000e0000
 
 
-#define SHARED_HW_CFG_AN_ENABLE_MASK		    0x3f000000
-#define SHARED_HW_CFG_AN_ENABLE_SHIFT		    24
-#define SHARED_HW_CFG_AN_ENABLE_CL37		    0x01000000
-#define SHARED_HW_CFG_AN_ENABLE_CL73		    0x02000000
-#define SHARED_HW_CFG_AN_ENABLE_BAM		    0x04000000
-#define SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION  0x08000000
-#define SHARED_HW_CFG_AN_EN_SGMII_FIBER_AUTO_DETECT 0x10000000
-#define SHARED_HW_CFG_AN_ENABLE_REMOTE_PHY	    0x20000000
+	#define SHARED_HW_CFG_AN_ENABLE_MASK                0x3f000000
+		#define SHARED_HW_CFG_AN_ENABLE_SHIFT                24
+		#define SHARED_HW_CFG_AN_ENABLE_CL37                 0x01000000
+		#define SHARED_HW_CFG_AN_ENABLE_CL73                 0x02000000
+		#define SHARED_HW_CFG_AN_ENABLE_BAM                  0x04000000
+		#define SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION   0x08000000
+		#define SHARED_HW_CFG_AN_EN_SGMII_FIBER_AUTO_DETECT  0x10000000
+		#define SHARED_HW_CFG_AN_ENABLE_REMOTE_PHY           0x20000000
 
-	u32 config2;						/* 0x118 */
+	#define SHARED_HW_CFG_SRIOV_MASK                    0x40000000
+		#define SHARED_HW_CFG_SRIOV_DISABLED                 0x00000000
+		#define SHARED_HW_CFG_SRIOV_ENABLED                  0x40000000
+
+	#define SHARED_HW_CFG_ATC_MASK                      0x80000000
+		#define SHARED_HW_CFG_ATC_DISABLED                   0x00000000
+		#define SHARED_HW_CFG_ATC_ENABLED                    0x80000000
+
+	u32 config2;			    /* 0x118 */
 	/* one time auto detect grace period (in sec) */
-#define SHARED_HW_CFG_GRACE_PERIOD_MASK 	    0x000000ff
-#define SHARED_HW_CFG_GRACE_PERIOD_SHIFT	    0
+	#define SHARED_HW_CFG_GRACE_PERIOD_MASK             0x000000ff
+	#define SHARED_HW_CFG_GRACE_PERIOD_SHIFT                     0
 
-#define SHARED_HW_CFG_PCIE_GEN2_ENABLED 	    0x00000100
+	#define SHARED_HW_CFG_PCIE_GEN2_ENABLED             0x00000100
+	#define SHARED_HW_CFG_PCIE_GEN2_DISABLED            0x00000000
 
 	/* The default value for the core clock is 250MHz and it is
 	   achieved by setting the clock change to 4 */
-#define SHARED_HW_CFG_CLOCK_CHANGE_MASK 	    0x00000e00
-#define SHARED_HW_CFG_CLOCK_CHANGE_SHIFT	    9
+	#define SHARED_HW_CFG_CLOCK_CHANGE_MASK             0x00000e00
+	#define SHARED_HW_CFG_CLOCK_CHANGE_SHIFT                     9
 
-#define SHARED_HW_CFG_SMBUS_TIMING_100KHZ	    0x00000000
-#define SHARED_HW_CFG_SMBUS_TIMING_400KHZ	    0x00001000
+	#define SHARED_HW_CFG_SMBUS_TIMING_MASK             0x00001000
+		#define SHARED_HW_CFG_SMBUS_TIMING_100KHZ            0x00000000
+		#define SHARED_HW_CFG_SMBUS_TIMING_400KHZ            0x00001000
 
-#define SHARED_HW_CFG_HIDE_PORT1		    0x00002000
+	#define SHARED_HW_CFG_HIDE_PORT1                    0x00002000
+
+	#define SHARED_HW_CFG_WOL_CAPABLE_MASK              0x00004000
+		#define SHARED_HW_CFG_WOL_CAPABLE_DISABLED           0x00000000
+		#define SHARED_HW_CFG_WOL_CAPABLE_ENABLED            0x00004000
+
+		/* Output low when PERST is asserted */
+	#define SHARED_HW_CFG_SPIO4_FOLLOW_PERST_MASK       0x00008000
+		#define SHARED_HW_CFG_SPIO4_FOLLOW_PERST_DISABLED    0x00000000
+		#define SHARED_HW_CFG_SPIO4_FOLLOW_PERST_ENABLED     0x00008000
+
+	#define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_MASK    0x00070000
+		#define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_SHIFT    16
+		#define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_HW       0x00000000
+		#define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_0DB      0x00010000
+		#define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_3_5DB    0x00020000
+		#define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_6_0DB    0x00030000
 
 	/*  The fan failure mechanism is usually related to the PHY type
-	  since the power consumption of the board is determined by the PHY.
-	  Currently, fan is required for most designs with SFX7101, BCM8727
-	  and BCM8481. If a fan is not required for a board which uses one
-	  of those PHYs, this field should be set to "Disabled". If a fan is
-	  required for a different PHY type, this option should be set to
-	  "Enabled".
-	  The fan failure indication is expected on
-	  SPIO5 */
-#define SHARED_HW_CFG_FAN_FAILURE_MASK			      0x00180000
-#define SHARED_HW_CFG_FAN_FAILURE_SHIFT 		      19
-#define SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE		      0x00000000
-#define SHARED_HW_CFG_FAN_FAILURE_DISABLED		      0x00080000
-#define SHARED_HW_CFG_FAN_FAILURE_ENABLED		      0x00100000
+	      since the power consumption of the board is determined by the
+	      PHY. Currently, a fan is required for most designs with SFX7101,
+	      BCM8727 and BCM8481. If a fan is not required for a board which
+	      uses one of those PHYs, this field should be set to "Disabled".
+	      If a fan is required for a different PHY type, this option
+	      should be set to "Enabled". The fan failure indication is
+	      expected on SPIO5 */
+	#define SHARED_HW_CFG_FAN_FAILURE_MASK              0x00180000
+		#define SHARED_HW_CFG_FAN_FAILURE_SHIFT              19
+		#define SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE           0x00000000
+		#define SHARED_HW_CFG_FAN_FAILURE_DISABLED           0x00080000
+		#define SHARED_HW_CFG_FAN_FAILURE_ENABLED            0x00100000
 
-	/* Set the MDC/MDIO access for the first external phy */
-#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK	    0x1C000000
-#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_SHIFT	    26
-#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_PHY_TYPE     0x00000000
-#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC0	    0x04000000
-#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1	    0x08000000
-#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH	    0x0c000000
-#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED	    0x10000000
+		/* ASPM Power Management support */
+	#define SHARED_HW_CFG_ASPM_SUPPORT_MASK             0x00600000
+		#define SHARED_HW_CFG_ASPM_SUPPORT_SHIFT             21
+		#define SHARED_HW_CFG_ASPM_SUPPORT_L0S_L1_ENABLED    0x00000000
+		#define SHARED_HW_CFG_ASPM_SUPPORT_L0S_DISABLED      0x00200000
+		#define SHARED_HW_CFG_ASPM_SUPPORT_L1_DISABLED       0x00400000
+		#define SHARED_HW_CFG_ASPM_SUPPORT_L0S_L1_DISABLED   0x00600000
 
-	/* Set the MDC/MDIO access for the second external phy */
-#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK	    0xE0000000
-#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_SHIFT	    29
-#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_PHY_TYPE     0x00000000
-#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_EMAC0	    0x20000000
-#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_EMAC1	    0x40000000
-#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_BOTH	    0x60000000
-#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_SWAPPED	    0x80000000
-	u32 power_dissipated;					/* 0x11c */
-#define SHARED_HW_CFG_POWER_DIS_CMN_MASK	    0xff000000
-#define SHARED_HW_CFG_POWER_DIS_CMN_SHIFT	    24
+	/* The value of PM_TL_IGNORE_REQS (bit0) in PCI register
+	   tl_control_0 (register 0x2800) */
+	#define SHARED_HW_CFG_PREVENT_L1_ENTRY_MASK         0x00800000
+		#define SHARED_HW_CFG_PREVENT_L1_ENTRY_DISABLED      0x00000000
+		#define SHARED_HW_CFG_PREVENT_L1_ENTRY_ENABLED       0x00800000
 
-#define SHARED_HW_CFG_POWER_MGNT_SCALE_MASK	    0x00ff0000
-#define SHARED_HW_CFG_POWER_MGNT_SCALE_SHIFT	    16
-#define SHARED_HW_CFG_POWER_MGNT_UNKNOWN_SCALE	    0x00000000
-#define SHARED_HW_CFG_POWER_MGNT_DOT_1_WATT	    0x00010000
-#define SHARED_HW_CFG_POWER_MGNT_DOT_01_WATT	    0x00020000
-#define SHARED_HW_CFG_POWER_MGNT_DOT_001_WATT	    0x00030000
+	#define SHARED_HW_CFG_PORT_MODE_MASK                0x01000000
+		#define SHARED_HW_CFG_PORT_MODE_2                    0x00000000
+		#define SHARED_HW_CFG_PORT_MODE_4                    0x01000000
 
-	u32 ump_nc_si_config;					/* 0x120 */
-#define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MASK	    0x00000003
-#define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_SHIFT	    0
-#define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MAC	    0x00000000
-#define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_PHY	    0x00000001
-#define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MII	    0x00000000
-#define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_RMII	    0x00000002
+	#define SHARED_HW_CFG_PATH_SWAP_MASK                0x02000000
+		#define SHARED_HW_CFG_PATH_SWAP_DISABLED             0x00000000
+		#define SHARED_HW_CFG_PATH_SWAP_ENABLED              0x02000000
 
-#define SHARED_HW_CFG_UMP_NC_SI_NUM_DEVS_MASK	    0x00000f00
-#define SHARED_HW_CFG_UMP_NC_SI_NUM_DEVS_SHIFT	    8
+	/*  Set the MDC/MDIO access for the first external phy */
+	#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK         0x1C000000
+		#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_SHIFT         26
+		#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_PHY_TYPE      0x00000000
+		#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC0         0x04000000
+		#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1         0x08000000
+		#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH          0x0c000000
+		#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED       0x10000000
 
-#define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_MASK   0x00ff0000
-#define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_SHIFT  16
-#define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_NONE   0x00000000
-#define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_BCM5221 0x00010000
+	/*  Set the MDC/MDIO access for the second external phy */
+	#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK         0xE0000000
+		#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_SHIFT         29
+		#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_PHY_TYPE      0x00000000
+		#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_EMAC0         0x20000000
+		#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_EMAC1         0x40000000
+		#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_BOTH          0x60000000
+		#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_SWAPPED       0x80000000
 
-	u32 board;						/* 0x124 */
-#define SHARED_HW_CFG_BOARD_REV_MASK		    0x00FF0000
-#define SHARED_HW_CFG_BOARD_REV_SHIFT		    16
 
-#define SHARED_HW_CFG_BOARD_MAJOR_VER_MASK	    0x0F000000
-#define SHARED_HW_CFG_BOARD_MAJOR_VER_SHIFT	    24
+	u32 power_dissipated;			/* 0x11c */
+	#define SHARED_HW_CFG_POWER_MGNT_SCALE_MASK         0x00ff0000
+		#define SHARED_HW_CFG_POWER_MGNT_SCALE_SHIFT         16
+		#define SHARED_HW_CFG_POWER_MGNT_UNKNOWN_SCALE       0x00000000
+		#define SHARED_HW_CFG_POWER_MGNT_DOT_1_WATT          0x00010000
+		#define SHARED_HW_CFG_POWER_MGNT_DOT_01_WATT         0x00020000
+		#define SHARED_HW_CFG_POWER_MGNT_DOT_001_WATT        0x00030000
 
-#define SHARED_HW_CFG_BOARD_MINOR_VER_MASK	    0xF0000000
-#define SHARED_HW_CFG_BOARD_MINOR_VER_SHIFT	    28
+	#define SHARED_HW_CFG_POWER_DIS_CMN_MASK            0xff000000
+	#define SHARED_HW_CFG_POWER_DIS_CMN_SHIFT                    24
 
-	u32 reserved;						/* 0x128 */
+	u32 ump_nc_si_config;			/* 0x120 */
+	#define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MASK       0x00000003
+		#define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_SHIFT       0
+		#define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MAC         0x00000000
+		#define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_PHY         0x00000001
+		#define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MII         0x00000000
+		#define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_RMII        0x00000002
 
+	#define SHARED_HW_CFG_UMP_NC_SI_NUM_DEVS_MASK       0x00000f00
+		#define SHARED_HW_CFG_UMP_NC_SI_NUM_DEVS_SHIFT       8
+
+	#define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_MASK   0x00ff0000
+		#define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_SHIFT   16
+		#define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_NONE    0x00000000
+		#define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_BCM5221 0x00010000
+
+	u32 board;			/* 0x124 */
+	#define SHARED_HW_CFG_E3_I2C_MUX0_MASK              0x0000003F
+	#define SHARED_HW_CFG_E3_I2C_MUX0_SHIFT                      0
+	#define SHARED_HW_CFG_E3_I2C_MUX1_MASK              0x00000FC0
+	#define SHARED_HW_CFG_E3_I2C_MUX1_SHIFT                      6
+	/* Use the PIN_CFG_XXX defines on top */
+	#define SHARED_HW_CFG_BOARD_REV_MASK                0x00ff0000
+	#define SHARED_HW_CFG_BOARD_REV_SHIFT                        16
+
+	#define SHARED_HW_CFG_BOARD_MAJOR_VER_MASK          0x0f000000
+	#define SHARED_HW_CFG_BOARD_MAJOR_VER_SHIFT                  24
+
+	#define SHARED_HW_CFG_BOARD_MINOR_VER_MASK          0xf0000000
+	#define SHARED_HW_CFG_BOARD_MINOR_VER_SHIFT                  28
+
+	u32 wc_lane_config;				    /* 0x128 */
+	#define SHARED_HW_CFG_LANE_SWAP_CFG_MASK            0x0000FFFF
+		#define SHARED_HW_CFG_LANE_SWAP_CFG_SHIFT            0
+		#define SHARED_HW_CFG_LANE_SWAP_CFG_32103210         0x00001b1b
+		#define SHARED_HW_CFG_LANE_SWAP_CFG_32100123         0x00001be4
+		#define SHARED_HW_CFG_LANE_SWAP_CFG_01233210         0x0000e41b
+		#define SHARED_HW_CFG_LANE_SWAP_CFG_01230123         0x0000e4e4
+	#define SHARED_HW_CFG_LANE_SWAP_CFG_TX_MASK         0x000000FF
+	#define SHARED_HW_CFG_LANE_SWAP_CFG_TX_SHIFT                 0
+	#define SHARED_HW_CFG_LANE_SWAP_CFG_RX_MASK         0x0000FF00
+	#define SHARED_HW_CFG_LANE_SWAP_CFG_RX_SHIFT                 8
+
+	/* TX lane Polarity swap */
+	#define SHARED_HW_CFG_TX_LANE0_POL_FLIP_ENABLED     0x00010000
+	#define SHARED_HW_CFG_TX_LANE1_POL_FLIP_ENABLED     0x00020000
+	#define SHARED_HW_CFG_TX_LANE2_POL_FLIP_ENABLED     0x00040000
+	#define SHARED_HW_CFG_TX_LANE3_POL_FLIP_ENABLED     0x00080000
+	/* RX lane Polarity swap */
+	#define SHARED_HW_CFG_RX_LANE0_POL_FLIP_ENABLED     0x00100000
+	#define SHARED_HW_CFG_RX_LANE1_POL_FLIP_ENABLED     0x00200000
+	#define SHARED_HW_CFG_RX_LANE2_POL_FLIP_ENABLED     0x00400000
+	#define SHARED_HW_CFG_RX_LANE3_POL_FLIP_ENABLED     0x00800000
+
+	/*  Selects the port layout of the board */
+	#define SHARED_HW_CFG_E3_PORT_LAYOUT_MASK           0x0F000000
+		#define SHARED_HW_CFG_E3_PORT_LAYOUT_SHIFT           24
+		#define SHARED_HW_CFG_E3_PORT_LAYOUT_2P_01           0x00000000
+		#define SHARED_HW_CFG_E3_PORT_LAYOUT_2P_10           0x01000000
+		#define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_0123         0x02000000
+		#define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_1032         0x03000000
+		#define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_2301         0x04000000
+		#define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_3210         0x05000000
 };
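
The defines above follow the file's MASK/SHIFT convention: each *_MASK
isolates a bit-field inside a 32-bit config word, and the matching *_SHIFT
(or the enumerated value defines) locates it. A minimal illustrative sketch,
not code from the driver, of how both flavors are read, assuming config2
holds the word at offset 0x118 already fetched from shared memory:

	#include <stdint.h>

	/* Enumerated fields: mask, then compare against a value define. */
	static inline int fan_failure_enforced(uint32_t config2)
	{
		return (config2 & SHARED_HW_CFG_FAN_FAILURE_MASK) ==
		       SHARED_HW_CFG_FAN_FAILURE_ENABLED;
	}

	/* Numeric fields: mask, then shift down, e.g. the one-time auto
	 * detect grace period in seconds. */
	static inline uint32_t grace_period_sec(uint32_t config2)
	{
		return (config2 & SHARED_HW_CFG_GRACE_PERIOD_MASK) >>
		       SHARED_HW_CFG_GRACE_PERIOD_SHIFT;
	}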
 
 
 /****************************************************************************
- * Port HW configuration						    *
+ * Port HW configuration                                                    *
  ****************************************************************************/
-struct port_hw_cfg {			    /* port 0: 0x12c  port 1: 0x2bc */
+struct port_hw_cfg {		    /* port 0: 0x12c  port 1: 0x2bc */
 
 	u32 pci_id;
-#define PORT_HW_CFG_PCI_VENDOR_ID_MASK		    0xffff0000
-#define PORT_HW_CFG_PCI_DEVICE_ID_MASK		    0x0000ffff
+	#define PORT_HW_CFG_PCI_VENDOR_ID_MASK              0xffff0000
+	#define PORT_HW_CFG_PCI_DEVICE_ID_MASK              0x0000ffff
 
 	u32 pci_sub_id;
-#define PORT_HW_CFG_PCI_SUBSYS_DEVICE_ID_MASK	    0xffff0000
-#define PORT_HW_CFG_PCI_SUBSYS_VENDOR_ID_MASK	    0x0000ffff
+	#define PORT_HW_CFG_PCI_SUBSYS_DEVICE_ID_MASK       0xffff0000
+	#define PORT_HW_CFG_PCI_SUBSYS_VENDOR_ID_MASK       0x0000ffff
 
 	u32 power_dissipated;
-#define PORT_HW_CFG_POWER_DIS_D3_MASK		    0xff000000
-#define PORT_HW_CFG_POWER_DIS_D3_SHIFT		    24
-#define PORT_HW_CFG_POWER_DIS_D2_MASK		    0x00ff0000
-#define PORT_HW_CFG_POWER_DIS_D2_SHIFT		    16
-#define PORT_HW_CFG_POWER_DIS_D1_MASK		    0x0000ff00
-#define PORT_HW_CFG_POWER_DIS_D1_SHIFT		    8
-#define PORT_HW_CFG_POWER_DIS_D0_MASK		    0x000000ff
-#define PORT_HW_CFG_POWER_DIS_D0_SHIFT		    0
+	#define PORT_HW_CFG_POWER_DIS_D0_MASK               0x000000ff
+	#define PORT_HW_CFG_POWER_DIS_D0_SHIFT                       0
+	#define PORT_HW_CFG_POWER_DIS_D1_MASK               0x0000ff00
+	#define PORT_HW_CFG_POWER_DIS_D1_SHIFT                       8
+	#define PORT_HW_CFG_POWER_DIS_D2_MASK               0x00ff0000
+	#define PORT_HW_CFG_POWER_DIS_D2_SHIFT                       16
+	#define PORT_HW_CFG_POWER_DIS_D3_MASK               0xff000000
+	#define PORT_HW_CFG_POWER_DIS_D3_SHIFT                       24
 
 	u32 power_consumed;
-#define PORT_HW_CFG_POWER_CONS_D3_MASK		    0xff000000
-#define PORT_HW_CFG_POWER_CONS_D3_SHIFT 	    24
-#define PORT_HW_CFG_POWER_CONS_D2_MASK		    0x00ff0000
-#define PORT_HW_CFG_POWER_CONS_D2_SHIFT 	    16
-#define PORT_HW_CFG_POWER_CONS_D1_MASK		    0x0000ff00
-#define PORT_HW_CFG_POWER_CONS_D1_SHIFT 	    8
-#define PORT_HW_CFG_POWER_CONS_D0_MASK		    0x000000ff
-#define PORT_HW_CFG_POWER_CONS_D0_SHIFT 	    0
+	#define PORT_HW_CFG_POWER_CONS_D0_MASK              0x000000ff
+	#define PORT_HW_CFG_POWER_CONS_D0_SHIFT                      0
+	#define PORT_HW_CFG_POWER_CONS_D1_MASK              0x0000ff00
+	#define PORT_HW_CFG_POWER_CONS_D1_SHIFT                      8
+	#define PORT_HW_CFG_POWER_CONS_D2_MASK              0x00ff0000
+	#define PORT_HW_CFG_POWER_CONS_D2_SHIFT                      16
+	#define PORT_HW_CFG_POWER_CONS_D3_MASK              0xff000000
+	#define PORT_HW_CFG_POWER_CONS_D3_SHIFT                      24
 
 	u32 mac_upper;
-#define PORT_HW_CFG_UPPERMAC_MASK		    0x0000ffff
-#define PORT_HW_CFG_UPPERMAC_SHIFT		    0
+	#define PORT_HW_CFG_UPPERMAC_MASK                   0x0000ffff
+	#define PORT_HW_CFG_UPPERMAC_SHIFT                           0
 	u32 mac_lower;
 
 	u32 iscsi_mac_upper;  /* Upper 16 bits are always zeroes */
@@ -237,642 +402,799 @@
 	u32 rdma_mac_lower;
 
 	u32 serdes_config;
-#define PORT_HW_CFG_SERDES_TX_DRV_PRE_EMPHASIS_MASK	      0x0000FFFF
-#define PORT_HW_CFG_SERDES_TX_DRV_PRE_EMPHASIS_SHIFT	      0
+	#define PORT_HW_CFG_SERDES_TX_DRV_PRE_EMPHASIS_MASK 0x0000ffff
+	#define PORT_HW_CFG_SERDES_TX_DRV_PRE_EMPHASIS_SHIFT         0
 
-#define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_MASK	      0xFFFF0000
-#define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT	      16
+	#define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_MASK    0xffff0000
+	#define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT            16
 
 
-	u32 Reserved0[3];				    /* 0x158 */
-	/*	Controls the TX laser of the SFP+ module */
-	u32 sfp_ctrl;					/* 0x164 */
-#define PORT_HW_CFG_TX_LASER_MASK			      0x000000FF
-#define PORT_HW_CFG_TX_LASER_SHIFT			      0
-#define PORT_HW_CFG_TX_LASER_MDIO			      0x00000000
-#define PORT_HW_CFG_TX_LASER_GPIO0			      0x00000001
-#define PORT_HW_CFG_TX_LASER_GPIO1			      0x00000002
-#define PORT_HW_CFG_TX_LASER_GPIO2			      0x00000003
-#define PORT_HW_CFG_TX_LASER_GPIO3			      0x00000004
+	/*  Default values: 2P-64, 4P-32 */
+	u32 pf_config;					    /* 0x158 */
+	#define PORT_HW_CFG_PF_NUM_VF_MASK                  0x0000007F
+	#define PORT_HW_CFG_PF_NUM_VF_SHIFT                          0
 
-    /*	Controls the fault module LED of the SFP+ */
-#define PORT_HW_CFG_FAULT_MODULE_LED_MASK		      0x0000FF00
-#define PORT_HW_CFG_FAULT_MODULE_LED_SHIFT		      8
-#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO0		      0x00000000
-#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO1		      0x00000100
-#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO2		      0x00000200
-#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO3		      0x00000300
-#define PORT_HW_CFG_FAULT_MODULE_LED_DISABLED		      0x00000400
-	u32 Reserved01[12];				    /* 0x158 */
-	/*  for external PHY, or forced mode or during AN */
-	u16 xgxs_config_rx[4];				    /* 0x198 */
+	/*  Default value: 17 */
+	#define PORT_HW_CFG_PF_NUM_MSIX_VECTORS_MASK        0x00007F00
+	#define PORT_HW_CFG_PF_NUM_MSIX_VECTORS_SHIFT                8
 
-	u16 xgxs_config_tx[4];				    /* 0x1A0 */
+	#define PORT_HW_CFG_ENABLE_FLR_MASK                 0x00010000
+	#define PORT_HW_CFG_FLR_ENABLED                     0x00010000
 
-	u32 Reserved1[56];				    /* 0x1A8 */
-	u32 default_cfg;				    /* 0x288 */
-#define PORT_HW_CFG_GPIO0_CONFIG_MASK			      0x00000003
-#define PORT_HW_CFG_GPIO0_CONFIG_SHIFT			      0
-#define PORT_HW_CFG_GPIO0_CONFIG_NA			      0x00000000
-#define PORT_HW_CFG_GPIO0_CONFIG_LOW			      0x00000001
-#define PORT_HW_CFG_GPIO0_CONFIG_HIGH			      0x00000002
-#define PORT_HW_CFG_GPIO0_CONFIG_INPUT			      0x00000003
+	u32 vf_config;					    /* 0x15C */
+	#define PORT_HW_CFG_VF_NUM_MSIX_VECTORS_MASK        0x0000007F
+	#define PORT_HW_CFG_VF_NUM_MSIX_VECTORS_SHIFT                0
 
-#define PORT_HW_CFG_GPIO1_CONFIG_MASK			      0x0000000C
-#define PORT_HW_CFG_GPIO1_CONFIG_SHIFT			      2
-#define PORT_HW_CFG_GPIO1_CONFIG_NA			      0x00000000
-#define PORT_HW_CFG_GPIO1_CONFIG_LOW			      0x00000004
-#define PORT_HW_CFG_GPIO1_CONFIG_HIGH			      0x00000008
-#define PORT_HW_CFG_GPIO1_CONFIG_INPUT			      0x0000000c
+	#define PORT_HW_CFG_VF_PCI_DEVICE_ID_MASK           0xFFFF0000
+	#define PORT_HW_CFG_VF_PCI_DEVICE_ID_SHIFT                   16
 
-#define PORT_HW_CFG_GPIO2_CONFIG_MASK			      0x00000030
-#define PORT_HW_CFG_GPIO2_CONFIG_SHIFT			      4
-#define PORT_HW_CFG_GPIO2_CONFIG_NA			      0x00000000
-#define PORT_HW_CFG_GPIO2_CONFIG_LOW			      0x00000010
-#define PORT_HW_CFG_GPIO2_CONFIG_HIGH			      0x00000020
-#define PORT_HW_CFG_GPIO2_CONFIG_INPUT			      0x00000030
+	u32 mf_pci_id;					    /* 0x160 */
+	#define PORT_HW_CFG_MF_PCI_DEVICE_ID_MASK           0x0000FFFF
+	#define PORT_HW_CFG_MF_PCI_DEVICE_ID_SHIFT                   0
 
-#define PORT_HW_CFG_GPIO3_CONFIG_MASK			      0x000000C0
-#define PORT_HW_CFG_GPIO3_CONFIG_SHIFT			      6
-#define PORT_HW_CFG_GPIO3_CONFIG_NA			      0x00000000
-#define PORT_HW_CFG_GPIO3_CONFIG_LOW			      0x00000040
-#define PORT_HW_CFG_GPIO3_CONFIG_HIGH			      0x00000080
-#define PORT_HW_CFG_GPIO3_CONFIG_INPUT			      0x000000c0
+	/*  Controls the TX laser of the SFP+ module */
+	u32 sfp_ctrl;					    /* 0x164 */
+	#define PORT_HW_CFG_TX_LASER_MASK                   0x000000FF
+		#define PORT_HW_CFG_TX_LASER_SHIFT                   0
+		#define PORT_HW_CFG_TX_LASER_MDIO                    0x00000000
+		#define PORT_HW_CFG_TX_LASER_GPIO0                   0x00000001
+		#define PORT_HW_CFG_TX_LASER_GPIO1                   0x00000002
+		#define PORT_HW_CFG_TX_LASER_GPIO2                   0x00000003
+		#define PORT_HW_CFG_TX_LASER_GPIO3                   0x00000004
+
+	/*  Controls the fault module LED of the SFP+ */
+	#define PORT_HW_CFG_FAULT_MODULE_LED_MASK           0x0000FF00
+		#define PORT_HW_CFG_FAULT_MODULE_LED_SHIFT           8
+		#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO0           0x00000000
+		#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO1           0x00000100
+		#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO2           0x00000200
+		#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO3           0x00000300
+		#define PORT_HW_CFG_FAULT_MODULE_LED_DISABLED        0x00000400
+
+	/*  The output pin TX_DIS that controls the TX laser of the SFP+
+	  module. Use the PIN_CFG_XXX defines on top */
+	u32 e3_sfp_ctrl;				    /* 0x168 */
+	#define PORT_HW_CFG_E3_TX_LASER_MASK                0x000000FF
+	#define PORT_HW_CFG_E3_TX_LASER_SHIFT                        0
+
+	/*  The output pin for SFPP_TYPE which turns on the Fault module LED */
+	#define PORT_HW_CFG_E3_FAULT_MDL_LED_MASK           0x0000FF00
+	#define PORT_HW_CFG_E3_FAULT_MDL_LED_SHIFT                   8
+
+	/*  The input pin MOD_ABS that indicates whether the SFP+ module is
+	  present or not. Use the PIN_CFG_XXX defines on top */
+	#define PORT_HW_CFG_E3_MOD_ABS_MASK                 0x00FF0000
+	#define PORT_HW_CFG_E3_MOD_ABS_SHIFT                         16
+
+	/*  The output pin PWRDIS_SFP_X which disables the power of the SFP+
+	  module. Use the PIN_CFG_XXX defines on top */
+	#define PORT_HW_CFG_E3_PWR_DIS_MASK                 0xFF000000
+	#define PORT_HW_CFG_E3_PWR_DIS_SHIFT                         24
 
 	/*
-	 * When KR link is required to be set to force which is not
-	 * KR-compliant, this parameter determine what is the trigger for it.
-	 * When GPIO is selected, low input will force the speed. Currently
-	 * default speed is 1G. In the future, it may be widen to select the
-	 * forced speed in with another parameter. Note when force-1G is
-	 * enabled, it override option 56: Link Speed option.
+	 * The input pin which signals module transmit fault. Use the
+	 * PIN_CFG_XXX defines on top
 	 */
-#define PORT_HW_CFG_FORCE_KR_ENABLER_MASK		      0x00000F00
-#define PORT_HW_CFG_FORCE_KR_ENABLER_SHIFT		      8
-#define PORT_HW_CFG_FORCE_KR_ENABLER_NOT_FORCED		      0x00000000
-#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P0		      0x00000100
-#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P0		      0x00000200
-#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P0		      0x00000300
-#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P0		      0x00000400
-#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P1		      0x00000500
-#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P1		      0x00000600
-#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P1		      0x00000700
-#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P1		      0x00000800
-#define PORT_HW_CFG_FORCE_KR_ENABLER_FORCED		      0x00000900
-    /*	Enable to determine with which GPIO to reset the external phy */
-#define PORT_HW_CFG_EXT_PHY_GPIO_RST_MASK		      0x000F0000
-#define PORT_HW_CFG_EXT_PHY_GPIO_RST_SHIFT		      16
-#define PORT_HW_CFG_EXT_PHY_GPIO_RST_PHY_TYPE		      0x00000000
-#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0		      0x00010000
-#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P0		      0x00020000
-#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P0		      0x00030000
-#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P0		      0x00040000
-#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P1		      0x00050000
-#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P1		      0x00060000
-#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P1		      0x00070000
-#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P1		      0x00080000
+	u32 e3_cmn_pin_cfg;				    /* 0x16C */
+	#define PORT_HW_CFG_E3_TX_FAULT_MASK                0x000000FF
+	#define PORT_HW_CFG_E3_TX_FAULT_SHIFT                        0
+
+	/*  The output pin which resets the PHY. Use the PIN_CFG_XXX defines
+	  on top */
+	#define PORT_HW_CFG_E3_PHY_RESET_MASK               0x0000FF00
+	#define PORT_HW_CFG_E3_PHY_RESET_SHIFT                       8
+
+	/*
+	 * The output pin which powers down the PHY. Use the PIN_CFG_XXX
+	 * defines on top
+	 */
+	#define PORT_HW_CFG_E3_PWR_DOWN_MASK                0x00FF0000
+	#define PORT_HW_CFG_E3_PWR_DOWN_SHIFT                        16
+
+	/*  The output pin values BSC_SEL which select the I2C channel for
+	  this port in the I2C mux */
+	#define PORT_HW_CFG_E3_I2C_MUX0_MASK                0x01000000
+	#define PORT_HW_CFG_E3_I2C_MUX1_MASK                0x02000000
+
+
+	/*
+	 * The input pin I_FAULT which indicates that an over-current has
+	 * occurred. Use the PIN_CFG_XXX defines on top
+	 */
+	u32 e3_cmn_pin_cfg1;				    /* 0x170 */
+	#define PORT_HW_CFG_E3_OVER_CURRENT_MASK            0x000000FF
+	#define PORT_HW_CFG_E3_OVER_CURRENT_SHIFT                    0
+	u32 reserved0[7];				    /* 0x174 */
+
+	u32 aeu_int_mask;				    /* 0x190 */
+
+	u32 media_type;					    /* 0x194 */
+	#define PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK            0x000000FF
+	#define PORT_HW_CFG_MEDIA_TYPE_PHY0_SHIFT                    0
+
+	#define PORT_HW_CFG_MEDIA_TYPE_PHY1_MASK            0x0000FF00
+	#define PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT                    8
+
+	#define PORT_HW_CFG_MEDIA_TYPE_PHY2_MASK            0x00FF0000
+	#define PORT_HW_CFG_MEDIA_TYPE_PHY2_SHIFT                    16
+
+	/*  4 times 16 bits for all 4 lanes. In case an external PHY is
+	      present (not direct mode), those values will not take effect on
+	      the 4 XGXS lanes. For some external PHYs (such as 8706 and 8726)
+	      the values will be used to configure the external PHY; in those
+	      cases, not all 4 values are needed. */
+	u16 xgxs_config_rx[4];			/* 0x198 */
+	u16 xgxs_config_tx[4];			/* 0x1A0 */
+
+	/* For storing the FCOE MAC in shared memory */
+	u32 fcoe_fip_mac_upper;
+	#define PORT_HW_CFG_FCOE_UPPERMAC_MASK              0x0000ffff
+	#define PORT_HW_CFG_FCOE_UPPERMAC_SHIFT                      0
+	u32 fcoe_fip_mac_lower;
+
+	u32 fcoe_wwn_port_name_upper;
+	u32 fcoe_wwn_port_name_lower;
+
+	u32 fcoe_wwn_node_name_upper;
+	u32 fcoe_wwn_node_name_lower;
+
+	u32 Reserved1[50];					/* 0x1C0 */
+
+		u32 default_cfg;			    /* 0x288 */
+	#define PORT_HW_CFG_GPIO0_CONFIG_MASK               0x00000003
+		#define PORT_HW_CFG_GPIO0_CONFIG_SHIFT               0
+		#define PORT_HW_CFG_GPIO0_CONFIG_NA                  0x00000000
+		#define PORT_HW_CFG_GPIO0_CONFIG_LOW                 0x00000001
+		#define PORT_HW_CFG_GPIO0_CONFIG_HIGH                0x00000002
+		#define PORT_HW_CFG_GPIO0_CONFIG_INPUT               0x00000003
+
+	#define PORT_HW_CFG_GPIO1_CONFIG_MASK               0x0000000C
+		#define PORT_HW_CFG_GPIO1_CONFIG_SHIFT               2
+		#define PORT_HW_CFG_GPIO1_CONFIG_NA                  0x00000000
+		#define PORT_HW_CFG_GPIO1_CONFIG_LOW                 0x00000004
+		#define PORT_HW_CFG_GPIO1_CONFIG_HIGH                0x00000008
+		#define PORT_HW_CFG_GPIO1_CONFIG_INPUT               0x0000000c
+
+	#define PORT_HW_CFG_GPIO2_CONFIG_MASK               0x00000030
+		#define PORT_HW_CFG_GPIO2_CONFIG_SHIFT               4
+		#define PORT_HW_CFG_GPIO2_CONFIG_NA                  0x00000000
+		#define PORT_HW_CFG_GPIO2_CONFIG_LOW                 0x00000010
+		#define PORT_HW_CFG_GPIO2_CONFIG_HIGH                0x00000020
+		#define PORT_HW_CFG_GPIO2_CONFIG_INPUT               0x00000030
+
+	#define PORT_HW_CFG_GPIO3_CONFIG_MASK               0x000000C0
+		#define PORT_HW_CFG_GPIO3_CONFIG_SHIFT               6
+		#define PORT_HW_CFG_GPIO3_CONFIG_NA                  0x00000000
+		#define PORT_HW_CFG_GPIO3_CONFIG_LOW                 0x00000040
+		#define PORT_HW_CFG_GPIO3_CONFIG_HIGH                0x00000080
+		#define PORT_HW_CFG_GPIO3_CONFIG_INPUT               0x000000c0
+
+	/*  When the KR link is required to be forced (which is not
+	      KR-compliant), this parameter determines the trigger for it.
+	      When GPIO is selected, a low input will force the speed.
+	      Currently the default speed is 1G. In the future, it may be
+	      widened to select the forced speed with another parameter. Note
+	      that when force-1G is enabled, it overrides option 56: the Link
+	      Speed option. */
+	#define PORT_HW_CFG_FORCE_KR_ENABLER_MASK           0x00000F00
+		#define PORT_HW_CFG_FORCE_KR_ENABLER_SHIFT           8
+		#define PORT_HW_CFG_FORCE_KR_ENABLER_NOT_FORCED      0x00000000
+		#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P0        0x00000100
+		#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P0        0x00000200
+		#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P0        0x00000300
+		#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P0        0x00000400
+		#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P1        0x00000500
+		#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P1        0x00000600
+		#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P1        0x00000700
+		#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P1        0x00000800
+		#define PORT_HW_CFG_FORCE_KR_ENABLER_FORCED          0x00000900
+	/*  Enable to determine with which GPIO to reset the external phy */
+	#define PORT_HW_CFG_EXT_PHY_GPIO_RST_MASK           0x000F0000
+		#define PORT_HW_CFG_EXT_PHY_GPIO_RST_SHIFT           16
+		#define PORT_HW_CFG_EXT_PHY_GPIO_RST_PHY_TYPE        0x00000000
+		#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0        0x00010000
+		#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P0        0x00020000
+		#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P0        0x00030000
+		#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P0        0x00040000
+		#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P1        0x00050000
+		#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P1        0x00060000
+		#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P1        0x00070000
+		#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P1        0x00080000
+
 	/*  Enable BAM on KR */
-#define PORT_HW_CFG_ENABLE_BAM_ON_KR_MASK		      0x00100000
-#define PORT_HW_CFG_ENABLE_BAM_ON_KR_SHIFT		      20
-#define PORT_HW_CFG_ENABLE_BAM_ON_KR_DISABLED		      0x00000000
-#define PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED		      0x00100000
+	#define PORT_HW_CFG_ENABLE_BAM_ON_KR_MASK           0x00100000
+	#define PORT_HW_CFG_ENABLE_BAM_ON_KR_SHIFT                   20
+	#define PORT_HW_CFG_ENABLE_BAM_ON_KR_DISABLED                0x00000000
+	#define PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED                 0x00100000
 
 	/*  Enable Common Mode Sense */
-#define PORT_HW_CFG_ENABLE_CMS_MASK			      0x00200000
-#define PORT_HW_CFG_ENABLE_CMS_SHIFT			      21
-#define PORT_HW_CFG_ENABLE_CMS_DISABLED			      0x00000000
-#define PORT_HW_CFG_ENABLE_CMS_ENABLED			      0x00200000
+	#define PORT_HW_CFG_ENABLE_CMS_MASK                 0x00200000
+	#define PORT_HW_CFG_ENABLE_CMS_SHIFT                         21
+	#define PORT_HW_CFG_ENABLE_CMS_DISABLED                      0x00000000
+	#define PORT_HW_CFG_ENABLE_CMS_ENABLED                       0x00200000
+
+	/*  Enable RJ45 magjack pair swapping on 10GBase-T PHY, 84833 only */
+	#define PORT_HW_CFG_RJ45_PR_SWP_MASK                0x00400000
+	#define PORT_HW_CFG_RJ45_PR_SWP_SHIFT                        22
+	#define PORT_HW_CFG_RJ45_PR_SWP_DISABLED                     0x00000000
+	#define PORT_HW_CFG_RJ45_PR_SWP_ENABLED                      0x00400000
+
+	/*  Determines the Serdes electrical interface */
+	#define PORT_HW_CFG_NET_SERDES_IF_MASK              0x0F000000
+	#define PORT_HW_CFG_NET_SERDES_IF_SHIFT                      24
+	#define PORT_HW_CFG_NET_SERDES_IF_SGMII                      0x00000000
+	#define PORT_HW_CFG_NET_SERDES_IF_XFI                        0x01000000
+	#define PORT_HW_CFG_NET_SERDES_IF_SFI                        0x02000000
+	#define PORT_HW_CFG_NET_SERDES_IF_KR                         0x03000000
+	#define PORT_HW_CFG_NET_SERDES_IF_DXGXS                      0x04000000
+	#define PORT_HW_CFG_NET_SERDES_IF_KR2                        0x05000000
+
 
 	u32 speed_capability_mask2;			    /* 0x28C */
-#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_MASK		      0x0000FFFF
-#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_SHIFT		      0
-#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_10M_FULL	      0x00000001
-#define PORT_HW_CFG_SPEED_CAPABILITY2_D3__		      0x00000002
-#define PORT_HW_CFG_SPEED_CAPABILITY2_D3___		      0x00000004
-#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_100M_FULL	      0x00000008
-#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_1G		      0x00000010
-#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_2_DOT_5G	      0x00000020
-#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_10G		      0x00000040
-#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_12G		      0x00000080
-#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_12_DOT_5G	      0x00000100
-#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_13G		      0x00000200
-#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_15G		      0x00000400
-#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_16G		      0x00000800
+	#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_MASK       0x0000FFFF
+		#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_SHIFT       0
+		#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_10M_FULL    0x00000001
+		#define PORT_HW_CFG_SPEED_CAPABILITY2_D3__           0x00000002
+		#define PORT_HW_CFG_SPEED_CAPABILITY2_D3___          0x00000004
+		#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_100M_FULL   0x00000008
+		#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_1G          0x00000010
+		#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_2_DOT_5G    0x00000020
+		#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_10G         0x00000040
+		#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_20G         0x00000080
 
-#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_MASK		      0xFFFF0000
-#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_SHIFT		      16
-#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_10M_FULL	      0x00010000
-#define PORT_HW_CFG_SPEED_CAPABILITY2_D0__		      0x00020000
-#define PORT_HW_CFG_SPEED_CAPABILITY2_D0___		      0x00040000
-#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_100M_FULL	      0x00080000
-#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_1G		      0x00100000
-#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_2_DOT_5G	      0x00200000
-#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_10G		      0x00400000
-#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_12G		      0x00800000
-#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_12_DOT_5G	      0x01000000
-#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_13G		      0x02000000
-#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_15G		      0x04000000
-#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_16G		      0x08000000
-
-	/* In the case where two media types (e.g. copper and fiber) are
-	  present and electrically active at the same time, PHY Selection
-	  will determine which of the two PHYs will be designated as the
-	  Active PHY and used for a connection to the network.	*/
-	u32 multi_phy_config;				/* 0x290 */
-#define PORT_HW_CFG_PHY_SELECTION_MASK		     0x00000007
-#define PORT_HW_CFG_PHY_SELECTION_SHIFT		     0
-#define PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT   0x00000000
-#define PORT_HW_CFG_PHY_SELECTION_FIRST_PHY	     0x00000001
-#define PORT_HW_CFG_PHY_SELECTION_SECOND_PHY	     0x00000002
-#define PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY 0x00000003
-#define PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY 0x00000004
-
-	/* When enabled, all second phy nvram parameters will be swapped
-	  with the first phy parameters */
-#define PORT_HW_CFG_PHY_SWAPPED_MASK		     0x00000008
-#define PORT_HW_CFG_PHY_SWAPPED_SHIFT		     3
-#define PORT_HW_CFG_PHY_SWAPPED_DISABLED	     0x00000000
-#define PORT_HW_CFG_PHY_SWAPPED_ENABLED		     0x00000008
+	#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_MASK       0xFFFF0000
+		#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_SHIFT       16
+		#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_10M_FULL    0x00010000
+		#define PORT_HW_CFG_SPEED_CAPABILITY2_D0__           0x00020000
+		#define PORT_HW_CFG_SPEED_CAPABILITY2_D0___          0x00040000
+		#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_100M_FULL   0x00080000
+		#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_1G          0x00100000
+		#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_2_DOT_5G    0x00200000
+		#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_10G         0x00400000
+		#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_20G         0x00800000
 
 
-	/* Address of the second external phy */
-	u32 external_phy_config2;				/* 0x294 */
-#define PORT_HW_CFG_XGXS_EXT_PHY2_ADDR_MASK	    0x000000FF
-#define PORT_HW_CFG_XGXS_EXT_PHY2_ADDR_SHIFT	    0
+	/*  In the case where two media types (e.g. copper and fiber) are
+	      present and electrically active at the same time, PHY Selection
+	      will determine which of the two PHYs will be designated as the
+	      Active PHY and used for a connection to the network.  */
+	u32 multi_phy_config;				    /* 0x290 */
+	#define PORT_HW_CFG_PHY_SELECTION_MASK              0x00000007
+		#define PORT_HW_CFG_PHY_SELECTION_SHIFT              0
+		#define PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT   0x00000000
+		#define PORT_HW_CFG_PHY_SELECTION_FIRST_PHY          0x00000001
+		#define PORT_HW_CFG_PHY_SELECTION_SECOND_PHY         0x00000002
+		#define PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY 0x00000003
+		#define PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY 0x00000004
 
-	/* The second XGXS external PHY type */
-#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_MASK	    0x0000FF00
-#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_SHIFT	    8
-#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_DIRECT	    0x00000000
-#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8071	    0x00000100
-#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8072	    0x00000200
-#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8073	    0x00000300
-#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8705	    0x00000400
-#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8706	    0x00000500
-#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8726	    0x00000600
-#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8481	    0x00000700
-#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_SFX7101	    0x00000800
-#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8727	    0x00000900
-#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8727_NOC  0x00000a00
-#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84823     0x00000b00
-#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54640     0x00000c00
-#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84833     0x00000d00
-#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_FAILURE	    0x0000fd00
-#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_NOT_CONN     0x0000ff00
+	/*  When enabled, all second phy nvram parameters will be swapped
+	      with the first phy parameters */
+	#define PORT_HW_CFG_PHY_SWAPPED_MASK                0x00000008
+		#define PORT_HW_CFG_PHY_SWAPPED_SHIFT                3
+		#define PORT_HW_CFG_PHY_SWAPPED_DISABLED             0x00000000
+		#define PORT_HW_CFG_PHY_SWAPPED_ENABLED              0x00000008
 
-	/* 4 times 16 bits for all 4 lanes. For some external PHYs (such as
-	  8706, 8726 and 8727) not all 4 values are needed. */
-	u16 xgxs_config2_rx[4];				/* 0x296 */
-	u16 xgxs_config2_tx[4];				/* 0x2A0 */
+
+	/*  Address of the second external phy */
+	u32 external_phy_config2;			    /* 0x294 */
+	#define PORT_HW_CFG_XGXS_EXT_PHY2_ADDR_MASK         0x000000FF
+	#define PORT_HW_CFG_XGXS_EXT_PHY2_ADDR_SHIFT                 0
+
+	/*  The second XGXS external PHY type */
+	#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_MASK         0x0000FF00
+		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_SHIFT         8
+		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_DIRECT        0x00000000
+		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8071       0x00000100
+		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8072       0x00000200
+		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8073       0x00000300
+		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8705       0x00000400
+		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8706       0x00000500
+		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8726       0x00000600
+		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8481       0x00000700
+		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_SFX7101       0x00000800
+		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8727       0x00000900
+		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8727_NOC   0x00000a00
+		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84823      0x00000b00
+		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54640      0x00000c00
+		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84833      0x00000d00
+		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54616      0x00000e00
+		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8722       0x00000f00
+		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_FAILURE       0x0000fd00
+		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_NOT_CONN      0x0000ff00
+
+
+	/*  4 times 16 bits for all 4 lanes. For some external PHYs (such as
+	      8706, 8726 and 8727) not all 4 values are needed. */
+	u16 xgxs_config2_rx[4];				    /* 0x296 */
+	u16 xgxs_config2_tx[4];				    /* 0x2A0 */
 
 	u32 lane_config;
-#define PORT_HW_CFG_LANE_SWAP_CFG_MASK		    0x0000ffff
-#define PORT_HW_CFG_LANE_SWAP_CFG_SHIFT 	    0
+	#define PORT_HW_CFG_LANE_SWAP_CFG_MASK              0x0000ffff
+		#define PORT_HW_CFG_LANE_SWAP_CFG_SHIFT              0
+		/* AN and forced */
+		#define PORT_HW_CFG_LANE_SWAP_CFG_01230123           0x00001b1b
+		/* forced only */
+		#define PORT_HW_CFG_LANE_SWAP_CFG_01233210           0x00001be4
+		/* forced only */
+		#define PORT_HW_CFG_LANE_SWAP_CFG_31203120           0x0000d8d8
+		/* forced only */
+		#define PORT_HW_CFG_LANE_SWAP_CFG_32103210           0x0000e4e4
+	#define PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK           0x000000ff
+	#define PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT                   0
+	#define PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK           0x0000ff00
+	#define PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT                   8
+	#define PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK       0x0000c000
+	#define PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT               14
 
-#define PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK	    0x000000ff
-#define PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT	    0
-#define PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK	    0x0000ff00
-#define PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT	    8
-#define PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK	    0x0000c000
-#define PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT	    14
-	/* AN and forced */
-#define PORT_HW_CFG_LANE_SWAP_CFG_01230123	    0x00001b1b
-	/* forced only */
-#define PORT_HW_CFG_LANE_SWAP_CFG_01233210	    0x00001be4
-	/* forced only */
-#define PORT_HW_CFG_LANE_SWAP_CFG_31203120	    0x0000d8d8
-	/* forced only */
-#define PORT_HW_CFG_LANE_SWAP_CFG_32103210	    0x0000e4e4
-    /*	Indicate whether to swap the external phy polarity */
-#define PORT_HW_CFG_SWAP_PHY_POLARITY_MASK	       0x00010000
-#define PORT_HW_CFG_SWAP_PHY_POLARITY_DISABLED	    0x00000000
-#define PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED	    0x00010000
+	/*  Indicates whether to swap the external phy polarity */
+	#define PORT_HW_CFG_SWAP_PHY_POLARITY_MASK          0x00010000
+		#define PORT_HW_CFG_SWAP_PHY_POLARITY_DISABLED       0x00000000
+		#define PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED        0x00010000
+
 
 	u32 external_phy_config;
-#define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK	    0xff000000
-#define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_SHIFT	    24
-#define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT	    0x00000000
-#define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482     0x01000000
-#define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN    0xff000000
+	#define PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK          0x000000ff
+	#define PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT                  0
 
-#define PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK	    0x00ff0000
-#define PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT	    16
+	#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK          0x0000ff00
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SHIFT          8
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT         0x00000000
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8071        0x00000100
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072        0x00000200
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073        0x00000300
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705        0x00000400
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706        0x00000500
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726        0x00000600
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481        0x00000700
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101        0x00000800
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727        0x00000900
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC    0x00000a00
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823       0x00000b00
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54640       0x00000c00
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833       0x00000d00
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54616       0x00000e00
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722        0x00000f00
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT_WC      0x0000fc00
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE        0x0000fd00
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN       0x0000ff00
 
-#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK	    0x0000ff00
-#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SHIFT	    8
-#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT	    0x00000000
-#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8071	    0x00000100
-#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072	    0x00000200
-#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073	    0x00000300
-#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705	    0x00000400
-#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706	    0x00000500
-#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726	    0x00000600
-#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481	    0x00000700
-#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101	    0x00000800
-#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727	    0x00000900
-#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC   0x00000a00
-#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823	    0x00000b00
-#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833	    0x00000d00
-#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE	    0x0000fd00
-#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN	    0x0000ff00
+	#define PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK        0x00ff0000
+	#define PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT                16
 
-#define PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK	    0x000000ff
-#define PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT	    0
+	#define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK        0xff000000
+		#define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_SHIFT        24
+		#define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT       0x00000000
+		#define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482      0x01000000
+		#define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT_SD    0x02000000
+		#define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN     0xff000000
 
 	u32 speed_capability_mask;
-#define PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK	    0xffff0000
-#define PORT_HW_CFG_SPEED_CAPABILITY_D0_SHIFT	    16
-#define PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL    0x00010000
-#define PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF    0x00020000
-#define PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF   0x00040000
-#define PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL   0x00080000
-#define PORT_HW_CFG_SPEED_CAPABILITY_D0_1G	    0x00100000
-#define PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G	    0x00200000
-#define PORT_HW_CFG_SPEED_CAPABILITY_D0_10G	    0x00400000
-#define PORT_HW_CFG_SPEED_CAPABILITY_D0_12G	    0x00800000
-#define PORT_HW_CFG_SPEED_CAPABILITY_D0_12_5G	    0x01000000
-#define PORT_HW_CFG_SPEED_CAPABILITY_D0_13G	    0x02000000
-#define PORT_HW_CFG_SPEED_CAPABILITY_D0_15G	    0x04000000
-#define PORT_HW_CFG_SPEED_CAPABILITY_D0_16G	    0x08000000
-#define PORT_HW_CFG_SPEED_CAPABILITY_D0_RESERVED    0xf0000000
+	#define PORT_HW_CFG_SPEED_CAPABILITY_D3_MASK        0x0000ffff
+		#define PORT_HW_CFG_SPEED_CAPABILITY_D3_SHIFT        0
+		#define PORT_HW_CFG_SPEED_CAPABILITY_D3_10M_FULL     0x00000001
+		#define PORT_HW_CFG_SPEED_CAPABILITY_D3_10M_HALF     0x00000002
+		#define PORT_HW_CFG_SPEED_CAPABILITY_D3_100M_HALF    0x00000004
+		#define PORT_HW_CFG_SPEED_CAPABILITY_D3_100M_FULL    0x00000008
+		#define PORT_HW_CFG_SPEED_CAPABILITY_D3_1G           0x00000010
+		#define PORT_HW_CFG_SPEED_CAPABILITY_D3_2_5G         0x00000020
+		#define PORT_HW_CFG_SPEED_CAPABILITY_D3_10G          0x00000040
+		#define PORT_HW_CFG_SPEED_CAPABILITY_D3_20G          0x00000080
+		#define PORT_HW_CFG_SPEED_CAPABILITY_D3_RESERVED     0x0000f000
 
-#define PORT_HW_CFG_SPEED_CAPABILITY_D3_MASK	    0x0000ffff
-#define PORT_HW_CFG_SPEED_CAPABILITY_D3_SHIFT	    0
-#define PORT_HW_CFG_SPEED_CAPABILITY_D3_10M_FULL    0x00000001
-#define PORT_HW_CFG_SPEED_CAPABILITY_D3_10M_HALF    0x00000002
-#define PORT_HW_CFG_SPEED_CAPABILITY_D3_100M_HALF   0x00000004
-#define PORT_HW_CFG_SPEED_CAPABILITY_D3_100M_FULL   0x00000008
-#define PORT_HW_CFG_SPEED_CAPABILITY_D3_1G	    0x00000010
-#define PORT_HW_CFG_SPEED_CAPABILITY_D3_2_5G	    0x00000020
-#define PORT_HW_CFG_SPEED_CAPABILITY_D3_10G	    0x00000040
-#define PORT_HW_CFG_SPEED_CAPABILITY_D3_12G	    0x00000080
-#define PORT_HW_CFG_SPEED_CAPABILITY_D3_12_5G	    0x00000100
-#define PORT_HW_CFG_SPEED_CAPABILITY_D3_13G	    0x00000200
-#define PORT_HW_CFG_SPEED_CAPABILITY_D3_15G	    0x00000400
-#define PORT_HW_CFG_SPEED_CAPABILITY_D3_16G	    0x00000800
-#define PORT_HW_CFG_SPEED_CAPABILITY_D3_RESERVED    0x0000f000
+	#define PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK        0xffff0000
+		#define PORT_HW_CFG_SPEED_CAPABILITY_D0_SHIFT        16
+		#define PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL     0x00010000
+		#define PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF     0x00020000
+		#define PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF    0x00040000
+		#define PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL    0x00080000
+		#define PORT_HW_CFG_SPEED_CAPABILITY_D0_1G           0x00100000
+		#define PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G         0x00200000
+		#define PORT_HW_CFG_SPEED_CAPABILITY_D0_10G          0x00400000
+		#define PORT_HW_CFG_SPEED_CAPABILITY_D0_20G          0x00800000
+		#define PORT_HW_CFG_SPEED_CAPABILITY_D0_RESERVED     0xf0000000
 
-	u32 reserved[2];
+	/*  A place to hold the original MAC address as a backup */
+	u32 backup_mac_upper;			/* 0x2B4 */
+	u32 backup_mac_lower;			/* 0x2B8 */
 
 };
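
The LANE_SWAP_CFG encoding above (in both wc_lane_config and lane_config)
packs a TX lane map in bits 7:0 and an RX lane map in bits 15:8; each map
carries four 2-bit logical-lane indices, so a byte of 0xe4 (binary
11 10 01 00) is the identity mapping and 0x1b the full reversal. A sketch of
the unpacking, with lane_config assumed to be the already-read word (the
variable names here are illustrative, not from the driver):

	uint32_t tx_map = (lane_config & PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK) >>
			  PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT;
	uint32_t rx_map = (lane_config & PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >>
			  PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT;
	int lane;

	for (lane = 0; lane < 4; lane++) {
		/* each physical lane's 2-bit logical index */
		unsigned int tx_lane = (tx_map >> (2 * lane)) & 0x3;
		unsigned int rx_lane = (rx_map >> (2 * lane)) & 0x3;
		/* ... program the SerDes lane muxing with tx_lane/rx_lane ... */
	}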
 
 
 /****************************************************************************
- * Shared Feature configuration 					    *
+ * Shared Feature configuration                                             *
  ****************************************************************************/
-struct shared_feat_cfg {				 /* NVRAM Offset */
+struct shared_feat_cfg {		 /* NVRAM Offset */
 
-	u32 config;						/* 0x450 */
-#define SHARED_FEATURE_BMC_ECHO_MODE_EN 	    0x00000001
+	u32 config;			/* 0x450 */
+	#define SHARED_FEATURE_BMC_ECHO_MODE_EN             0x00000001
 
-	/*  Use the values from options 47 and 48 instead of the HW default
-	  values */
-#define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_DISABLED     0x00000000
-#define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED      0x00000002
+	/* Use NVRAM values instead of HW default values */
+	#define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_MASK \
+							    0x00000002
+		#define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_DISABLED \
+								     0x00000000
+		#define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED \
+								     0x00000002
 
-#define SHARED_FEAT_CFG_FORCE_SF_MODE_MASK		      0x00000700
-#define SHARED_FEAT_CFG_FORCE_SF_MODE_SHIFT		      8
-#define SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED	      0x00000000
-#define SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF		      0x00000100
-#define SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4		      0x00000200
-#define SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT	      0x00000300
+	#define SHARED_FEAT_CFG_NCSI_ID_METHOD_MASK         0x00000008
+		#define SHARED_FEAT_CFG_NCSI_ID_METHOD_SPIO          0x00000000
+		#define SHARED_FEAT_CFG_NCSI_ID_METHOD_NVRAM         0x00000008
+
+	#define SHARED_FEAT_CFG_NCSI_ID_MASK                0x00000030
+	#define SHARED_FEAT_CFG_NCSI_ID_SHIFT                        4
+
+	/*  Override the OTP back to single-function mode. When using GPIO,
+	      high means SF only, low means per the CLP configuration */
+	#define SHARED_FEAT_CFG_FORCE_SF_MODE_MASK          0x00000700
+		#define SHARED_FEAT_CFG_FORCE_SF_MODE_SHIFT          8
+		#define SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED     0x00000000
+		#define SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF      0x00000100
+		#define SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4          0x00000200
+		#define SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT  0x00000300
+
+	/* The interval in seconds between sending LLDP packets. Set to zero
+	   to disable the feature */
+	#define SHARED_FEAT_CFG_LLDP_XMIT_INTERVAL_MASK     0x00ff0000
+	#define SHARED_FEAT_CFG_LLDP_XMIT_INTERVAL_SHIFT             16
+
+	/* The assigned device type ID for LLDP usage */
+	#define SHARED_FEAT_CFG_LLDP_DEVICE_TYPE_ID_MASK    0xff000000
+	#define SHARED_FEAT_CFG_LLDP_DEVICE_TYPE_ID_SHIFT            24
 
 };
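
Per the override comment above, FORCE_SF_MODE lets NVRAM (or a pin) override
the OTP multi-function setting. A hedged sketch of dispatching on that field,
with config assumed to hold the shared_feat_cfg config word at 0x450:

	switch (config & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK) {
	case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:
		/* single-function mode forced regardless of OTP */
		break;
	case SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4:
		/* follow the SPIO4 pin: high means SF only */
		break;
	case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
		/* switch-independent multi-function mode */
		break;
	case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
	default:
		/* multi-function allowed, per the CLP configuration */
		break;
	}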
 
 
 /****************************************************************************
- * Port Feature configuration						    *
+ * Port Feature configuration                                               *
  ****************************************************************************/
-struct port_feat_cfg {			    /* port 0: 0x454  port 1: 0x4c8 */
+struct port_feat_cfg {		    /* port 0: 0x454  port 1: 0x4c8 */
 
 	u32 config;
-#define PORT_FEATURE_BAR1_SIZE_MASK		    0x0000000f
-#define PORT_FEATURE_BAR1_SIZE_SHIFT		    0
-#define PORT_FEATURE_BAR1_SIZE_DISABLED 	    0x00000000
-#define PORT_FEATURE_BAR1_SIZE_64K		    0x00000001
-#define PORT_FEATURE_BAR1_SIZE_128K		    0x00000002
-#define PORT_FEATURE_BAR1_SIZE_256K		    0x00000003
-#define PORT_FEATURE_BAR1_SIZE_512K		    0x00000004
-#define PORT_FEATURE_BAR1_SIZE_1M		    0x00000005
-#define PORT_FEATURE_BAR1_SIZE_2M		    0x00000006
-#define PORT_FEATURE_BAR1_SIZE_4M		    0x00000007
-#define PORT_FEATURE_BAR1_SIZE_8M		    0x00000008
-#define PORT_FEATURE_BAR1_SIZE_16M		    0x00000009
-#define PORT_FEATURE_BAR1_SIZE_32M		    0x0000000a
-#define PORT_FEATURE_BAR1_SIZE_64M		    0x0000000b
-#define PORT_FEATURE_BAR1_SIZE_128M		    0x0000000c
-#define PORT_FEATURE_BAR1_SIZE_256M		    0x0000000d
-#define PORT_FEATURE_BAR1_SIZE_512M		    0x0000000e
-#define PORT_FEATURE_BAR1_SIZE_1G		    0x0000000f
-#define PORT_FEATURE_BAR2_SIZE_MASK		    0x000000f0
-#define PORT_FEATURE_BAR2_SIZE_SHIFT		    4
-#define PORT_FEATURE_BAR2_SIZE_DISABLED 	    0x00000000
-#define PORT_FEATURE_BAR2_SIZE_64K		    0x00000010
-#define PORT_FEATURE_BAR2_SIZE_128K		    0x00000020
-#define PORT_FEATURE_BAR2_SIZE_256K		    0x00000030
-#define PORT_FEATURE_BAR2_SIZE_512K		    0x00000040
-#define PORT_FEATURE_BAR2_SIZE_1M		    0x00000050
-#define PORT_FEATURE_BAR2_SIZE_2M		    0x00000060
-#define PORT_FEATURE_BAR2_SIZE_4M		    0x00000070
-#define PORT_FEATURE_BAR2_SIZE_8M		    0x00000080
-#define PORT_FEATURE_BAR2_SIZE_16M		    0x00000090
-#define PORT_FEATURE_BAR2_SIZE_32M		    0x000000a0
-#define PORT_FEATURE_BAR2_SIZE_64M		    0x000000b0
-#define PORT_FEATURE_BAR2_SIZE_128M		    0x000000c0
-#define PORT_FEATURE_BAR2_SIZE_256M		    0x000000d0
-#define PORT_FEATURE_BAR2_SIZE_512M		    0x000000e0
-#define PORT_FEATURE_BAR2_SIZE_1G		    0x000000f0
-#define PORT_FEATURE_EN_SIZE_MASK		    0x07000000
-#define PORT_FEATURE_EN_SIZE_SHIFT		    24
-#define PORT_FEATURE_WOL_ENABLED		    0x01000000
-#define PORT_FEATURE_MBA_ENABLED		    0x02000000
-#define PORT_FEATURE_MFW_ENABLED		    0x04000000
+	#define PORT_FEATURE_BAR1_SIZE_MASK                 0x0000000f
+		#define PORT_FEATURE_BAR1_SIZE_SHIFT                 0
+		#define PORT_FEATURE_BAR1_SIZE_DISABLED              0x00000000
+		#define PORT_FEATURE_BAR1_SIZE_64K                   0x00000001
+		#define PORT_FEATURE_BAR1_SIZE_128K                  0x00000002
+		#define PORT_FEATURE_BAR1_SIZE_256K                  0x00000003
+		#define PORT_FEATURE_BAR1_SIZE_512K                  0x00000004
+		#define PORT_FEATURE_BAR1_SIZE_1M                    0x00000005
+		#define PORT_FEATURE_BAR1_SIZE_2M                    0x00000006
+		#define PORT_FEATURE_BAR1_SIZE_4M                    0x00000007
+		#define PORT_FEATURE_BAR1_SIZE_8M                    0x00000008
+		#define PORT_FEATURE_BAR1_SIZE_16M                   0x00000009
+		#define PORT_FEATURE_BAR1_SIZE_32M                   0x0000000a
+		#define PORT_FEATURE_BAR1_SIZE_64M                   0x0000000b
+		#define PORT_FEATURE_BAR1_SIZE_128M                  0x0000000c
+		#define PORT_FEATURE_BAR1_SIZE_256M                  0x0000000d
+		#define PORT_FEATURE_BAR1_SIZE_512M                  0x0000000e
+		#define PORT_FEATURE_BAR1_SIZE_1G                    0x0000000f
+	#define PORT_FEATURE_BAR2_SIZE_MASK                 0x000000f0
+		#define PORT_FEATURE_BAR2_SIZE_SHIFT                 4
+		#define PORT_FEATURE_BAR2_SIZE_DISABLED              0x00000000
+		#define PORT_FEATURE_BAR2_SIZE_64K                   0x00000010
+		#define PORT_FEATURE_BAR2_SIZE_128K                  0x00000020
+		#define PORT_FEATURE_BAR2_SIZE_256K                  0x00000030
+		#define PORT_FEATURE_BAR2_SIZE_512K                  0x00000040
+		#define PORT_FEATURE_BAR2_SIZE_1M                    0x00000050
+		#define PORT_FEATURE_BAR2_SIZE_2M                    0x00000060
+		#define PORT_FEATURE_BAR2_SIZE_4M                    0x00000070
+		#define PORT_FEATURE_BAR2_SIZE_8M                    0x00000080
+		#define PORT_FEATURE_BAR2_SIZE_16M                   0x00000090
+		#define PORT_FEATURE_BAR2_SIZE_32M                   0x000000a0
+		#define PORT_FEATURE_BAR2_SIZE_64M                   0x000000b0
+		#define PORT_FEATURE_BAR2_SIZE_128M                  0x000000c0
+		#define PORT_FEATURE_BAR2_SIZE_256M                  0x000000d0
+		#define PORT_FEATURE_BAR2_SIZE_512M                  0x000000e0
+		#define PORT_FEATURE_BAR2_SIZE_1G                    0x000000f0
 
-	/* Reserved bits: 28-29 */
-	/*  Check the optic vendor via i2c against a list of approved modules
-	  in a separate nvram image */
-#define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK		      0xE0000000
-#define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_SHIFT		      29
-#define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_NO_ENFORCEMENT	      0x00000000
-#define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER       0x20000000
-#define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_WARNING_MSG	      0x40000000
-#define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN	      0x60000000
+	#define PORT_FEAT_CFG_DCBX_MASK                     0x00000100
+		#define PORT_FEAT_CFG_DCBX_DISABLED                  0x00000000
+		#define PORT_FEAT_CFG_DCBX_ENABLED                   0x00000100
 
+	#define PORT_FEAT_CFG_AUTOGREEN_MASK                0x00000200
+	#define PORT_FEAT_CFG_AUTOGREEN_SHIFT                        9
+	#define PORT_FEAT_CFG_AUTOGREEN_DISABLED                     0x00000000
+	#define PORT_FEAT_CFG_AUTOGREEN_ENABLED                      0x00000200
+
+	#define PORT_FEATURE_EN_SIZE_MASK                   0x0f000000
+	#define PORT_FEATURE_EN_SIZE_SHIFT                           24
+	#define PORT_FEATURE_WOL_ENABLED                             0x01000000
+	#define PORT_FEATURE_MBA_ENABLED                             0x02000000
+	#define PORT_FEATURE_MFW_ENABLED                             0x04000000
+
+	/* Advertise expansion ROM even if MBA is disabled */
+	#define PORT_FEAT_CFG_FORCE_EXP_ROM_ADV_MASK        0x08000000
+		#define PORT_FEAT_CFG_FORCE_EXP_ROM_ADV_DISABLED     0x00000000
+		#define PORT_FEAT_CFG_FORCE_EXP_ROM_ADV_ENABLED      0x08000000
+
+	/* Check the optic vendor via i2c against a list of approved modules
+	   in a separate nvram image */
+	#define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK         0xe0000000
+		#define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_SHIFT         29
+		#define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_NO_ENFORCEMENT \
+								     0x00000000
+		#define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER \
+								     0x20000000
+		#define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_WARNING_MSG   0x40000000
+		#define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN    0x60000000
 
 	u32 wol_config;
 	/* Default is used when driver sets to "auto" mode */
-#define PORT_FEATURE_WOL_DEFAULT_MASK		    0x00000003
-#define PORT_FEATURE_WOL_DEFAULT_SHIFT		    0
-#define PORT_FEATURE_WOL_DEFAULT_DISABLE	    0x00000000
-#define PORT_FEATURE_WOL_DEFAULT_MAGIC		    0x00000001
-#define PORT_FEATURE_WOL_DEFAULT_ACPI		    0x00000002
-#define PORT_FEATURE_WOL_DEFAULT_MAGIC_AND_ACPI     0x00000003
-#define PORT_FEATURE_WOL_RES_PAUSE_CAP		    0x00000004
-#define PORT_FEATURE_WOL_RES_ASYM_PAUSE_CAP	    0x00000008
-#define PORT_FEATURE_WOL_ACPI_UPON_MGMT 	    0x00000010
+	#define PORT_FEATURE_WOL_DEFAULT_MASK               0x00000003
+		#define PORT_FEATURE_WOL_DEFAULT_SHIFT               0
+		#define PORT_FEATURE_WOL_DEFAULT_DISABLE             0x00000000
+		#define PORT_FEATURE_WOL_DEFAULT_MAGIC               0x00000001
+		#define PORT_FEATURE_WOL_DEFAULT_ACPI                0x00000002
+		#define PORT_FEATURE_WOL_DEFAULT_MAGIC_AND_ACPI      0x00000003
+	#define PORT_FEATURE_WOL_RES_PAUSE_CAP              0x00000004
+	#define PORT_FEATURE_WOL_RES_ASYM_PAUSE_CAP         0x00000008
+	#define PORT_FEATURE_WOL_ACPI_UPON_MGMT             0x00000010
 
 	u32 mba_config;
-#define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK	    0x00000003
-#define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_SHIFT	    0
-#define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE	    0x00000000
-#define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_RPL	    0x00000001
-#define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_BOOTP	    0x00000002
-#define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB     0x00000003
-#define PORT_FEATURE_MBA_RES_PAUSE_CAP		    0x00000100
-#define PORT_FEATURE_MBA_RES_ASYM_PAUSE_CAP	    0x00000200
-#define PORT_FEATURE_MBA_SETUP_PROMPT_ENABLE	    0x00000400
-#define PORT_FEATURE_MBA_HOTKEY_CTRL_S		    0x00000000
-#define PORT_FEATURE_MBA_HOTKEY_CTRL_B		    0x00000800
-#define PORT_FEATURE_MBA_EXP_ROM_SIZE_MASK	    0x000ff000
-#define PORT_FEATURE_MBA_EXP_ROM_SIZE_SHIFT	    12
-#define PORT_FEATURE_MBA_EXP_ROM_SIZE_DISABLED	    0x00000000
-#define PORT_FEATURE_MBA_EXP_ROM_SIZE_2K	    0x00001000
-#define PORT_FEATURE_MBA_EXP_ROM_SIZE_4K	    0x00002000
-#define PORT_FEATURE_MBA_EXP_ROM_SIZE_8K	    0x00003000
-#define PORT_FEATURE_MBA_EXP_ROM_SIZE_16K	    0x00004000
-#define PORT_FEATURE_MBA_EXP_ROM_SIZE_32K	    0x00005000
-#define PORT_FEATURE_MBA_EXP_ROM_SIZE_64K	    0x00006000
-#define PORT_FEATURE_MBA_EXP_ROM_SIZE_128K	    0x00007000
-#define PORT_FEATURE_MBA_EXP_ROM_SIZE_256K	    0x00008000
-#define PORT_FEATURE_MBA_EXP_ROM_SIZE_512K	    0x00009000
-#define PORT_FEATURE_MBA_EXP_ROM_SIZE_1M	    0x0000a000
-#define PORT_FEATURE_MBA_EXP_ROM_SIZE_2M	    0x0000b000
-#define PORT_FEATURE_MBA_EXP_ROM_SIZE_4M	    0x0000c000
-#define PORT_FEATURE_MBA_EXP_ROM_SIZE_8M	    0x0000d000
-#define PORT_FEATURE_MBA_EXP_ROM_SIZE_16M	    0x0000e000
-#define PORT_FEATURE_MBA_EXP_ROM_SIZE_32M	    0x0000f000
-#define PORT_FEATURE_MBA_MSG_TIMEOUT_MASK	    0x00f00000
-#define PORT_FEATURE_MBA_MSG_TIMEOUT_SHIFT	    20
-#define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_MASK	    0x03000000
-#define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_SHIFT	    24
-#define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_AUTO	    0x00000000
-#define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_BBS	    0x01000000
-#define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_INT18H	    0x02000000
-#define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_INT19H	    0x03000000
-#define PORT_FEATURE_MBA_LINK_SPEED_MASK	    0x3c000000
-#define PORT_FEATURE_MBA_LINK_SPEED_SHIFT	    26
-#define PORT_FEATURE_MBA_LINK_SPEED_AUTO	    0x00000000
-#define PORT_FEATURE_MBA_LINK_SPEED_10HD	    0x04000000
-#define PORT_FEATURE_MBA_LINK_SPEED_10FD	    0x08000000
-#define PORT_FEATURE_MBA_LINK_SPEED_100HD	    0x0c000000
-#define PORT_FEATURE_MBA_LINK_SPEED_100FD	    0x10000000
-#define PORT_FEATURE_MBA_LINK_SPEED_1GBPS	    0x14000000
-#define PORT_FEATURE_MBA_LINK_SPEED_2_5GBPS	    0x18000000
-#define PORT_FEATURE_MBA_LINK_SPEED_10GBPS_CX4	    0x1c000000
-#define PORT_FEATURE_MBA_LINK_SPEED_10GBPS_KX4	    0x20000000
-#define PORT_FEATURE_MBA_LINK_SPEED_10GBPS_KR	    0x24000000
-#define PORT_FEATURE_MBA_LINK_SPEED_12GBPS	    0x28000000
-#define PORT_FEATURE_MBA_LINK_SPEED_12_5GBPS	    0x2c000000
-#define PORT_FEATURE_MBA_LINK_SPEED_13GBPS	    0x30000000
-#define PORT_FEATURE_MBA_LINK_SPEED_15GBPS	    0x34000000
-#define PORT_FEATURE_MBA_LINK_SPEED_16GBPS	    0x38000000
+	#define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK       0x00000007
+		#define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_SHIFT       0
+		#define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE         0x00000000
+		#define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_RPL         0x00000001
+		#define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_BOOTP       0x00000002
+		#define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB      0x00000003
+		#define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_FCOE_BOOT   0x00000004
+		#define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_NONE        0x00000007
 
+	#define PORT_FEATURE_MBA_BOOT_RETRY_MASK            0x00000038
+	#define PORT_FEATURE_MBA_BOOT_RETRY_SHIFT                    3
+
+	#define PORT_FEATURE_MBA_RES_PAUSE_CAP              0x00000100
+	#define PORT_FEATURE_MBA_RES_ASYM_PAUSE_CAP         0x00000200
+	#define PORT_FEATURE_MBA_SETUP_PROMPT_ENABLE        0x00000400
+	#define PORT_FEATURE_MBA_HOTKEY_MASK                0x00000800
+		#define PORT_FEATURE_MBA_HOTKEY_CTRL_S               0x00000000
+		#define PORT_FEATURE_MBA_HOTKEY_CTRL_B               0x00000800
+	#define PORT_FEATURE_MBA_EXP_ROM_SIZE_MASK          0x000ff000
+		#define PORT_FEATURE_MBA_EXP_ROM_SIZE_SHIFT          12
+		#define PORT_FEATURE_MBA_EXP_ROM_SIZE_DISABLED       0x00000000
+		#define PORT_FEATURE_MBA_EXP_ROM_SIZE_2K             0x00001000
+		#define PORT_FEATURE_MBA_EXP_ROM_SIZE_4K             0x00002000
+		#define PORT_FEATURE_MBA_EXP_ROM_SIZE_8K             0x00003000
+		#define PORT_FEATURE_MBA_EXP_ROM_SIZE_16K            0x00004000
+		#define PORT_FEATURE_MBA_EXP_ROM_SIZE_32K            0x00005000
+		#define PORT_FEATURE_MBA_EXP_ROM_SIZE_64K            0x00006000
+		#define PORT_FEATURE_MBA_EXP_ROM_SIZE_128K           0x00007000
+		#define PORT_FEATURE_MBA_EXP_ROM_SIZE_256K           0x00008000
+		#define PORT_FEATURE_MBA_EXP_ROM_SIZE_512K           0x00009000
+		#define PORT_FEATURE_MBA_EXP_ROM_SIZE_1M             0x0000a000
+		#define PORT_FEATURE_MBA_EXP_ROM_SIZE_2M             0x0000b000
+		#define PORT_FEATURE_MBA_EXP_ROM_SIZE_4M             0x0000c000
+		#define PORT_FEATURE_MBA_EXP_ROM_SIZE_8M             0x0000d000
+		#define PORT_FEATURE_MBA_EXP_ROM_SIZE_16M            0x0000e000
+		#define PORT_FEATURE_MBA_EXP_ROM_SIZE_32M            0x0000f000
+	#define PORT_FEATURE_MBA_MSG_TIMEOUT_MASK           0x00f00000
+	#define PORT_FEATURE_MBA_MSG_TIMEOUT_SHIFT                   20
+	#define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_MASK        0x03000000
+		#define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_SHIFT        24
+		#define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_AUTO         0x00000000
+		#define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_BBS          0x01000000
+		#define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_INT18H       0x02000000
+		#define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_INT19H       0x03000000
+	#define PORT_FEATURE_MBA_LINK_SPEED_MASK            0x3c000000
+		#define PORT_FEATURE_MBA_LINK_SPEED_SHIFT            26
+		#define PORT_FEATURE_MBA_LINK_SPEED_AUTO             0x00000000
+		#define PORT_FEATURE_MBA_LINK_SPEED_10HD             0x04000000
+		#define PORT_FEATURE_MBA_LINK_SPEED_10FD             0x08000000
+		#define PORT_FEATURE_MBA_LINK_SPEED_100HD            0x0c000000
+		#define PORT_FEATURE_MBA_LINK_SPEED_100FD            0x10000000
+		#define PORT_FEATURE_MBA_LINK_SPEED_1GBPS            0x14000000
+		#define PORT_FEATURE_MBA_LINK_SPEED_2_5GBPS          0x18000000
+		#define PORT_FEATURE_MBA_LINK_SPEED_10GBPS_CX4       0x1c000000
+		#define PORT_FEATURE_MBA_LINK_SPEED_20GBPS           0x20000000
 	u32 bmc_config;
-#define PORT_FEATURE_BMC_LINK_OVERRIDE_DEFAULT	    0x00000000
-#define PORT_FEATURE_BMC_LINK_OVERRIDE_EN	    0x00000001
+	#define PORT_FEATURE_BMC_LINK_OVERRIDE_MASK         0x00000001
+		#define PORT_FEATURE_BMC_LINK_OVERRIDE_DEFAULT       0x00000000
+		#define PORT_FEATURE_BMC_LINK_OVERRIDE_EN            0x00000001
 
 	u32 mba_vlan_cfg;
-#define PORT_FEATURE_MBA_VLAN_TAG_MASK		    0x0000ffff
-#define PORT_FEATURE_MBA_VLAN_TAG_SHIFT 	    0
-#define PORT_FEATURE_MBA_VLAN_EN		    0x00010000
+	#define PORT_FEATURE_MBA_VLAN_TAG_MASK              0x0000ffff
+	#define PORT_FEATURE_MBA_VLAN_TAG_SHIFT                      0
+	#define PORT_FEATURE_MBA_VLAN_EN                    0x00010000
 
 	u32 resource_cfg;
-#define PORT_FEATURE_RESOURCE_CFG_VALID 	    0x00000001
-#define PORT_FEATURE_RESOURCE_CFG_DIAG		    0x00000002
-#define PORT_FEATURE_RESOURCE_CFG_L2		    0x00000004
-#define PORT_FEATURE_RESOURCE_CFG_ISCSI 	    0x00000008
-#define PORT_FEATURE_RESOURCE_CFG_RDMA		    0x00000010
+	#define PORT_FEATURE_RESOURCE_CFG_VALID             0x00000001
+	#define PORT_FEATURE_RESOURCE_CFG_DIAG              0x00000002
+	#define PORT_FEATURE_RESOURCE_CFG_L2                0x00000004
+	#define PORT_FEATURE_RESOURCE_CFG_ISCSI             0x00000008
+	#define PORT_FEATURE_RESOURCE_CFG_RDMA              0x00000010
 
 	u32 smbus_config;
-	/* Obsolete */
-#define PORT_FEATURE_SMBUS_EN			    0x00000001
-#define PORT_FEATURE_SMBUS_ADDR_MASK		    0x000000fe
-#define PORT_FEATURE_SMBUS_ADDR_SHIFT		    1
+	#define PORT_FEATURE_SMBUS_ADDR_MASK                0x000000fe
+	#define PORT_FEATURE_SMBUS_ADDR_SHIFT                        1
 
-	u32 reserved1;
+	u32 vf_config;
+	#define PORT_FEAT_CFG_VF_BAR2_SIZE_MASK             0x0000000f
+		#define PORT_FEAT_CFG_VF_BAR2_SIZE_SHIFT             0
+		#define PORT_FEAT_CFG_VF_BAR2_SIZE_DISABLED          0x00000000
+		#define PORT_FEAT_CFG_VF_BAR2_SIZE_4K                0x00000001
+		#define PORT_FEAT_CFG_VF_BAR2_SIZE_8K                0x00000002
+		#define PORT_FEAT_CFG_VF_BAR2_SIZE_16K               0x00000003
+		#define PORT_FEAT_CFG_VF_BAR2_SIZE_32K               0x00000004
+		#define PORT_FEAT_CFG_VF_BAR2_SIZE_64K               0x00000005
+		#define PORT_FEAT_CFG_VF_BAR2_SIZE_128K              0x00000006
+		#define PORT_FEAT_CFG_VF_BAR2_SIZE_256K              0x00000007
+		#define PORT_FEAT_CFG_VF_BAR2_SIZE_512K              0x00000008
+		#define PORT_FEAT_CFG_VF_BAR2_SIZE_1M                0x00000009
+		#define PORT_FEAT_CFG_VF_BAR2_SIZE_2M                0x0000000a
+		#define PORT_FEAT_CFG_VF_BAR2_SIZE_4M                0x0000000b
+		#define PORT_FEAT_CFG_VF_BAR2_SIZE_8M                0x0000000c
+		#define PORT_FEAT_CFG_VF_BAR2_SIZE_16M               0x0000000d
+		#define PORT_FEAT_CFG_VF_BAR2_SIZE_32M               0x0000000e
+		#define PORT_FEAT_CFG_VF_BAR2_SIZE_64M               0x0000000f
 
 	u32 link_config;    /* Used as HW defaults for the driver */
-#define PORT_FEATURE_CONNECTED_SWITCH_MASK	    0x03000000
-#define PORT_FEATURE_CONNECTED_SWITCH_SHIFT	    24
-	/* (forced) low speed switch (< 10G) */
-#define PORT_FEATURE_CON_SWITCH_1G_SWITCH	    0x00000000
-	/* (forced) high speed switch (>= 10G) */
-#define PORT_FEATURE_CON_SWITCH_10G_SWITCH	    0x01000000
-#define PORT_FEATURE_CON_SWITCH_AUTO_DETECT	    0x02000000
-#define PORT_FEATURE_CON_SWITCH_ONE_TIME_DETECT     0x03000000
+	#define PORT_FEATURE_CONNECTED_SWITCH_MASK          0x03000000
+		#define PORT_FEATURE_CONNECTED_SWITCH_SHIFT          24
+		/* (forced) low speed switch (< 10G) */
+		#define PORT_FEATURE_CON_SWITCH_1G_SWITCH            0x00000000
+		/* (forced) high speed switch (>= 10G) */
+		#define PORT_FEATURE_CON_SWITCH_10G_SWITCH           0x01000000
+		#define PORT_FEATURE_CON_SWITCH_AUTO_DETECT          0x02000000
+		#define PORT_FEATURE_CON_SWITCH_ONE_TIME_DETECT      0x03000000
 
-#define PORT_FEATURE_LINK_SPEED_MASK		    0x000f0000
-#define PORT_FEATURE_LINK_SPEED_SHIFT		    16
-#define PORT_FEATURE_LINK_SPEED_AUTO		    0x00000000
-#define PORT_FEATURE_LINK_SPEED_10M_FULL	    0x00010000
-#define PORT_FEATURE_LINK_SPEED_10M_HALF	    0x00020000
-#define PORT_FEATURE_LINK_SPEED_100M_HALF	    0x00030000
-#define PORT_FEATURE_LINK_SPEED_100M_FULL	    0x00040000
-#define PORT_FEATURE_LINK_SPEED_1G		    0x00050000
-#define PORT_FEATURE_LINK_SPEED_2_5G		    0x00060000
-#define PORT_FEATURE_LINK_SPEED_10G_CX4 	    0x00070000
-#define PORT_FEATURE_LINK_SPEED_10G_KX4 	    0x00080000
-#define PORT_FEATURE_LINK_SPEED_10G_KR		    0x00090000
-#define PORT_FEATURE_LINK_SPEED_12G		    0x000a0000
-#define PORT_FEATURE_LINK_SPEED_12_5G		    0x000b0000
-#define PORT_FEATURE_LINK_SPEED_13G		    0x000c0000
-#define PORT_FEATURE_LINK_SPEED_15G		    0x000d0000
-#define PORT_FEATURE_LINK_SPEED_16G		    0x000e0000
+	#define PORT_FEATURE_LINK_SPEED_MASK                0x000f0000
+		#define PORT_FEATURE_LINK_SPEED_SHIFT                16
+		#define PORT_FEATURE_LINK_SPEED_AUTO                 0x00000000
+		#define PORT_FEATURE_LINK_SPEED_10M_FULL             0x00010000
+		#define PORT_FEATURE_LINK_SPEED_10M_HALF             0x00020000
+		#define PORT_FEATURE_LINK_SPEED_100M_HALF            0x00030000
+		#define PORT_FEATURE_LINK_SPEED_100M_FULL            0x00040000
+		#define PORT_FEATURE_LINK_SPEED_1G                   0x00050000
+		#define PORT_FEATURE_LINK_SPEED_2_5G                 0x00060000
+		#define PORT_FEATURE_LINK_SPEED_10G_CX4              0x00070000
+		#define PORT_FEATURE_LINK_SPEED_20G                  0x00080000
 
-#define PORT_FEATURE_FLOW_CONTROL_MASK		    0x00000700
-#define PORT_FEATURE_FLOW_CONTROL_SHIFT 	    8
-#define PORT_FEATURE_FLOW_CONTROL_AUTO		    0x00000000
-#define PORT_FEATURE_FLOW_CONTROL_TX		    0x00000100
-#define PORT_FEATURE_FLOW_CONTROL_RX		    0x00000200
-#define PORT_FEATURE_FLOW_CONTROL_BOTH		    0x00000300
-#define PORT_FEATURE_FLOW_CONTROL_NONE		    0x00000400
+	#define PORT_FEATURE_FLOW_CONTROL_MASK              0x00000700
+		#define PORT_FEATURE_FLOW_CONTROL_SHIFT              8
+		#define PORT_FEATURE_FLOW_CONTROL_AUTO               0x00000000
+		#define PORT_FEATURE_FLOW_CONTROL_TX                 0x00000100
+		#define PORT_FEATURE_FLOW_CONTROL_RX                 0x00000200
+		#define PORT_FEATURE_FLOW_CONTROL_BOTH               0x00000300
+		#define PORT_FEATURE_FLOW_CONTROL_NONE               0x00000400
 
 	/* The default for MCP link configuration,
-	uses the same defines as link_config */
+	   uses the same defines as link_config */
 	u32 mfw_wol_link_cfg;
+
 	/* The default for the driver of the second external phy,
-	uses the same defines as link_config */
-	u32 link_config2;					/* 0x47C */
+	   uses the same defines as link_config */
+	u32 link_config2;				    /* 0x47C */
 
 	/* The default for MCP of the second external phy,
-	uses the same defines as link_config */
-	u32 mfw_wol_link_cfg2;				/* 0x480 */
+	   uses the same defines as link_config */
+	u32 mfw_wol_link_cfg2;				    /* 0x480 */
 
-	u32 Reserved2[17];					/* 0x484 */
+	u32 Reserved2[17];				    /* 0x484 */
 
 };
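
Every multi-bit field in this structure follows the same MASK/SHIFT convention:
AND the configuration word with the mask, then shift right to normalize the
value. A minimal sketch of a decoder for the link-speed selector in
link_config (the helper name is hypothetical; the driver has its own
accessors):

#include <linux/types.h>

/* Hypothetical helper: extract the 4-bit link-speed selector. */
static inline u32 port_feat_link_speed(u32 link_config)
{
	return (link_config & PORT_FEATURE_LINK_SPEED_MASK) >>
	       PORT_FEATURE_LINK_SPEED_SHIFT;
}

/* Example: 0x00050000 decodes to 5, i.e. PORT_FEATURE_LINK_SPEED_1G. */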
 
 
 /****************************************************************************
- * Device Information							    *
+ * Device Information                                                       *
  ****************************************************************************/
-struct shm_dev_info {						    /* size */
+struct shm_dev_info {				/* size */
 
 	u32    bc_rev; /* 8 bits each: major, minor, build */	       /* 4 */
 
-	struct shared_hw_cfg	 shared_hw_config;		      /* 40 */
+	struct shared_hw_cfg     shared_hw_config;	      /* 40 */
 
-	struct port_hw_cfg	 port_hw_config[PORT_MAX];     /* 400*2=800 */
+	struct port_hw_cfg       port_hw_config[PORT_MAX];     /* 400*2=800 */
 
-	struct shared_feat_cfg	 shared_feature_config; 	       /* 4 */
+	struct shared_feat_cfg   shared_feature_config;		   /* 4 */
 
-	struct port_feat_cfg	 port_feature_config[PORT_MAX];/* 116*2=232 */
+	struct port_feat_cfg     port_feature_config[PORT_MAX];/* 116*2=232 */
 
 };
 
 
-#define FUNC_0				0
-#define FUNC_1				1
-#define FUNC_2				2
-#define FUNC_3				3
-#define FUNC_4				4
-#define FUNC_5				5
-#define FUNC_6				6
-#define FUNC_7				7
-#define E1_FUNC_MAX			2
-#define E1H_FUNC_MAX			8
-#define E2_FUNC_MAX	    4	/* per path */
+#if !defined(__LITTLE_ENDIAN) && !defined(__BIG_ENDIAN)
+	#error "Missing either LITTLE_ENDIAN or BIG_ENDIAN definition."
+#endif
 
-#define VN_0				0
-#define VN_1				1
-#define VN_2				2
-#define VN_3				3
-#define E1VN_MAX			1
-#define E1HVN_MAX			4
+#define FUNC_0              0
+#define FUNC_1              1
+#define FUNC_2              2
+#define FUNC_3              3
+#define FUNC_4              4
+#define FUNC_5              5
+#define FUNC_6              6
+#define FUNC_7              7
+#define E1_FUNC_MAX         2
+#define E1H_FUNC_MAX            8
+#define E2_FUNC_MAX         4   /* per path */
 
-#define E2_VF_MAX			64
+#define VN_0                0
+#define VN_1                1
+#define VN_2                2
+#define VN_3                3
+#define E1VN_MAX            1
+#define E1HVN_MAX           4
+
+#define E2_VF_MAX           64  /* HC_REG_VF_CONFIGURATION_SIZE */
 /* This value (in milliseconds) determines the frequency of the driver
  * issuing the PULSE message code.  The firmware monitors this periodic
  * pulse to determine when to switch to an OS-absent mode. */
-#define DRV_PULSE_PERIOD_MS		250
+#define DRV_PULSE_PERIOD_MS     250
 
 /* This value (in milliseconds) determines how long the driver should
  * wait for an acknowledgement from the firmware before timing out.  Once
  * the firmware has timed out, the driver will assume there is no firmware
  * running and there won't be any firmware-driver synchronization during a
  * driver reset. */
-#define FW_ACK_TIME_OUT_MS		5000
+#define FW_ACK_TIME_OUT_MS      5000
 
-#define FW_ACK_POLL_TIME_MS		1
+#define FW_ACK_POLL_TIME_MS     1
 
-#define FW_ACK_NUM_OF_POLL	(FW_ACK_TIME_OUT_MS/FW_ACK_POLL_TIME_MS)
+#define FW_ACK_NUM_OF_POLL  (FW_ACK_TIME_OUT_MS/FW_ACK_POLL_TIME_MS)
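
Taken together, these constants imply a bounded polling loop: check for the
firmware acknowledgement every FW_ACK_POLL_TIME_MS, giving up after
FW_ACK_NUM_OF_POLL attempts. A hedged sketch of that pattern, assuming the
struct drv_func_mb layout and FW_MSG_* codes defined further down; the real
driver path differs in detail:

#include <linux/delay.h>
#include <linux/io.h>

/* Sketch only: poll the function mailbox until the fw echoes our sequence. */
static u32 poll_fw_ack(struct drv_func_mb __iomem *mb, u16 expected_seq)
{
	int cnt;

	for (cnt = 0; cnt < FW_ACK_NUM_OF_POLL; cnt++) {
		u32 hdr = readl(&mb->fw_mb_header);

		/* The low 16 bits echo the driver's sequence number. */
		if ((hdr & FW_MSG_SEQ_NUMBER_MASK) == expected_seq)
			return hdr & FW_MSG_CODE_MASK;

		msleep(FW_ACK_POLL_TIME_MS);
	}

	return 0;	/* timed out: assume no firmware is running */
}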
 
 /* LED Blink rate that will achieve ~15.9Hz */
-#define LED_BLINK_RATE_VAL		480
+#define LED_BLINK_RATE_VAL      480
 
 /****************************************************************************
- * Driver <-> FW Mailbox						    *
+ * Driver <-> FW Mailbox                                                    *
  ****************************************************************************/
 struct drv_port_mb {
 
 	u32 link_status;
 	/* Driver should update this field on any link change event */
 
-#define LINK_STATUS_LINK_FLAG_MASK			0x00000001
-#define LINK_STATUS_LINK_UP				0x00000001
-#define LINK_STATUS_SPEED_AND_DUPLEX_MASK		0x0000001E
-#define LINK_STATUS_SPEED_AND_DUPLEX_AN_NOT_COMPLETE	(0<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_10THD		(1<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_10TFD		(2<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_100TXHD		(3<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_100T4		(4<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_100TXFD		(5<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD		(6<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD		(7<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_1000XFD		(7<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_2500THD		(8<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_2500TFD		(9<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_2500XFD		(9<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_10GTFD		(10<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_10GXFD		(10<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_12GTFD		(11<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_12GXFD		(11<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_12_5GTFD		(12<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_12_5GXFD		(12<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_13GTFD		(13<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_13GXFD		(13<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_15GTFD		(14<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_15GXFD		(14<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_16GTFD		(15<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_16GXFD		(15<<1)
+	#define LINK_STATUS_LINK_FLAG_MASK			0x00000001
+	#define LINK_STATUS_LINK_UP				0x00000001
+	#define LINK_STATUS_SPEED_AND_DUPLEX_MASK		0x0000001E
+	#define LINK_STATUS_SPEED_AND_DUPLEX_AN_NOT_COMPLETE	(0<<1)
+	#define LINK_STATUS_SPEED_AND_DUPLEX_10THD		(1<<1)
+	#define LINK_STATUS_SPEED_AND_DUPLEX_10TFD		(2<<1)
+	#define LINK_STATUS_SPEED_AND_DUPLEX_100TXHD		(3<<1)
+	#define LINK_STATUS_SPEED_AND_DUPLEX_100T4		(4<<1)
+	#define LINK_STATUS_SPEED_AND_DUPLEX_100TXFD		(5<<1)
+	#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD		(6<<1)
+	#define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD		(7<<1)
+	#define LINK_STATUS_SPEED_AND_DUPLEX_1000XFD		(7<<1)
+	#define LINK_STATUS_SPEED_AND_DUPLEX_2500THD		(8<<1)
+	#define LINK_STATUS_SPEED_AND_DUPLEX_2500TFD		(9<<1)
+	#define LINK_STATUS_SPEED_AND_DUPLEX_2500XFD		(9<<1)
+	#define LINK_STATUS_SPEED_AND_DUPLEX_10GTFD		(10<<1)
+	#define LINK_STATUS_SPEED_AND_DUPLEX_10GXFD		(10<<1)
+	#define LINK_STATUS_SPEED_AND_DUPLEX_20GTFD		(11<<1)
+	#define LINK_STATUS_SPEED_AND_DUPLEX_20GXFD		(11<<1)
 
-#define LINK_STATUS_AUTO_NEGOTIATE_FLAG_MASK		0x00000020
-#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED		0x00000020
+	#define LINK_STATUS_AUTO_NEGOTIATE_FLAG_MASK		0x00000020
+	#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED		0x00000020
 
-#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE		0x00000040
-#define LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK	0x00000080
-#define LINK_STATUS_PARALLEL_DETECTION_USED		0x00000080
+	#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE		0x00000040
+	#define LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK	0x00000080
+	#define LINK_STATUS_PARALLEL_DETECTION_USED		0x00000080
 
-#define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE	0x00000200
-#define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE	0x00000400
-#define LINK_STATUS_LINK_PARTNER_100T4_CAPABLE		0x00000800
-#define LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE	0x00001000
-#define LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE	0x00002000
-#define LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE		0x00004000
-#define LINK_STATUS_LINK_PARTNER_10THD_CAPABLE		0x00008000
+	#define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE	0x00000200
+	#define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE	0x00000400
+	#define LINK_STATUS_LINK_PARTNER_100T4_CAPABLE		0x00000800
+	#define LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE	0x00001000
+	#define LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE	0x00002000
+	#define LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE		0x00004000
+	#define LINK_STATUS_LINK_PARTNER_10THD_CAPABLE		0x00008000
 
-#define LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK		0x00010000
-#define LINK_STATUS_TX_FLOW_CONTROL_ENABLED		0x00010000
+	#define LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK		0x00010000
+	#define LINK_STATUS_TX_FLOW_CONTROL_ENABLED		0x00010000
 
-#define LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK		0x00020000
-#define LINK_STATUS_RX_FLOW_CONTROL_ENABLED		0x00020000
+	#define LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK		0x00020000
+	#define LINK_STATUS_RX_FLOW_CONTROL_ENABLED		0x00020000
 
-#define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK	0x000C0000
-#define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE	(0<<18)
-#define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE	(1<<18)
-#define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE	(2<<18)
-#define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE		(3<<18)
+	#define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK	0x000C0000
+	#define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE	(0<<18)
+	#define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE	(1<<18)
+	#define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE	(2<<18)
+	#define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE		(3<<18)
 
-#define LINK_STATUS_SERDES_LINK 			0x00100000
+	#define LINK_STATUS_SERDES_LINK				0x00100000
 
-#define LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE	0x00200000
-#define LINK_STATUS_LINK_PARTNER_2500XHD_CAPABLE	0x00400000
-#define LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE 	0x00800000
-#define LINK_STATUS_LINK_PARTNER_12GXFD_CAPABLE 	0x01000000
-#define LINK_STATUS_LINK_PARTNER_12_5GXFD_CAPABLE	0x02000000
-#define LINK_STATUS_LINK_PARTNER_13GXFD_CAPABLE 	0x04000000
-#define LINK_STATUS_LINK_PARTNER_15GXFD_CAPABLE 	0x08000000
-#define LINK_STATUS_LINK_PARTNER_16GXFD_CAPABLE 	0x10000000
+	#define LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE	0x00200000
+	#define LINK_STATUS_LINK_PARTNER_2500XHD_CAPABLE	0x00400000
+	#define LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE		0x00800000
+	#define LINK_STATUS_LINK_PARTNER_20GXFD_CAPABLE		0x10000000
 
 	u32 port_stx;
 
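Note that several speed names alias the same encoding (e.g. 10GTFD and 10GXFD
are both (10<<1)): the field records speed and duplex, not the medium. A
sketch of decoding it, assuming the defines above (the helper is
illustrative):

#include <linux/types.h>

/* Illustrative: true when the link is up at 10G full duplex. */
static bool link_is_10g_fd(u32 link_status)
{
	u32 spd = link_status & LINK_STATUS_SPEED_AND_DUPLEX_MASK;

	return (link_status & LINK_STATUS_LINK_UP) &&
	       spd == LINK_STATUS_SPEED_AND_DUPLEX_10GTFD;
}
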
@@ -887,138 +1209,158 @@
 struct drv_func_mb {
 
 	u32 drv_mb_header;
-#define DRV_MSG_CODE_MASK				0xffff0000
-#define DRV_MSG_CODE_LOAD_REQ				0x10000000
-#define DRV_MSG_CODE_LOAD_DONE				0x11000000
-#define DRV_MSG_CODE_UNLOAD_REQ_WOL_EN			0x20000000
-#define DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS 		0x20010000
-#define DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP 		0x20020000
-#define DRV_MSG_CODE_UNLOAD_DONE			0x21000000
-#define DRV_MSG_CODE_DCC_OK				0x30000000
-#define DRV_MSG_CODE_DCC_FAILURE			0x31000000
-#define DRV_MSG_CODE_DIAG_ENTER_REQ			0x50000000
-#define DRV_MSG_CODE_DIAG_EXIT_REQ			0x60000000
-#define DRV_MSG_CODE_VALIDATE_KEY			0x70000000
-#define DRV_MSG_CODE_GET_CURR_KEY			0x80000000
-#define DRV_MSG_CODE_GET_UPGRADE_KEY			0x81000000
-#define DRV_MSG_CODE_GET_MANUF_KEY			0x82000000
-#define DRV_MSG_CODE_LOAD_L2B_PRAM			0x90000000
+	#define DRV_MSG_CODE_MASK                       0xffff0000
+	#define DRV_MSG_CODE_LOAD_REQ                   0x10000000
+	#define DRV_MSG_CODE_LOAD_DONE                  0x11000000
+	#define DRV_MSG_CODE_UNLOAD_REQ_WOL_EN          0x20000000
+	#define DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS         0x20010000
+	#define DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP         0x20020000
+	#define DRV_MSG_CODE_UNLOAD_DONE                0x21000000
+	#define DRV_MSG_CODE_DCC_OK                     0x30000000
+	#define DRV_MSG_CODE_DCC_FAILURE                0x31000000
+	#define DRV_MSG_CODE_DIAG_ENTER_REQ             0x50000000
+	#define DRV_MSG_CODE_DIAG_EXIT_REQ              0x60000000
+	#define DRV_MSG_CODE_VALIDATE_KEY               0x70000000
+	#define DRV_MSG_CODE_GET_CURR_KEY               0x80000000
+	#define DRV_MSG_CODE_GET_UPGRADE_KEY            0x81000000
+	#define DRV_MSG_CODE_GET_MANUF_KEY              0x82000000
+	#define DRV_MSG_CODE_LOAD_L2B_PRAM              0x90000000
 	/*
-	 * The optic module verification commands require bootcode
-	 * v5.0.6 or later
+	 * The optic module verification command requires bootcode
+	 * v5.0.6 or later; the specific optic module verification command
+	 * requires bootcode v5.2.12 or later.
 	 */
-#define DRV_MSG_CODE_VRFY_FIRST_PHY_OPT_MDL	0xa0000000
-#define REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL	0x00050006
-	/*
-	 * The specific optic module verification command requires bootcode
-	 * v5.2.12 or later
-	 */
-#define DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL	    0xa1000000
-#define REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL	    0x00050234
+	#define DRV_MSG_CODE_VRFY_FIRST_PHY_OPT_MDL     0xa0000000
+	#define REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL     0x00050006
+	#define DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL  0xa1000000
+	#define REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL  0x00050234
 
-#define DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG			0xb0000000
-#define DRV_MSG_CODE_DCBX_PMF_DRV_OK			0xb2000000
-#define DRV_MSG_CODE_SET_MF_BW				0xe0000000
-#define REQ_BC_VER_4_SET_MF_BW				0x00060202
-#define DRV_MSG_CODE_SET_MF_BW_ACK			0xe1000000
-#define BIOS_MSG_CODE_LIC_CHALLENGE			0xff010000
-#define BIOS_MSG_CODE_LIC_RESPONSE			0xff020000
-#define BIOS_MSG_CODE_VIRT_MAC_PRIM			0xff030000
-#define BIOS_MSG_CODE_VIRT_MAC_ISCSI			0xff040000
+	#define DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG         0xb0000000
+	#define DRV_MSG_CODE_DCBX_PMF_DRV_OK            0xb2000000
 
-#define DRV_MSG_SEQ_NUMBER_MASK 			0x0000ffff
+	#define DRV_MSG_CODE_VF_DISABLED_DONE           0xc0000000
+
+	#define DRV_MSG_CODE_SET_MF_BW                  0xe0000000
+	#define REQ_BC_VER_4_SET_MF_BW                  0x00060202
+	#define DRV_MSG_CODE_SET_MF_BW_ACK              0xe1000000
+
+	#define DRV_MSG_CODE_LINK_STATUS_CHANGED        0x01000000
+
+	#define BIOS_MSG_CODE_LIC_CHALLENGE             0xff010000
+	#define BIOS_MSG_CODE_LIC_RESPONSE              0xff020000
+	#define BIOS_MSG_CODE_VIRT_MAC_PRIM             0xff030000
+	#define BIOS_MSG_CODE_VIRT_MAC_ISCSI            0xff040000
+
+	#define DRV_MSG_SEQ_NUMBER_MASK                 0x0000ffff
 
 	u32 drv_mb_param;
+	#define DRV_MSG_CODE_SET_MF_BW_MIN_MASK         0x00ff0000
+	#define DRV_MSG_CODE_SET_MF_BW_MAX_MASK         0xff000000
 
 	u32 fw_mb_header;
-#define FW_MSG_CODE_MASK				0xffff0000
-#define FW_MSG_CODE_DRV_LOAD_COMMON			0x10100000
-#define FW_MSG_CODE_DRV_LOAD_PORT			0x10110000
-#define FW_MSG_CODE_DRV_LOAD_FUNCTION			0x10120000
-	/* Load common chip is supported from bc 6.0.0	*/
-#define REQ_BC_VER_4_DRV_LOAD_COMMON_CHIP	0x00060000
-#define FW_MSG_CODE_DRV_LOAD_COMMON_CHIP	0x10130000
-#define FW_MSG_CODE_DRV_LOAD_REFUSED			0x10200000
-#define FW_MSG_CODE_DRV_LOAD_DONE			0x11100000
-#define FW_MSG_CODE_DRV_UNLOAD_COMMON			0x20100000
-#define FW_MSG_CODE_DRV_UNLOAD_PORT			0x20110000
-#define FW_MSG_CODE_DRV_UNLOAD_FUNCTION 		0x20120000
-#define FW_MSG_CODE_DRV_UNLOAD_DONE			0x21100000
-#define FW_MSG_CODE_DCC_DONE				0x30100000
-#define FW_MSG_CODE_DIAG_ENTER_DONE			0x50100000
-#define FW_MSG_CODE_DIAG_REFUSE 			0x50200000
-#define FW_MSG_CODE_DIAG_EXIT_DONE			0x60100000
-#define FW_MSG_CODE_VALIDATE_KEY_SUCCESS		0x70100000
-#define FW_MSG_CODE_VALIDATE_KEY_FAILURE		0x70200000
-#define FW_MSG_CODE_GET_KEY_DONE			0x80100000
-#define FW_MSG_CODE_NO_KEY				0x80f00000
-#define FW_MSG_CODE_LIC_INFO_NOT_READY			0x80f80000
-#define FW_MSG_CODE_L2B_PRAM_LOADED			0x90100000
-#define FW_MSG_CODE_L2B_PRAM_T_LOAD_FAILURE		0x90210000
-#define FW_MSG_CODE_L2B_PRAM_C_LOAD_FAILURE		0x90220000
-#define FW_MSG_CODE_L2B_PRAM_X_LOAD_FAILURE		0x90230000
-#define FW_MSG_CODE_L2B_PRAM_U_LOAD_FAILURE		0x90240000
-#define FW_MSG_CODE_VRFY_OPT_MDL_SUCCESS		0xa0100000
-#define FW_MSG_CODE_VRFY_OPT_MDL_INVLD_IMG		0xa0200000
-#define FW_MSG_CODE_VRFY_OPT_MDL_UNAPPROVED		0xa0300000
+	#define FW_MSG_CODE_MASK                        0xffff0000
+	#define FW_MSG_CODE_DRV_LOAD_COMMON             0x10100000
+	#define FW_MSG_CODE_DRV_LOAD_PORT               0x10110000
+	#define FW_MSG_CODE_DRV_LOAD_FUNCTION           0x10120000
+	/* Load common chip is supported from bc 6.0.0  */
+	#define REQ_BC_VER_4_DRV_LOAD_COMMON_CHIP       0x00060000
+	#define FW_MSG_CODE_DRV_LOAD_COMMON_CHIP        0x10130000
 
-#define FW_MSG_CODE_LIC_CHALLENGE			0xff010000
-#define FW_MSG_CODE_LIC_RESPONSE			0xff020000
-#define FW_MSG_CODE_VIRT_MAC_PRIM			0xff030000
-#define FW_MSG_CODE_VIRT_MAC_ISCSI			0xff040000
+	#define FW_MSG_CODE_DRV_LOAD_REFUSED            0x10200000
+	#define FW_MSG_CODE_DRV_LOAD_DONE               0x11100000
+	#define FW_MSG_CODE_DRV_UNLOAD_COMMON           0x20100000
+	#define FW_MSG_CODE_DRV_UNLOAD_PORT             0x20110000
+	#define FW_MSG_CODE_DRV_UNLOAD_FUNCTION         0x20120000
+	#define FW_MSG_CODE_DRV_UNLOAD_DONE             0x21100000
+	#define FW_MSG_CODE_DCC_DONE                    0x30100000
+	#define FW_MSG_CODE_LLDP_DONE                   0x40100000
+	#define FW_MSG_CODE_DIAG_ENTER_DONE             0x50100000
+	#define FW_MSG_CODE_DIAG_REFUSE                 0x50200000
+	#define FW_MSG_CODE_DIAG_EXIT_DONE              0x60100000
+	#define FW_MSG_CODE_VALIDATE_KEY_SUCCESS        0x70100000
+	#define FW_MSG_CODE_VALIDATE_KEY_FAILURE        0x70200000
+	#define FW_MSG_CODE_GET_KEY_DONE                0x80100000
+	#define FW_MSG_CODE_NO_KEY                      0x80f00000
+	#define FW_MSG_CODE_LIC_INFO_NOT_READY          0x80f80000
+	#define FW_MSG_CODE_L2B_PRAM_LOADED             0x90100000
+	#define FW_MSG_CODE_L2B_PRAM_T_LOAD_FAILURE     0x90210000
+	#define FW_MSG_CODE_L2B_PRAM_C_LOAD_FAILURE     0x90220000
+	#define FW_MSG_CODE_L2B_PRAM_X_LOAD_FAILURE     0x90230000
+	#define FW_MSG_CODE_L2B_PRAM_U_LOAD_FAILURE     0x90240000
+	#define FW_MSG_CODE_VRFY_OPT_MDL_SUCCESS        0xa0100000
+	#define FW_MSG_CODE_VRFY_OPT_MDL_INVLD_IMG      0xa0200000
+	#define FW_MSG_CODE_VRFY_OPT_MDL_UNAPPROVED     0xa0300000
+	#define FW_MSG_CODE_VF_DISABLED_DONE            0xb0000000
 
-#define FW_MSG_SEQ_NUMBER_MASK				0x0000ffff
+	#define FW_MSG_CODE_SET_MF_BW_SENT              0xe0000000
+	#define FW_MSG_CODE_SET_MF_BW_DONE              0xe1000000
+
+	#define FW_MSG_CODE_LINK_CHANGED_ACK            0x01100000
+
+	#define FW_MSG_CODE_LIC_CHALLENGE               0xff010000
+	#define FW_MSG_CODE_LIC_RESPONSE                0xff020000
+	#define FW_MSG_CODE_VIRT_MAC_PRIM               0xff030000
+	#define FW_MSG_CODE_VIRT_MAC_ISCSI              0xff040000
+
+	#define FW_MSG_SEQ_NUMBER_MASK                  0x0000ffff
 
 	u32 fw_mb_param;
 
 	u32 drv_pulse_mb;
-#define DRV_PULSE_SEQ_MASK				0x00007fff
-#define DRV_PULSE_SYSTEM_TIME_MASK			0xffff0000
-	/* The system time is in the format of
-	 * (year-2001)*12*32 + month*32 + day. */
-#define DRV_PULSE_ALWAYS_ALIVE				0x00008000
-	/* Indicate to the firmware not to go into the
+	#define DRV_PULSE_SEQ_MASK                      0x00007fff
+	#define DRV_PULSE_SYSTEM_TIME_MASK              0xffff0000
+	/*
+	 * The system time is in the format of
+	 * (year-2001)*12*32 + month*32 + day.
+	 */
+	#define DRV_PULSE_ALWAYS_ALIVE                  0x00008000
+	/*
+	 * Indicate to the firmware not to go into
 	 * OS-absent mode when it is not getting the driver pulse.
-	 * This is used for debugging as well for PXE(MBA). */
+	 * This is used for debugging as well as for PXE (MBA).
+	 */
 
 	u32 mcp_pulse_mb;
-#define MCP_PULSE_SEQ_MASK				0x00007fff
-#define MCP_PULSE_ALWAYS_ALIVE				0x00008000
+	#define MCP_PULSE_SEQ_MASK                      0x00007fff
+	#define MCP_PULSE_ALWAYS_ALIVE                  0x00008000
 	/* Indicates to the driver not to assert due to lack
 	 * of MCP response */
-#define MCP_EVENT_MASK					0xffff0000
-#define MCP_EVENT_OTHER_DRIVER_RESET_REQ		0x00010000
+	#define MCP_EVENT_MASK                          0xffff0000
+	#define MCP_EVENT_OTHER_DRIVER_RESET_REQ        0x00010000
 
 	u32 iscsi_boot_signature;
 	u32 iscsi_boot_block_offset;
 
 	u32 drv_status;
-#define DRV_STATUS_PMF					0x00000001
-#define DRV_STATUS_SET_MF_BW				0x00000004
+	#define DRV_STATUS_PMF                          0x00000001
+	#define DRV_STATUS_VF_DISABLED                  0x00000002
+	#define DRV_STATUS_SET_MF_BW                    0x00000004
+	#define DRV_STATUS_LINK_EVENT                   0x00000008
 
-#define DRV_STATUS_DCC_EVENT_MASK			0x0000ff00
-#define DRV_STATUS_DCC_DISABLE_ENABLE_PF		0x00000100
-#define DRV_STATUS_DCC_BANDWIDTH_ALLOCATION		0x00000200
-#define DRV_STATUS_DCC_CHANGE_MAC_ADDRESS		0x00000400
-#define DRV_STATUS_DCC_RESERVED1			0x00000800
-#define DRV_STATUS_DCC_SET_PROTOCOL			0x00001000
-#define DRV_STATUS_DCC_SET_PRIORITY			0x00002000
-#define DRV_STATUS_DCBX_EVENT_MASK			0x000f0000
-#define DRV_STATUS_DCBX_NEGOTIATION_RESULTS		0x00010000
+	#define DRV_STATUS_DCC_EVENT_MASK               0x0000ff00
+	#define DRV_STATUS_DCC_DISABLE_ENABLE_PF        0x00000100
+	#define DRV_STATUS_DCC_BANDWIDTH_ALLOCATION     0x00000200
+	#define DRV_STATUS_DCC_CHANGE_MAC_ADDRESS       0x00000400
+	#define DRV_STATUS_DCC_RESERVED1                0x00000800
+	#define DRV_STATUS_DCC_SET_PROTOCOL             0x00001000
+	#define DRV_STATUS_DCC_SET_PRIORITY             0x00002000
+
+	#define DRV_STATUS_DCBX_EVENT_MASK              0x000f0000
+	#define DRV_STATUS_DCBX_NEGOTIATION_RESULTS     0x00010000
 
 	u32 virt_mac_upper;
-#define VIRT_MAC_SIGN_MASK				0xffff0000
-#define VIRT_MAC_SIGNATURE				0x564d0000
+	#define VIRT_MAC_SIGN_MASK                      0xffff0000
+	#define VIRT_MAC_SIGNATURE                      0x564d0000
 	u32 virt_mac_lower;
 
 };
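
The pulse word therefore packs two things: a 15-bit sequence number and, in
the upper half, the encoded system date. For 15 June 2011 the date encodes as
(2011-2001)*12*32 + 6*32 + 15 = 4047 = 0x0fcf. A sketch of composing the word
(the helper is hypothetical, and whether the driver actually fills the time
field is firmware policy):

#include <linux/types.h>

/* Hypothetical: build drv_pulse_mb from a sequence number and a date. */
static u32 make_drv_pulse(u16 seq, u16 year, u8 month, u8 day)
{
	/* Date encoding per the comment above: (year-2001)*12*32 + month*32 + day */
	u32 systime = (u32)(year - 2001) * 12 * 32 + month * 32 + day;

	return ((systime << 16) & DRV_PULSE_SYSTEM_TIME_MASK) |
	       (seq & DRV_PULSE_SEQ_MASK);
}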
 
 
 /****************************************************************************
- * Management firmware state						    *
+ * Management firmware state                                                *
  ****************************************************************************/
 /* Allocate 440 bytes for management firmware */
-#define MGMTFW_STATE_WORD_SIZE				    110
+#define MGMTFW_STATE_WORD_SIZE                          110
 
 struct mgmtfw_state {
 	u32 opaque[MGMTFW_STATE_WORD_SIZE];
@@ -1026,25 +1368,25 @@
 
 
 /****************************************************************************
- * Multi-Function configuration 					    *
+ * Multi-Function configuration                                             *
  ****************************************************************************/
 struct shared_mf_cfg {
 
 	u32 clp_mb;
-#define SHARED_MF_CLP_SET_DEFAULT		    0x00000000
+	#define SHARED_MF_CLP_SET_DEFAULT               0x00000000
 	/* set by CLP */
-#define SHARED_MF_CLP_EXIT			    0x00000001
+	#define SHARED_MF_CLP_EXIT                      0x00000001
 	/* set by MCP */
-#define SHARED_MF_CLP_EXIT_DONE 		    0x00010000
+	#define SHARED_MF_CLP_EXIT_DONE                 0x00010000
 
 };
 
 struct port_mf_cfg {
 
-	u32 dynamic_cfg;	/* device control channel */
-#define PORT_MF_CFG_E1HOV_TAG_MASK		    0x0000ffff
-#define PORT_MF_CFG_E1HOV_TAG_SHIFT		    0
-#define PORT_MF_CFG_E1HOV_TAG_DEFAULT		    PORT_MF_CFG_E1HOV_TAG_MASK
+	u32 dynamic_cfg;    /* device control channel */
+	#define PORT_MF_CFG_E1HOV_TAG_MASK              0x0000ffff
+	#define PORT_MF_CFG_E1HOV_TAG_SHIFT             0
+	#define PORT_MF_CFG_E1HOV_TAG_DEFAULT         PORT_MF_CFG_E1HOV_TAG_MASK
 
 	u32 reserved[3];
 
@@ -1055,57 +1397,58 @@
 	u32 config;
 	/* E/R/I/D */
 	/* function 0 of each port cannot be hidden */
-#define FUNC_MF_CFG_FUNC_HIDE			    0x00000001
+	#define FUNC_MF_CFG_FUNC_HIDE                   0x00000001
 
-#define FUNC_MF_CFG_PROTOCOL_MASK		    0x00000007
-#define FUNC_MF_CFG_PROTOCOL_ETHERNET		    0x00000002
-#define FUNC_MF_CFG_PROTOCOL_ETHERNET_WITH_RDMA     0x00000004
-#define FUNC_MF_CFG_PROTOCOL_ISCSI		    0x00000006
-#define FUNC_MF_CFG_PROTOCOL_DEFAULT\
-	FUNC_MF_CFG_PROTOCOL_ETHERNET_WITH_RDMA
+	#define FUNC_MF_CFG_PROTOCOL_MASK               0x00000006
+	#define FUNC_MF_CFG_PROTOCOL_FCOE               0x00000000
+	#define FUNC_MF_CFG_PROTOCOL_ETHERNET           0x00000002
+	#define FUNC_MF_CFG_PROTOCOL_ETHERNET_WITH_RDMA 0x00000004
+	#define FUNC_MF_CFG_PROTOCOL_ISCSI              0x00000006
+	#define FUNC_MF_CFG_PROTOCOL_DEFAULT \
+				FUNC_MF_CFG_PROTOCOL_ETHERNET_WITH_RDMA
 
-#define FUNC_MF_CFG_FUNC_DISABLED		    0x00000008
+	#define FUNC_MF_CFG_FUNC_DISABLED               0x00000008
+	#define FUNC_MF_CFG_FUNC_DELETED                0x00000010
 
 	/* PRI */
 	/* 0 - low priority, 3 - high priority */
-#define FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK	    0x00000300
-#define FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT	    8
-#define FUNC_MF_CFG_TRANSMIT_PRIORITY_DEFAULT	    0x00000000
+	#define FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK      0x00000300
+	#define FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT     8
+	#define FUNC_MF_CFG_TRANSMIT_PRIORITY_DEFAULT   0x00000000
 
 	/* MINBW, MAXBW */
 	/* value range - 0..100, increments in 100Mbps */
-#define FUNC_MF_CFG_MIN_BW_MASK 		    0x00ff0000
-#define FUNC_MF_CFG_MIN_BW_SHIFT		    16
-#define FUNC_MF_CFG_MIN_BW_DEFAULT		    0x00000000
-#define FUNC_MF_CFG_MAX_BW_MASK 		    0xff000000
-#define FUNC_MF_CFG_MAX_BW_SHIFT		    24
-#define FUNC_MF_CFG_MAX_BW_DEFAULT		    0x64000000
+	#define FUNC_MF_CFG_MIN_BW_MASK                 0x00ff0000
+	#define FUNC_MF_CFG_MIN_BW_SHIFT                16
+	#define FUNC_MF_CFG_MIN_BW_DEFAULT              0x00000000
+	#define FUNC_MF_CFG_MAX_BW_MASK                 0xff000000
+	#define FUNC_MF_CFG_MAX_BW_SHIFT                24
+	#define FUNC_MF_CFG_MAX_BW_DEFAULT              0x64000000
 
-	u32 mac_upper;		/* MAC */
-#define FUNC_MF_CFG_UPPERMAC_MASK		    0x0000ffff
-#define FUNC_MF_CFG_UPPERMAC_SHIFT		    0
-#define FUNC_MF_CFG_UPPERMAC_DEFAULT		    FUNC_MF_CFG_UPPERMAC_MASK
+	u32 mac_upper;	    /* MAC */
+	#define FUNC_MF_CFG_UPPERMAC_MASK               0x0000ffff
+	#define FUNC_MF_CFG_UPPERMAC_SHIFT              0
+	#define FUNC_MF_CFG_UPPERMAC_DEFAULT           FUNC_MF_CFG_UPPERMAC_MASK
 	u32 mac_lower;
-#define FUNC_MF_CFG_LOWERMAC_DEFAULT		    0xffffffff
+	#define FUNC_MF_CFG_LOWERMAC_DEFAULT            0xffffffff
 
 	u32 e1hov_tag;	/* VNI */
-#define FUNC_MF_CFG_E1HOV_TAG_MASK		    0x0000ffff
-#define FUNC_MF_CFG_E1HOV_TAG_SHIFT		    0
-#define FUNC_MF_CFG_E1HOV_TAG_DEFAULT		    FUNC_MF_CFG_E1HOV_TAG_MASK
+	#define FUNC_MF_CFG_E1HOV_TAG_MASK              0x0000ffff
+	#define FUNC_MF_CFG_E1HOV_TAG_SHIFT             0
+	#define FUNC_MF_CFG_E1HOV_TAG_DEFAULT         FUNC_MF_CFG_E1HOV_TAG_MASK
 
 	u32 reserved[2];
-
 };
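
Since both bandwidth fields hold a plain 0..100 value behind a mask/shift
pair, FUNC_MF_CFG_MAX_BW_DEFAULT (0x64000000) is simply 100 (0x64) at shift
24, i.e. "full bandwidth". A minimal sketch of extracting both limits (the
helper name is illustrative):

#include <linux/types.h>

/* Illustrative: pull the per-function min/max bandwidth percentages. */
static void func_mf_get_bw(u32 config, u8 *min_bw, u8 *max_bw)
{
	*min_bw = (config & FUNC_MF_CFG_MIN_BW_MASK) >> FUNC_MF_CFG_MIN_BW_SHIFT;
	*max_bw = (config & FUNC_MF_CFG_MAX_BW_MASK) >> FUNC_MF_CFG_MAX_BW_SHIFT;
}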
 
 /* This structure is not applicable and should not be accessed on 57711 */
 struct func_ext_cfg {
 	u32 func_cfg;
-#define MACP_FUNC_CFG_FLAGS_MASK			      0x000000FF
-#define MACP_FUNC_CFG_FLAGS_SHIFT			      0
-#define MACP_FUNC_CFG_FLAGS_ENABLED			      0x00000001
-#define MACP_FUNC_CFG_FLAGS_ETHERNET			      0x00000002
-#define MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD		      0x00000004
-#define MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD		      0x00000008
+	#define MACP_FUNC_CFG_FLAGS_MASK                0x000000FF
+	#define MACP_FUNC_CFG_FLAGS_SHIFT               0
+	#define MACP_FUNC_CFG_FLAGS_ENABLED             0x00000001
+	#define MACP_FUNC_CFG_FLAGS_ETHERNET            0x00000002
+	#define MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD       0x00000004
+	#define MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD        0x00000008
 
 	u32 iscsi_mac_addr_upper;
 	u32 iscsi_mac_addr_lower;
@@ -1120,73 +1463,99 @@
 	u32 fcoe_wwn_node_name_lower;
 
 	u32 preserve_data;
-#define MF_FUNC_CFG_PRESERVE_L2_MAC			     (1<<0)
-#define MF_FUNC_CFG_PRESERVE_ISCSI_MAC			     (1<<1)
-#define MF_FUNC_CFG_PRESERVE_FCOE_MAC			     (1<<2)
-#define MF_FUNC_CFG_PRESERVE_FCOE_WWN_P			     (1<<3)
-#define MF_FUNC_CFG_PRESERVE_FCOE_WWN_N			     (1<<4)
+	#define MF_FUNC_CFG_PRESERVE_L2_MAC             (1<<0)
+	#define MF_FUNC_CFG_PRESERVE_ISCSI_MAC          (1<<1)
+	#define MF_FUNC_CFG_PRESERVE_FCOE_MAC           (1<<2)
+	#define MF_FUNC_CFG_PRESERVE_FCOE_WWN_P         (1<<3)
+	#define MF_FUNC_CFG_PRESERVE_FCOE_WWN_N         (1<<4)
+	#define MF_FUNC_CFG_PRESERVE_TX_BW              (1<<5)
 };
 
 struct mf_cfg {
 
-	struct shared_mf_cfg	shared_mf_config;
-	struct port_mf_cfg	port_mf_config[PORT_MAX];
-	struct func_mf_cfg	func_mf_config[E1H_FUNC_MAX];
-
-	struct func_ext_cfg func_ext_config[E1H_FUNC_MAX];
-};
-
+	struct shared_mf_cfg    shared_mf_config;       /* 0x4 */
+	struct port_mf_cfg  port_mf_config[PORT_MAX];   /* 0x10 * 2 = 0x20 */
+	/* for all chips, there are 8 mf functions */
+	struct func_mf_cfg  func_mf_config[E1H_FUNC_MAX]; /* 0x18 * 8 = 0xc0 */
+	/*
+	 * Extended configuration per function - this array does not exist and
+	 * should not be accessed on 57711
+	 */
+	struct func_ext_cfg func_ext_config[E1H_FUNC_MAX]; /* 0x28 * 8 = 0x140*/
+}; /* 0x224 */
 
 /****************************************************************************
- * Shared Memory Region 						    *
+ * Shared Memory Region                                                     *
  ****************************************************************************/
-struct shmem_region {			       /*   SharedMem Offset (size) */
+struct shmem_region {		       /*   SharedMem Offset (size) */
 
-	u32			validity_map[PORT_MAX];  /* 0x0 (4*2 = 0x8) */
-#define SHR_MEM_FORMAT_REV_ID			    ('A'<<24)
-#define SHR_MEM_FORMAT_REV_MASK 		    0xff000000
+	u32         validity_map[PORT_MAX];  /* 0x0 (4*2 = 0x8) */
+	#define SHR_MEM_FORMAT_REV_MASK                     0xff000000
+	#define SHR_MEM_FORMAT_REV_ID                       ('A'<<24)
 	/* validity bits */
-#define SHR_MEM_VALIDITY_PCI_CFG		    0x00100000
-#define SHR_MEM_VALIDITY_MB			    0x00200000
-#define SHR_MEM_VALIDITY_DEV_INFO		    0x00400000
-#define SHR_MEM_VALIDITY_RESERVED		    0x00000007
+	#define SHR_MEM_VALIDITY_PCI_CFG                    0x00100000
+	#define SHR_MEM_VALIDITY_MB                         0x00200000
+	#define SHR_MEM_VALIDITY_DEV_INFO                   0x00400000
+	#define SHR_MEM_VALIDITY_RESERVED                   0x00000007
 	/* One licensing bit should be set */
-#define SHR_MEM_VALIDITY_LIC_KEY_IN_EFFECT_MASK     0x00000038
-#define SHR_MEM_VALIDITY_LIC_MANUF_KEY_IN_EFFECT    0x00000008
-#define SHR_MEM_VALIDITY_LIC_UPGRADE_KEY_IN_EFFECT  0x00000010
-#define SHR_MEM_VALIDITY_LIC_NO_KEY_IN_EFFECT	    0x00000020
+	#define SHR_MEM_VALIDITY_LIC_KEY_IN_EFFECT_MASK     0x00000038
+	#define SHR_MEM_VALIDITY_LIC_MANUF_KEY_IN_EFFECT    0x00000008
+	#define SHR_MEM_VALIDITY_LIC_UPGRADE_KEY_IN_EFFECT  0x00000010
+	#define SHR_MEM_VALIDITY_LIC_NO_KEY_IN_EFFECT       0x00000020
 	/* Active MFW */
-#define SHR_MEM_VALIDITY_ACTIVE_MFW_UNKNOWN	    0x00000000
-#define SHR_MEM_VALIDITY_ACTIVE_MFW_IPMI	    0x00000040
-#define SHR_MEM_VALIDITY_ACTIVE_MFW_UMP 	    0x00000080
-#define SHR_MEM_VALIDITY_ACTIVE_MFW_NCSI	    0x000000c0
-#define SHR_MEM_VALIDITY_ACTIVE_MFW_NONE	    0x000001c0
-#define SHR_MEM_VALIDITY_ACTIVE_MFW_MASK	    0x000001c0
+	#define SHR_MEM_VALIDITY_ACTIVE_MFW_UNKNOWN         0x00000000
+	#define SHR_MEM_VALIDITY_ACTIVE_MFW_MASK            0x000001c0
+	#define SHR_MEM_VALIDITY_ACTIVE_MFW_IPMI            0x00000040
+	#define SHR_MEM_VALIDITY_ACTIVE_MFW_UMP             0x00000080
+	#define SHR_MEM_VALIDITY_ACTIVE_MFW_NCSI            0x000000c0
+	#define SHR_MEM_VALIDITY_ACTIVE_MFW_NONE            0x000001c0
 
-	struct shm_dev_info	dev_info;		 /* 0x8     (0x438) */
+	struct shm_dev_info dev_info;	     /* 0x8     (0x438) */
 
-	struct license_key	drv_lic_key[PORT_MAX];	/* 0x440 (52*2=0x68) */
+	struct license_key       drv_lic_key[PORT_MAX]; /* 0x440 (52*2=0x68) */
 
 	/* FW information (for internal FW use) */
-	u32			fw_info_fio_offset;    /* 0x4a8       (0x4) */
-	struct mgmtfw_state	mgmtfw_state;	       /* 0x4ac     (0x1b8) */
+	u32         fw_info_fio_offset;		/* 0x4a8       (0x4) */
+	struct mgmtfw_state mgmtfw_state;	/* 0x4ac     (0x1b8) */
 
-	struct drv_port_mb	port_mb[PORT_MAX];     /* 0x664 (16*2=0x20) */
-	struct drv_func_mb	func_mb[];	       /* 0x684
-					     (44*2/4/8=0x58/0xb0/0x160) */
+	struct drv_port_mb  port_mb[PORT_MAX];	/* 0x664 (16*2=0x20) */
+
+#ifdef BMAPI
+	/* This is a variable length array */
+	/* the number of functions depends on the chip type */
+	struct drv_func_mb func_mb[1];	/* 0x684 (44*2/4/8=0x58/0xb0/0x160) */
+#else
+	/* the number of functions depends on the chip type */
+	struct drv_func_mb  func_mb[];	/* 0x684 (44*2/4/8=0x58/0xb0/0x160) */
+#endif /* BMAPI */
 
 }; /* 57710 = 0x6dc | 57711 = 0x7E4 | 57712 = 0x734 */
 
+/****************************************************************************
+ * Shared Memory 2 Region                                                   *
+ ****************************************************************************/
+/* The fw_flr_ack is actually built in the following way:                   */
+/* 8 bit:  PF ack                                                           */
+/* 64 bit: VF ack                                                           */
+/* 8 bit:  iov_dis_ack                                                      */
+/* In order to maintain endianness in the mailbox HSI, we want to keep      */
+/* using u32. The fw must have the VF right after the PF since this is how  */
+/* it accesses arrays (it always expects the VF to reside after the PF,     */
+/* which makes the calculation much easier for it).                         */
+/* In order to satisfy both limitations, and keep the struct small, the     */
+/* code will abuse the structure defined here to achieve the actual         */
+/* partition described above.                                               */
+/****************************************************************************/
 struct fw_flr_ack {
-	u32	pf_ack;
-	u32	vf_ack[1];
-	u32	iov_dis_ack;
+	u32         pf_ack;
+	u32         vf_ack[1];
+	u32         iov_dis_ack;
 };
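
One plausible reading of the packing trick described above: if the 64 VF bits
are addressed as u32 words starting at vf_ack, indices 32..63 deliberately
spill into iov_dis_ack, which is exactly the overlay the comment calls an
"abuse". A sketch only, not the driver's actual FLR handling:

/* Sketch: set VF vf_idx's ack bit; word 1 past vf_ack overlays iov_dis_ack. */
static void set_vf_flr_ack(struct fw_flr_ack *ack, unsigned int vf_idx)
{
	ack->vf_ack[vf_idx / 32] |= 1U << (vf_idx % 32);
}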
 
 struct fw_flr_mb {
-	u32	aggint;
-	u32	opgen_addr;
-	struct	fw_flr_ack ack;
+	u32         aggint;
+	u32         opgen_addr;
+	struct fw_flr_ack ack;
 };
 
 /**** SUPPORT FOR SHMEM ARRAYS ***
@@ -1210,36 +1579,36 @@
  *
  * SHMEM_ARRAY_BITPOS(i, 4, 4) defines the standard ordering:
  *
- *		|		|		|		|
- *   0	|   1	|   2	|   3	|   4	|   5	|   6	|   7	|
- *		|		|		|		|
+ *               |               |               |               |
+ *   0   |   1   |   2   |   3   |   4   |   5   |   6   |   7   |
+ *               |               |               |               |
  *
  * SHMEM_ARRAY_BITPOS(i, 4, 8) defines a flip ordering per byte:
  *
- *		|		|		|		|
- *   1	|   0	|   3	|   2	|   5	|   4	|   7	|   6	|
- *		|		|		|		|
+ *               |               |               |               |
+ *   1   |   0   |   3   |   2   |   5   |   4   |   7   |   6   |
+ *               |               |               |               |
  *
  * SHMEM_ARRAY_BITPOS(i, 4, 16) defines a flip ordering per word:
  *
- *		|		|		|		|
- *   3	|   2	|   1	|   0	|   7	|   6	|   5	|   4	|
- *		|		|		|		|
+ *               |               |               |               |
+ *   3   |   2   |   1   |   0   |   7   |   6   |   5   |   4   |
+ *               |               |               |               |
  */
 #define SHMEM_ARRAY_BITPOS(i, eb, fb)	\
 	((((32/(fb)) - 1 - ((i)/((fb)/(eb))) % (32/(fb))) * (fb)) + \
 	(((i)%((fb)/(eb))) * (eb)))
 
-#define SHMEM_ARRAY_GET(a, i, eb, fb)					   \
+#define SHMEM_ARRAY_GET(a, i, eb, fb)					\
 	((a[SHMEM_ARRAY_ENTRY(i, eb)] >> SHMEM_ARRAY_BITPOS(i, eb, fb)) &  \
 	SHMEM_ARRAY_MASK(eb))
 
-#define SHMEM_ARRAY_SET(a, i, eb, fb, val)				   \
+#define SHMEM_ARRAY_SET(a, i, eb, fb, val)				\
 do {									   \
 	a[SHMEM_ARRAY_ENTRY(i, eb)] &= ~(SHMEM_ARRAY_MASK(eb) <<	   \
-	SHMEM_ARRAY_BITPOS(i, eb, fb));				   \
+	SHMEM_ARRAY_BITPOS(i, eb, fb));					   \
 	a[SHMEM_ARRAY_ENTRY(i, eb)] |= (((val) & SHMEM_ARRAY_MASK(eb)) <<  \
-	SHMEM_ARRAY_BITPOS(i, eb, fb));				   \
+	SHMEM_ARRAY_BITPOS(i, eb, fb));					   \
 } while (0)
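
A usage sketch for the accessors above, assuming the SHMEM_ARRAY_ENTRY() and
SHMEM_ARRAY_MASK() helpers defined earlier in this file: a table of 4-bit
entries read and written with the standard (eb == fb == 4) ordering. The
table and values are illustrative.

static u32 pri_tbl[2];	/* room for 16 entries of 4 bits each */

static void shmem_array_example(void)
{
	u32 v;

	/* Entry 5 lands in word 0, bits 8..11, per SHMEM_ARRAY_BITPOS. */
	SHMEM_ARRAY_SET(pri_tbl, 5, 4, 4, 0x9);
	v = SHMEM_ARRAY_GET(pri_tbl, 5, 4, 4);	/* reads back 0x9 */
	(void)v;
}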
 
 
@@ -1263,23 +1632,30 @@
 #define ISCSI_APP_IDX			1
 #define PREDEFINED_APP_IDX_MAX		2
 
+
+/* Big/Little endian have the same representation. */
 struct dcbx_ets_feature {
+	/*
+	 * For Admin MIB - is this feature supported by the
+	 * driver | For Local MIB - should this feature be enabled.
+	 */
 	u32 enabled;
 	u32  pg_bw_tbl[2];
 	u32  pri_pg_tbl[1];
 };
 
+/* Driver structure in LE */
 struct dcbx_pfc_feature {
 #ifdef __BIG_ENDIAN
 	u8 pri_en_bitmap;
-#define DCBX_PFC_PRI_0 0x01
-#define DCBX_PFC_PRI_1 0x02
-#define DCBX_PFC_PRI_2 0x04
-#define DCBX_PFC_PRI_3 0x08
-#define DCBX_PFC_PRI_4 0x10
-#define DCBX_PFC_PRI_5 0x20
-#define DCBX_PFC_PRI_6 0x40
-#define DCBX_PFC_PRI_7 0x80
+	#define DCBX_PFC_PRI_0 0x01
+	#define DCBX_PFC_PRI_1 0x02
+	#define DCBX_PFC_PRI_2 0x04
+	#define DCBX_PFC_PRI_3 0x08
+	#define DCBX_PFC_PRI_4 0x10
+	#define DCBX_PFC_PRI_5 0x20
+	#define DCBX_PFC_PRI_6 0x40
+	#define DCBX_PFC_PRI_7 0x80
 	u8 pfc_caps;
 	u8 reserved;
 	u8 enabled;
@@ -1288,39 +1664,41 @@
 	u8 reserved;
 	u8 pfc_caps;
 	u8 pri_en_bitmap;
-#define DCBX_PFC_PRI_0 0x01
-#define DCBX_PFC_PRI_1 0x02
-#define DCBX_PFC_PRI_2 0x04
-#define DCBX_PFC_PRI_3 0x08
-#define DCBX_PFC_PRI_4 0x10
-#define DCBX_PFC_PRI_5 0x20
-#define DCBX_PFC_PRI_6 0x40
-#define DCBX_PFC_PRI_7 0x80
+	#define DCBX_PFC_PRI_0 0x01
+	#define DCBX_PFC_PRI_1 0x02
+	#define DCBX_PFC_PRI_2 0x04
+	#define DCBX_PFC_PRI_3 0x08
+	#define DCBX_PFC_PRI_4 0x10
+	#define DCBX_PFC_PRI_5 0x20
+	#define DCBX_PFC_PRI_6 0x40
+	#define DCBX_PFC_PRI_7 0x80
 #endif
 };
 
 struct dcbx_app_priority_entry {
 #ifdef __BIG_ENDIAN
-	u16	app_id;
-	u8	pri_bitmap;
-	u8	appBitfield;
-#define DCBX_APP_ENTRY_VALID	     0x01
-#define DCBX_APP_ENTRY_SF_MASK	     0x30
-#define DCBX_APP_ENTRY_SF_SHIFT	     4
-#define DCBX_APP_SF_ETH_TYPE	     0x10
-#define DCBX_APP_SF_PORT	     0x20
+	u16  app_id;
+	u8  pri_bitmap;
+	u8  appBitfield;
+	#define DCBX_APP_ENTRY_VALID         0x01
+	#define DCBX_APP_ENTRY_SF_MASK       0x30
+	#define DCBX_APP_ENTRY_SF_SHIFT      4
+	#define DCBX_APP_SF_ETH_TYPE         0x10
+	#define DCBX_APP_SF_PORT             0x20
 #elif defined(__LITTLE_ENDIAN)
 	u8 appBitfield;
-#define DCBX_APP_ENTRY_VALID	     0x01
-#define DCBX_APP_ENTRY_SF_MASK	     0x30
-#define DCBX_APP_ENTRY_SF_SHIFT	     4
-#define DCBX_APP_SF_ETH_TYPE	     0x10
-#define DCBX_APP_SF_PORT	     0x20
-	u8	pri_bitmap;
-	u16	app_id;
+	#define DCBX_APP_ENTRY_VALID         0x01
+	#define DCBX_APP_ENTRY_SF_MASK       0x30
+	#define DCBX_APP_ENTRY_SF_SHIFT      4
+	#define DCBX_APP_SF_ETH_TYPE         0x10
+	#define DCBX_APP_SF_PORT             0x20
+	u8  pri_bitmap;
+	u16  app_id;
 #endif
 };
 
+
+/* FW structure in BE */
 struct dcbx_app_priority_feature {
 #ifdef __BIG_ENDIAN
 	u8 reserved;
@@ -1336,302 +1714,402 @@
 	struct dcbx_app_priority_entry  app_pri_tbl[DCBX_MAX_APP_PROTOCOL];
 };
 
+/* FW structure in BE */
 struct dcbx_features {
+	/* PG feature */
 	struct dcbx_ets_feature ets;
+	/* PFC feature */
 	struct dcbx_pfc_feature pfc;
+	/* APP feature */
 	struct dcbx_app_priority_feature app;
 };
 
+/* LLDP protocol parameters */
+/* FW structure in BE */
 struct lldp_params {
 #ifdef __BIG_ENDIAN
-	u8	msg_fast_tx_interval;
-	u8	msg_tx_hold;
-	u8	msg_tx_interval;
-	u8	admin_status;
-#define LLDP_TX_ONLY  0x01
-#define LLDP_RX_ONLY  0x02
-#define LLDP_TX_RX    0x03
-#define LLDP_DISABLED 0x04
-	u8	reserved1;
-	u8	tx_fast;
-	u8	tx_crd_max;
-	u8	tx_crd;
+	u8  msg_fast_tx_interval;
+	u8  msg_tx_hold;
+	u8  msg_tx_interval;
+	u8  admin_status;
+	#define LLDP_TX_ONLY  0x01
+	#define LLDP_RX_ONLY  0x02
+	#define LLDP_TX_RX    0x03
+	#define LLDP_DISABLED 0x04
+	u8  reserved1;
+	u8  tx_fast;
+	u8  tx_crd_max;
+	u8  tx_crd;
 #elif defined(__LITTLE_ENDIAN)
-	u8	admin_status;
-#define LLDP_TX_ONLY  0x01
-#define LLDP_RX_ONLY  0x02
-#define LLDP_TX_RX    0x03
-#define LLDP_DISABLED 0x04
-	u8	msg_tx_interval;
-	u8	msg_tx_hold;
-	u8	msg_fast_tx_interval;
-	u8	tx_crd;
-	u8	tx_crd_max;
-	u8	tx_fast;
-	u8	reserved1;
+	u8  admin_status;
+	#define LLDP_TX_ONLY  0x01
+	#define LLDP_RX_ONLY  0x02
+	#define LLDP_TX_RX    0x03
+	#define LLDP_DISABLED 0x04
+	u8  msg_tx_interval;
+	u8  msg_tx_hold;
+	u8  msg_fast_tx_interval;
+	u8  tx_crd;
+	u8  tx_crd_max;
+	u8  tx_fast;
+	u8  reserved1;
 #endif
-#define REM_CHASSIS_ID_STAT_LEN	4
-#define REM_PORT_ID_STAT_LEN 4
+	#define REM_CHASSIS_ID_STAT_LEN 4
+	#define REM_PORT_ID_STAT_LEN 4
+	/* Holds remote Chassis ID TLV header, subtype and 9B of payload. */
 	u32 peer_chassis_id[REM_CHASSIS_ID_STAT_LEN];
+	/* Holds remote Port ID TLV header, subtype and 9B of payload. */
 	u32 peer_port_id[REM_PORT_ID_STAT_LEN];
 };
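
Note that admin_status holds enumerated values rather than a bitmask (LLDP_DISABLED is 0x04, not a flag), so callers should compare, not AND. An illustrative sketch:

/* Illustrative: whether LLDP receive is administratively enabled. */
static bool lldp_rx_enabled(const struct lldp_params *p)
{
	return p->admin_status == LLDP_RX_ONLY ||
	       p->admin_status == LLDP_TX_RX;
}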
 
 struct lldp_dcbx_stat {
-#define LOCAL_CHASSIS_ID_STAT_LEN 2
-#define LOCAL_PORT_ID_STAT_LEN 2
+	#define LOCAL_CHASSIS_ID_STAT_LEN 2
+	#define LOCAL_PORT_ID_STAT_LEN 2
+	/* Holds local Chassis ID 8B payload of constant subtype 4. */
 	u32 local_chassis_id[LOCAL_CHASSIS_ID_STAT_LEN];
+	/* Holds local Port ID 8B payload of constant subtype 3. */
 	u32 local_port_id[LOCAL_PORT_ID_STAT_LEN];
+	/* Number of DCBX frames transmitted. */
 	u32 num_tx_dcbx_pkts;
+	/* Number of DCBX frames received. */
 	u32 num_rx_dcbx_pkts;
 };
 
+/* ADMIN MIB - DCBX local machine default configuration. */
 struct lldp_admin_mib {
-	u32	ver_cfg_flags;
-#define DCBX_ETS_CONFIG_TX_ENABLED	0x00000001
-#define DCBX_PFC_CONFIG_TX_ENABLED	0x00000002
-#define DCBX_APP_CONFIG_TX_ENABLED	0x00000004
-#define DCBX_ETS_RECO_TX_ENABLED	0x00000008
-#define DCBX_ETS_RECO_VALID		0x00000010
-#define DCBX_ETS_WILLING		0x00000020
-#define DCBX_PFC_WILLING		0x00000040
-#define DCBX_APP_WILLING		0x00000080
-#define DCBX_VERSION_CEE		0x00000100
-#define DCBX_VERSION_IEEE		0x00000200
-#define DCBX_DCBX_ENABLED		0x00000400
-#define DCBX_CEE_VERSION_MASK		0x0000f000
-#define DCBX_CEE_VERSION_SHIFT		12
-#define DCBX_CEE_MAX_VERSION_MASK	0x000f0000
-#define DCBX_CEE_MAX_VERSION_SHIFT	16
-	struct dcbx_features	features;
+	u32     ver_cfg_flags;
+	#define DCBX_ETS_CONFIG_TX_ENABLED       0x00000001
+	#define DCBX_PFC_CONFIG_TX_ENABLED       0x00000002
+	#define DCBX_APP_CONFIG_TX_ENABLED       0x00000004
+	#define DCBX_ETS_RECO_TX_ENABLED         0x00000008
+	#define DCBX_ETS_RECO_VALID              0x00000010
+	#define DCBX_ETS_WILLING                 0x00000020
+	#define DCBX_PFC_WILLING                 0x00000040
+	#define DCBX_APP_WILLING                 0x00000080
+	#define DCBX_VERSION_CEE                 0x00000100
+	#define DCBX_VERSION_IEEE                0x00000200
+	#define DCBX_DCBX_ENABLED                0x00000400
+	#define DCBX_CEE_VERSION_MASK            0x0000f000
+	#define DCBX_CEE_VERSION_SHIFT           12
+	#define DCBX_CEE_MAX_VERSION_MASK        0x000f0000
+	#define DCBX_CEE_MAX_VERSION_SHIFT       16
+	struct dcbx_features     features;
 };
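
The CEE version fields follow the usual mask/shift convention; for example (illustrative helper, assuming only the defines above):

/* Illustrative: extract the CEE version from ver_cfg_flags. */
static u32 dcbx_cee_version(const struct lldp_admin_mib *mib)
{
	return (mib->ver_cfg_flags & DCBX_CEE_VERSION_MASK) >>
	       DCBX_CEE_VERSION_SHIFT;
}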
 
+/* REMOTE MIB - remote machine DCBX configuration. */
 struct lldp_remote_mib {
 	u32 prefix_seq_num;
 	u32 flags;
-#define DCBX_ETS_TLV_RX	    0x00000001
-#define DCBX_PFC_TLV_RX	    0x00000002
-#define DCBX_APP_TLV_RX	    0x00000004
-#define DCBX_ETS_RX_ERROR   0x00000010
-#define DCBX_PFC_RX_ERROR   0x00000020
-#define DCBX_APP_RX_ERROR   0x00000040
-#define DCBX_ETS_REM_WILLING	0x00000100
-#define DCBX_PFC_REM_WILLING	0x00000200
-#define DCBX_APP_REM_WILLING	0x00000400
-#define DCBX_REMOTE_ETS_RECO_VALID  0x00001000
+	#define DCBX_ETS_TLV_RX                  0x00000001
+	#define DCBX_PFC_TLV_RX                  0x00000002
+	#define DCBX_APP_TLV_RX                  0x00000004
+	#define DCBX_ETS_RX_ERROR                0x00000010
+	#define DCBX_PFC_RX_ERROR                0x00000020
+	#define DCBX_APP_RX_ERROR                0x00000040
+	#define DCBX_ETS_REM_WILLING             0x00000100
+	#define DCBX_PFC_REM_WILLING             0x00000200
+	#define DCBX_APP_REM_WILLING             0x00000400
+	#define DCBX_REMOTE_ETS_RECO_VALID       0x00001000
+	#define DCBX_REMOTE_MIB_VALID            0x00002000
 	struct dcbx_features features;
 	u32 suffix_seq_num;
 };
 
+/* LOCAL MIB - operational DCBX configuration - transmitted on Tx LLDPDU. */
 struct lldp_local_mib {
 	u32 prefix_seq_num;
+	/* Indicates if there is mismatch with negotiation results. */
 	u32 error;
-#define DCBX_LOCAL_ETS_ERROR	 0x00000001
-#define DCBX_LOCAL_PFC_ERROR	 0x00000002
-#define DCBX_LOCAL_APP_ERROR	 0x00000004
-#define DCBX_LOCAL_PFC_MISMATCH	 0x00000010
-#define DCBX_LOCAL_APP_MISMATCH	 0x00000020
+	#define DCBX_LOCAL_ETS_ERROR             0x00000001
+	#define DCBX_LOCAL_PFC_ERROR             0x00000002
+	#define DCBX_LOCAL_APP_ERROR             0x00000004
+	#define DCBX_LOCAL_PFC_MISMATCH          0x00000010
+	#define DCBX_LOCAL_APP_MISMATCH          0x00000020
 	struct dcbx_features   features;
 	u32 suffix_seq_num;
 };
 /***END OF DCBX STRUCTURES DECLARATIONS***/
 
+struct ncsi_oem_fcoe_features {
+	u32 fcoe_features1;
+	#define FCOE_FEATURES1_IOS_PER_CONNECTION_MASK          0x0000FFFF
+	#define FCOE_FEATURES1_IOS_PER_CONNECTION_OFFSET        0
+
+	#define FCOE_FEATURES1_LOGINS_PER_PORT_MASK             0xFFFF0000
+	#define FCOE_FEATURES1_LOGINS_PER_PORT_OFFSET           16
+
+	u32 fcoe_features2;
+	#define FCOE_FEATURES2_EXCHANGES_MASK                   0x0000FFFF
+	#define FCOE_FEATURES2_EXCHANGES_OFFSET                 0
+
+	#define FCOE_FEATURES2_NPIV_WWN_PER_PORT_MASK           0xFFFF0000
+	#define FCOE_FEATURES2_NPIV_WWN_PER_PORT_OFFSET         16
+
+	u32 fcoe_features3;
+	#define FCOE_FEATURES3_TARGETS_SUPPORTED_MASK           0x0000FFFF
+	#define FCOE_FEATURES3_TARGETS_SUPPORTED_OFFSET         0
+
+	#define FCOE_FEATURES3_OUTSTANDING_COMMANDS_MASK        0xFFFF0000
+	#define FCOE_FEATURES3_OUTSTANDING_COMMANDS_OFFSET      16
+
+	u32 fcoe_features4;
+	#define FCOE_FEATURES4_FEATURE_SETTINGS_MASK            0x0000000F
+	#define FCOE_FEATURES4_FEATURE_SETTINGS_OFFSET          0
+};
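
Each fcoe_featuresN word packs two 16-bit values behind MASK/OFFSET pairs; a hedged decoding sketch:

/* Illustrative: number of IOs per connection from fcoe_features1. */
static u16 fcoe_ios_per_conn(const struct ncsi_oem_fcoe_features *f)
{
	return (f->fcoe_features1 & FCOE_FEATURES1_IOS_PER_CONNECTION_MASK) >>
	       FCOE_FEATURES1_IOS_PER_CONNECTION_OFFSET;
}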
+
+struct ncsi_oem_data {
+	u32 driver_version[4];
+	struct ncsi_oem_fcoe_features ncsi_oem_fcoe_features;
+};
+
 struct shmem2_region {
 
-	u32			size;
+	u32 size;					/* 0x0000 */
 
-	u32			dcc_support;
-#define SHMEM_DCC_SUPPORT_NONE			    0x00000000
-#define SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV     0x00000001
-#define SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV  0x00000004
-#define SHMEM_DCC_SUPPORT_CHANGE_MAC_ADDRESS_TLV    0x00000008
-#define SHMEM_DCC_SUPPORT_SET_PROTOCOL_TLV	    0x00000040
-#define SHMEM_DCC_SUPPORT_SET_PRIORITY_TLV	    0x00000080
-#define SHMEM_DCC_SUPPORT_DEFAULT		    SHMEM_DCC_SUPPORT_NONE
-	u32 ext_phy_fw_version2[PORT_MAX];
+	u32 dcc_support;				/* 0x0004 */
+	#define SHMEM_DCC_SUPPORT_NONE                      0x00000000
+	#define SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV     0x00000001
+	#define SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV  0x00000004
+	#define SHMEM_DCC_SUPPORT_CHANGE_MAC_ADDRESS_TLV    0x00000008
+	#define SHMEM_DCC_SUPPORT_SET_PROTOCOL_TLV          0x00000040
+	#define SHMEM_DCC_SUPPORT_SET_PRIORITY_TLV          0x00000080
+
+	u32 ext_phy_fw_version2[PORT_MAX];		/* 0x0008 */
 	/*
 	 * For backwards compatibility, if the mf_cfg_addr does not exist
 	 * (the size field is smaller than 0xc) the mf_cfg resides at the
 	 * end of struct shmem_region
-     */
-	u32	mf_cfg_addr;
-#define SHMEM_MF_CFG_ADDR_NONE			    0x00000000
+	 */
+	u32 mf_cfg_addr;				/* 0x0010 */
+	#define SHMEM_MF_CFG_ADDR_NONE                  0x00000000
 
-	struct fw_flr_mb flr_mb;
-	u32	dcbx_lldp_params_offset;
-#define SHMEM_LLDP_DCBX_PARAMS_NONE		    0x00000000
-	u32	dcbx_neg_res_offset;
-#define SHMEM_DCBX_NEG_RES_NONE			    0x00000000
-	u32	dcbx_remote_mib_offset;
-#define SHMEM_DCBX_REMOTE_MIB_NONE		    0x00000000
+	struct fw_flr_mb flr_mb;			/* 0x0014 */
+	u32 dcbx_lldp_params_offset;			/* 0x0028 */
+	#define SHMEM_LLDP_DCBX_PARAMS_NONE             0x00000000
+	u32 dcbx_neg_res_offset;			/* 0x002c */
+	#define SHMEM_DCBX_NEG_RES_NONE			0x00000000
+	u32 dcbx_remote_mib_offset;			/* 0x0030 */
+	#define SHMEM_DCBX_REMOTE_MIB_NONE              0x00000000
 	/*
 	 * The other shmemX_base_addr holds the other path's shmem address
 	 * required for example in case of common phy init, or for path1 to know
 	 * the address of mcp debug trace which is located in offset from shmem
 	 * of path0
 	 */
-	u32 other_shmem_base_addr;
-	u32 other_shmem2_base_addr;
-	u32	reserved1[E2_VF_MAX / 32];
-	u32	reserved2[E2_FUNC_MAX][E2_VF_MAX / 32];
-	u32	dcbx_lldp_dcbx_stat_offset;
-#define SHMEM_LLDP_DCBX_STAT_NONE		   0x00000000
+	u32 other_shmem_base_addr;			/* 0x0034 */
+	u32 other_shmem2_base_addr;			/* 0x0038 */
+	/*
+	 * mcp_vf_disabled is set by the MCP to notify the driver of VFs
+	 * which were disabled/FLRed
+	 */
+	u32 mcp_vf_disabled[E2_VF_MAX / 32];		/* 0x003c */
+
+	/*
+	 * drv_ack_vf_disabled is set by the PF driver to acknowledge the
+	 * disabled VFs it has handled
+	 */
+	u32 drv_ack_vf_disabled[E2_FUNC_MAX][E2_VF_MAX / 32]; /* 0x0044 */
+
+	u32 dcbx_lldp_dcbx_stat_offset;			/* 0x0064 */
+	#define SHMEM_LLDP_DCBX_STAT_NONE               0x00000000
+
+	/*
+	 * The edebug_driver_if field is used to transfer messages from the
+	 * edebug app to the driver through shmem2.
+	 *
+	 * message format:
+	 * bits 0-2 -  function number / instance of driver to perform request
+	 * bits 3-5 -  op code / is_ack?
+	 * bits 6-63 - data
+	 */
+	u32 edebug_driver_if[2];			/* 0x0068 */
+	#define EDEBUG_DRIVER_IF_OP_CODE_GET_PHYS_ADDR  1
+	#define EDEBUG_DRIVER_IF_OP_CODE_GET_BUS_ADDR   2
+	#define EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT   3
+
+	u32 nvm_retain_bitmap_addr;			/* 0x0070 */
+
+	u32	reserved1;	/* 0x0074 */
+
+	u32	reserved2[E2_FUNC_MAX];
+
+	u32	reserved3[E2_FUNC_MAX];/* 0x0088 */
+	u32	reserved4[E2_FUNC_MAX];/* 0x0098 */
+
+	u32 swim_base_addr;				/* 0x0108 */
+	u32 swim_funcs;
+	u32 swim_main_cb;
+
+	u32	reserved5[2];
+
+	/* generic flags controlled by the driver */
+	u32 drv_flags;
+	#define DRV_FLAGS_DCB_CONFIGURED                0x1
+
+	/* pointer to extended dev_info shared data copied from nvm image */
+	u32 extended_dev_info_shared_addr;
+	u32 ncsi_oem_data_addr;
+
+	u32 ocsd_host_addr;
+	u32 ocbb_host_addr;
+	u32 ocsd_req_update_interval;
 };
 
 
 struct emac_stats {
-    u32     rx_stat_ifhcinoctets;
-    u32     rx_stat_ifhcinbadoctets;
-    u32     rx_stat_etherstatsfragments;
-    u32     rx_stat_ifhcinucastpkts;
-    u32     rx_stat_ifhcinmulticastpkts;
-    u32     rx_stat_ifhcinbroadcastpkts;
-    u32     rx_stat_dot3statsfcserrors;
-    u32     rx_stat_dot3statsalignmenterrors;
-    u32     rx_stat_dot3statscarriersenseerrors;
-    u32     rx_stat_xonpauseframesreceived;
-    u32     rx_stat_xoffpauseframesreceived;
-    u32     rx_stat_maccontrolframesreceived;
-    u32     rx_stat_xoffstateentered;
-    u32     rx_stat_dot3statsframestoolong;
-    u32     rx_stat_etherstatsjabbers;
-    u32     rx_stat_etherstatsundersizepkts;
-    u32     rx_stat_etherstatspkts64octets;
-    u32     rx_stat_etherstatspkts65octetsto127octets;
-    u32     rx_stat_etherstatspkts128octetsto255octets;
-    u32     rx_stat_etherstatspkts256octetsto511octets;
-    u32     rx_stat_etherstatspkts512octetsto1023octets;
-    u32     rx_stat_etherstatspkts1024octetsto1522octets;
-    u32     rx_stat_etherstatspktsover1522octets;
+	u32     rx_stat_ifhcinoctets;
+	u32     rx_stat_ifhcinbadoctets;
+	u32     rx_stat_etherstatsfragments;
+	u32     rx_stat_ifhcinucastpkts;
+	u32     rx_stat_ifhcinmulticastpkts;
+	u32     rx_stat_ifhcinbroadcastpkts;
+	u32     rx_stat_dot3statsfcserrors;
+	u32     rx_stat_dot3statsalignmenterrors;
+	u32     rx_stat_dot3statscarriersenseerrors;
+	u32     rx_stat_xonpauseframesreceived;
+	u32     rx_stat_xoffpauseframesreceived;
+	u32     rx_stat_maccontrolframesreceived;
+	u32     rx_stat_xoffstateentered;
+	u32     rx_stat_dot3statsframestoolong;
+	u32     rx_stat_etherstatsjabbers;
+	u32     rx_stat_etherstatsundersizepkts;
+	u32     rx_stat_etherstatspkts64octets;
+	u32     rx_stat_etherstatspkts65octetsto127octets;
+	u32     rx_stat_etherstatspkts128octetsto255octets;
+	u32     rx_stat_etherstatspkts256octetsto511octets;
+	u32     rx_stat_etherstatspkts512octetsto1023octets;
+	u32     rx_stat_etherstatspkts1024octetsto1522octets;
+	u32     rx_stat_etherstatspktsover1522octets;
 
-    u32     rx_stat_falsecarriererrors;
+	u32     rx_stat_falsecarriererrors;
 
-    u32     tx_stat_ifhcoutoctets;
-    u32     tx_stat_ifhcoutbadoctets;
-    u32     tx_stat_etherstatscollisions;
-    u32     tx_stat_outxonsent;
-    u32     tx_stat_outxoffsent;
-    u32     tx_stat_flowcontroldone;
-    u32     tx_stat_dot3statssinglecollisionframes;
-    u32     tx_stat_dot3statsmultiplecollisionframes;
-    u32     tx_stat_dot3statsdeferredtransmissions;
-    u32     tx_stat_dot3statsexcessivecollisions;
-    u32     tx_stat_dot3statslatecollisions;
-    u32     tx_stat_ifhcoutucastpkts;
-    u32     tx_stat_ifhcoutmulticastpkts;
-    u32     tx_stat_ifhcoutbroadcastpkts;
-    u32     tx_stat_etherstatspkts64octets;
-    u32     tx_stat_etherstatspkts65octetsto127octets;
-    u32     tx_stat_etherstatspkts128octetsto255octets;
-    u32     tx_stat_etherstatspkts256octetsto511octets;
-    u32     tx_stat_etherstatspkts512octetsto1023octets;
-    u32     tx_stat_etherstatspkts1024octetsto1522octets;
-    u32     tx_stat_etherstatspktsover1522octets;
-    u32     tx_stat_dot3statsinternalmactransmiterrors;
+	u32     tx_stat_ifhcoutoctets;
+	u32     tx_stat_ifhcoutbadoctets;
+	u32     tx_stat_etherstatscollisions;
+	u32     tx_stat_outxonsent;
+	u32     tx_stat_outxoffsent;
+	u32     tx_stat_flowcontroldone;
+	u32     tx_stat_dot3statssinglecollisionframes;
+	u32     tx_stat_dot3statsmultiplecollisionframes;
+	u32     tx_stat_dot3statsdeferredtransmissions;
+	u32     tx_stat_dot3statsexcessivecollisions;
+	u32     tx_stat_dot3statslatecollisions;
+	u32     tx_stat_ifhcoutucastpkts;
+	u32     tx_stat_ifhcoutmulticastpkts;
+	u32     tx_stat_ifhcoutbroadcastpkts;
+	u32     tx_stat_etherstatspkts64octets;
+	u32     tx_stat_etherstatspkts65octetsto127octets;
+	u32     tx_stat_etherstatspkts128octetsto255octets;
+	u32     tx_stat_etherstatspkts256octetsto511octets;
+	u32     tx_stat_etherstatspkts512octetsto1023octets;
+	u32     tx_stat_etherstatspkts1024octetsto1522octets;
+	u32     tx_stat_etherstatspktsover1522octets;
+	u32     tx_stat_dot3statsinternalmactransmiterrors;
 };
 
 
 struct bmac1_stats {
-    u32     tx_stat_gtpkt_lo;
-    u32     tx_stat_gtpkt_hi;
-    u32     tx_stat_gtxpf_lo;
-    u32     tx_stat_gtxpf_hi;
-    u32     tx_stat_gtfcs_lo;
-    u32     tx_stat_gtfcs_hi;
-    u32     tx_stat_gtmca_lo;
-    u32     tx_stat_gtmca_hi;
-    u32     tx_stat_gtbca_lo;
-    u32     tx_stat_gtbca_hi;
-    u32     tx_stat_gtfrg_lo;
-    u32     tx_stat_gtfrg_hi;
-    u32     tx_stat_gtovr_lo;
-    u32     tx_stat_gtovr_hi;
-    u32     tx_stat_gt64_lo;
-    u32     tx_stat_gt64_hi;
-    u32     tx_stat_gt127_lo;
-    u32     tx_stat_gt127_hi;
-    u32     tx_stat_gt255_lo;
-    u32     tx_stat_gt255_hi;
-    u32     tx_stat_gt511_lo;
-    u32     tx_stat_gt511_hi;
-    u32     tx_stat_gt1023_lo;
-    u32     tx_stat_gt1023_hi;
-    u32     tx_stat_gt1518_lo;
-    u32     tx_stat_gt1518_hi;
-    u32     tx_stat_gt2047_lo;
-    u32     tx_stat_gt2047_hi;
-    u32     tx_stat_gt4095_lo;
-    u32     tx_stat_gt4095_hi;
-    u32     tx_stat_gt9216_lo;
-    u32     tx_stat_gt9216_hi;
-    u32     tx_stat_gt16383_lo;
-    u32     tx_stat_gt16383_hi;
-    u32     tx_stat_gtmax_lo;
-    u32     tx_stat_gtmax_hi;
-    u32     tx_stat_gtufl_lo;
-    u32     tx_stat_gtufl_hi;
-    u32     tx_stat_gterr_lo;
-    u32     tx_stat_gterr_hi;
-    u32     tx_stat_gtbyt_lo;
-    u32     tx_stat_gtbyt_hi;
+	u32	tx_stat_gtpkt_lo;
+	u32	tx_stat_gtpkt_hi;
+	u32	tx_stat_gtxpf_lo;
+	u32	tx_stat_gtxpf_hi;
+	u32	tx_stat_gtfcs_lo;
+	u32	tx_stat_gtfcs_hi;
+	u32	tx_stat_gtmca_lo;
+	u32	tx_stat_gtmca_hi;
+	u32	tx_stat_gtbca_lo;
+	u32	tx_stat_gtbca_hi;
+	u32	tx_stat_gtfrg_lo;
+	u32	tx_stat_gtfrg_hi;
+	u32	tx_stat_gtovr_lo;
+	u32	tx_stat_gtovr_hi;
+	u32	tx_stat_gt64_lo;
+	u32	tx_stat_gt64_hi;
+	u32	tx_stat_gt127_lo;
+	u32	tx_stat_gt127_hi;
+	u32	tx_stat_gt255_lo;
+	u32	tx_stat_gt255_hi;
+	u32	tx_stat_gt511_lo;
+	u32	tx_stat_gt511_hi;
+	u32	tx_stat_gt1023_lo;
+	u32	tx_stat_gt1023_hi;
+	u32	tx_stat_gt1518_lo;
+	u32	tx_stat_gt1518_hi;
+	u32	tx_stat_gt2047_lo;
+	u32	tx_stat_gt2047_hi;
+	u32	tx_stat_gt4095_lo;
+	u32	tx_stat_gt4095_hi;
+	u32	tx_stat_gt9216_lo;
+	u32	tx_stat_gt9216_hi;
+	u32	tx_stat_gt16383_lo;
+	u32	tx_stat_gt16383_hi;
+	u32	tx_stat_gtmax_lo;
+	u32	tx_stat_gtmax_hi;
+	u32	tx_stat_gtufl_lo;
+	u32	tx_stat_gtufl_hi;
+	u32	tx_stat_gterr_lo;
+	u32	tx_stat_gterr_hi;
+	u32	tx_stat_gtbyt_lo;
+	u32	tx_stat_gtbyt_hi;
 
-    u32     rx_stat_gr64_lo;
-    u32     rx_stat_gr64_hi;
-    u32     rx_stat_gr127_lo;
-    u32     rx_stat_gr127_hi;
-    u32     rx_stat_gr255_lo;
-    u32     rx_stat_gr255_hi;
-    u32     rx_stat_gr511_lo;
-    u32     rx_stat_gr511_hi;
-    u32     rx_stat_gr1023_lo;
-    u32     rx_stat_gr1023_hi;
-    u32     rx_stat_gr1518_lo;
-    u32     rx_stat_gr1518_hi;
-    u32     rx_stat_gr2047_lo;
-    u32     rx_stat_gr2047_hi;
-    u32     rx_stat_gr4095_lo;
-    u32     rx_stat_gr4095_hi;
-    u32     rx_stat_gr9216_lo;
-    u32     rx_stat_gr9216_hi;
-    u32     rx_stat_gr16383_lo;
-    u32     rx_stat_gr16383_hi;
-    u32     rx_stat_grmax_lo;
-    u32     rx_stat_grmax_hi;
-    u32     rx_stat_grpkt_lo;
-    u32     rx_stat_grpkt_hi;
-    u32     rx_stat_grfcs_lo;
-    u32     rx_stat_grfcs_hi;
-    u32     rx_stat_grmca_lo;
-    u32     rx_stat_grmca_hi;
-    u32     rx_stat_grbca_lo;
-    u32     rx_stat_grbca_hi;
-    u32     rx_stat_grxcf_lo;
-    u32     rx_stat_grxcf_hi;
-    u32     rx_stat_grxpf_lo;
-    u32     rx_stat_grxpf_hi;
-    u32     rx_stat_grxuo_lo;
-    u32     rx_stat_grxuo_hi;
-    u32     rx_stat_grjbr_lo;
-    u32     rx_stat_grjbr_hi;
-    u32     rx_stat_grovr_lo;
-    u32     rx_stat_grovr_hi;
-    u32     rx_stat_grflr_lo;
-    u32     rx_stat_grflr_hi;
-    u32     rx_stat_grmeg_lo;
-    u32     rx_stat_grmeg_hi;
-    u32     rx_stat_grmeb_lo;
-    u32     rx_stat_grmeb_hi;
-    u32     rx_stat_grbyt_lo;
-    u32     rx_stat_grbyt_hi;
-    u32     rx_stat_grund_lo;
-    u32     rx_stat_grund_hi;
-    u32     rx_stat_grfrg_lo;
-    u32     rx_stat_grfrg_hi;
-    u32     rx_stat_grerb_lo;
-    u32     rx_stat_grerb_hi;
-    u32     rx_stat_grfre_lo;
-    u32     rx_stat_grfre_hi;
-    u32     rx_stat_gripj_lo;
-    u32     rx_stat_gripj_hi;
+	u32	rx_stat_gr64_lo;
+	u32	rx_stat_gr64_hi;
+	u32	rx_stat_gr127_lo;
+	u32	rx_stat_gr127_hi;
+	u32	rx_stat_gr255_lo;
+	u32	rx_stat_gr255_hi;
+	u32	rx_stat_gr511_lo;
+	u32	rx_stat_gr511_hi;
+	u32	rx_stat_gr1023_lo;
+	u32	rx_stat_gr1023_hi;
+	u32	rx_stat_gr1518_lo;
+	u32	rx_stat_gr1518_hi;
+	u32	rx_stat_gr2047_lo;
+	u32	rx_stat_gr2047_hi;
+	u32	rx_stat_gr4095_lo;
+	u32	rx_stat_gr4095_hi;
+	u32	rx_stat_gr9216_lo;
+	u32	rx_stat_gr9216_hi;
+	u32	rx_stat_gr16383_lo;
+	u32	rx_stat_gr16383_hi;
+	u32	rx_stat_grmax_lo;
+	u32	rx_stat_grmax_hi;
+	u32	rx_stat_grpkt_lo;
+	u32	rx_stat_grpkt_hi;
+	u32	rx_stat_grfcs_lo;
+	u32	rx_stat_grfcs_hi;
+	u32	rx_stat_grmca_lo;
+	u32	rx_stat_grmca_hi;
+	u32	rx_stat_grbca_lo;
+	u32	rx_stat_grbca_hi;
+	u32	rx_stat_grxcf_lo;
+	u32	rx_stat_grxcf_hi;
+	u32	rx_stat_grxpf_lo;
+	u32	rx_stat_grxpf_hi;
+	u32	rx_stat_grxuo_lo;
+	u32	rx_stat_grxuo_hi;
+	u32	rx_stat_grjbr_lo;
+	u32	rx_stat_grjbr_hi;
+	u32	rx_stat_grovr_lo;
+	u32	rx_stat_grovr_hi;
+	u32	rx_stat_grflr_lo;
+	u32	rx_stat_grflr_hi;
+	u32	rx_stat_grmeg_lo;
+	u32	rx_stat_grmeg_hi;
+	u32	rx_stat_grmeb_lo;
+	u32	rx_stat_grmeb_hi;
+	u32	rx_stat_grbyt_lo;
+	u32	rx_stat_grbyt_hi;
+	u32	rx_stat_grund_lo;
+	u32	rx_stat_grund_hi;
+	u32	rx_stat_grfrg_lo;
+	u32	rx_stat_grfrg_hi;
+	u32	rx_stat_grerb_lo;
+	u32	rx_stat_grerb_hi;
+	u32	rx_stat_grfre_lo;
+	u32	rx_stat_grfre_hi;
+	u32	rx_stat_gripj_lo;
+	u32	rx_stat_gripj_hi;
 };
 
 struct bmac2_stats {
@@ -1750,187 +2228,316 @@
 	u32	rx_stat_gripj_hi;
 };
 
+struct mstat_stats {
+	struct {
+		/* NOTE: MSTAT on E3 has a bug where this register's contents
+		 * are actually tx_gtxpok + tx_gtxpf + (possibly) tx_gtxpp
+		 */
+		u32 tx_gtxpok_lo;
+		u32 tx_gtxpok_hi;
+		u32 tx_gtxpf_lo;
+		u32 tx_gtxpf_hi;
+		u32 tx_gtxpp_lo;
+		u32 tx_gtxpp_hi;
+		u32 tx_gtfcs_lo;
+		u32 tx_gtfcs_hi;
+		u32 tx_gtuca_lo;
+		u32 tx_gtuca_hi;
+		u32 tx_gtmca_lo;
+		u32 tx_gtmca_hi;
+		u32 tx_gtgca_lo;
+		u32 tx_gtgca_hi;
+		u32 tx_gtpkt_lo;
+		u32 tx_gtpkt_hi;
+		u32 tx_gt64_lo;
+		u32 tx_gt64_hi;
+		u32 tx_gt127_lo;
+		u32 tx_gt127_hi;
+		u32 tx_gt255_lo;
+		u32 tx_gt255_hi;
+		u32 tx_gt511_lo;
+		u32 tx_gt511_hi;
+		u32 tx_gt1023_lo;
+		u32 tx_gt1023_hi;
+		u32 tx_gt1518_lo;
+		u32 tx_gt1518_hi;
+		u32 tx_gt2047_lo;
+		u32 tx_gt2047_hi;
+		u32 tx_gt4095_lo;
+		u32 tx_gt4095_hi;
+		u32 tx_gt9216_lo;
+		u32 tx_gt9216_hi;
+		u32 tx_gt16383_lo;
+		u32 tx_gt16383_hi;
+		u32 tx_gtufl_lo;
+		u32 tx_gtufl_hi;
+		u32 tx_gterr_lo;
+		u32 tx_gterr_hi;
+		u32 tx_gtbyt_lo;
+		u32 tx_gtbyt_hi;
+		u32 tx_collisions_lo;
+		u32 tx_collisions_hi;
+		u32 tx_singlecollision_lo;
+		u32 tx_singlecollision_hi;
+		u32 tx_multiplecollisions_lo;
+		u32 tx_multiplecollisions_hi;
+		u32 tx_deferred_lo;
+		u32 tx_deferred_hi;
+		u32 tx_excessivecollisions_lo;
+		u32 tx_excessivecollisions_hi;
+		u32 tx_latecollisions_lo;
+		u32 tx_latecollisions_hi;
+	} stats_tx;
+
+	struct {
+		u32 rx_gr64_lo;
+		u32 rx_gr64_hi;
+		u32 rx_gr127_lo;
+		u32 rx_gr127_hi;
+		u32 rx_gr255_lo;
+		u32 rx_gr255_hi;
+		u32 rx_gr511_lo;
+		u32 rx_gr511_hi;
+		u32 rx_gr1023_lo;
+		u32 rx_gr1023_hi;
+		u32 rx_gr1518_lo;
+		u32 rx_gr1518_hi;
+		u32 rx_gr2047_lo;
+		u32 rx_gr2047_hi;
+		u32 rx_gr4095_lo;
+		u32 rx_gr4095_hi;
+		u32 rx_gr9216_lo;
+		u32 rx_gr9216_hi;
+		u32 rx_gr16383_lo;
+		u32 rx_gr16383_hi;
+		u32 rx_grpkt_lo;
+		u32 rx_grpkt_hi;
+		u32 rx_grfcs_lo;
+		u32 rx_grfcs_hi;
+		u32 rx_gruca_lo;
+		u32 rx_gruca_hi;
+		u32 rx_grmca_lo;
+		u32 rx_grmca_hi;
+		u32 rx_grbca_lo;
+		u32 rx_grbca_hi;
+		u32 rx_grxpf_lo;
+		u32 rx_grxpf_hi;
+		u32 rx_grxpp_lo;
+		u32 rx_grxpp_hi;
+		u32 rx_grxuo_lo;
+		u32 rx_grxuo_hi;
+		u32 rx_grovr_lo;
+		u32 rx_grovr_hi;
+		u32 rx_grxcf_lo;
+		u32 rx_grxcf_hi;
+		u32 rx_grflr_lo;
+		u32 rx_grflr_hi;
+		u32 rx_grpok_lo;
+		u32 rx_grpok_hi;
+		u32 rx_grbyt_lo;
+		u32 rx_grbyt_hi;
+		u32 rx_grund_lo;
+		u32 rx_grund_hi;
+		u32 rx_grfrg_lo;
+		u32 rx_grfrg_hi;
+		u32 rx_grerb_lo;
+		u32 rx_grerb_hi;
+		u32 rx_grfre_lo;
+		u32 rx_grfre_hi;
+
+		u32 rx_alignmenterrors_lo;
+		u32 rx_alignmenterrors_hi;
+		u32 rx_falsecarrier_lo;
+		u32 rx_falsecarrier_hi;
+		u32 rx_llfcmsgcnt_lo;
+		u32 rx_llfcmsgcnt_hi;
+	} stats_rx;
+};
+
 union mac_stats {
-	struct emac_stats	 emac_stats;
-	struct bmac1_stats	 bmac1_stats;
-	struct bmac2_stats	 bmac2_stats;
+	struct emac_stats	emac_stats;
+	struct bmac1_stats	bmac1_stats;
+	struct bmac2_stats	bmac2_stats;
+	struct mstat_stats	mstat_stats;
 };
 
 
 struct mac_stx {
-    /* in_bad_octets */
-    u32     rx_stat_ifhcinbadoctets_hi;
-    u32     rx_stat_ifhcinbadoctets_lo;
+	/* in_bad_octets */
+	u32     rx_stat_ifhcinbadoctets_hi;
+	u32     rx_stat_ifhcinbadoctets_lo;
 
-    /* out_bad_octets */
-    u32     tx_stat_ifhcoutbadoctets_hi;
-    u32     tx_stat_ifhcoutbadoctets_lo;
+	/* out_bad_octets */
+	u32     tx_stat_ifhcoutbadoctets_hi;
+	u32     tx_stat_ifhcoutbadoctets_lo;
 
-    /* crc_receive_errors */
-    u32     rx_stat_dot3statsfcserrors_hi;
-    u32     rx_stat_dot3statsfcserrors_lo;
-    /* alignment_errors */
-    u32     rx_stat_dot3statsalignmenterrors_hi;
-    u32     rx_stat_dot3statsalignmenterrors_lo;
-    /* carrier_sense_errors */
-    u32     rx_stat_dot3statscarriersenseerrors_hi;
-    u32     rx_stat_dot3statscarriersenseerrors_lo;
-    /* false_carrier_detections */
-    u32     rx_stat_falsecarriererrors_hi;
-    u32     rx_stat_falsecarriererrors_lo;
+	/* crc_receive_errors */
+	u32     rx_stat_dot3statsfcserrors_hi;
+	u32     rx_stat_dot3statsfcserrors_lo;
+	/* alignment_errors */
+	u32     rx_stat_dot3statsalignmenterrors_hi;
+	u32     rx_stat_dot3statsalignmenterrors_lo;
+	/* carrier_sense_errors */
+	u32     rx_stat_dot3statscarriersenseerrors_hi;
+	u32     rx_stat_dot3statscarriersenseerrors_lo;
+	/* false_carrier_detections */
+	u32     rx_stat_falsecarriererrors_hi;
+	u32     rx_stat_falsecarriererrors_lo;
 
-    /* runt_packets_received */
-    u32     rx_stat_etherstatsundersizepkts_hi;
-    u32     rx_stat_etherstatsundersizepkts_lo;
-    /* jabber_packets_received */
-    u32     rx_stat_dot3statsframestoolong_hi;
-    u32     rx_stat_dot3statsframestoolong_lo;
+	/* runt_packets_received */
+	u32     rx_stat_etherstatsundersizepkts_hi;
+	u32     rx_stat_etherstatsundersizepkts_lo;
+	/* jabber_packets_received */
+	u32     rx_stat_dot3statsframestoolong_hi;
+	u32     rx_stat_dot3statsframestoolong_lo;
 
-    /* error_runt_packets_received */
-    u32     rx_stat_etherstatsfragments_hi;
-    u32     rx_stat_etherstatsfragments_lo;
-    /* error_jabber_packets_received */
-    u32     rx_stat_etherstatsjabbers_hi;
-    u32     rx_stat_etherstatsjabbers_lo;
+	/* error_runt_packets_received */
+	u32     rx_stat_etherstatsfragments_hi;
+	u32     rx_stat_etherstatsfragments_lo;
+	/* error_jabber_packets_received */
+	u32     rx_stat_etherstatsjabbers_hi;
+	u32     rx_stat_etherstatsjabbers_lo;
 
-    /* control_frames_received */
-    u32     rx_stat_maccontrolframesreceived_hi;
-    u32     rx_stat_maccontrolframesreceived_lo;
-    u32     rx_stat_bmac_xpf_hi;
-    u32     rx_stat_bmac_xpf_lo;
-    u32     rx_stat_bmac_xcf_hi;
-    u32     rx_stat_bmac_xcf_lo;
+	/* control_frames_received */
+	u32     rx_stat_maccontrolframesreceived_hi;
+	u32     rx_stat_maccontrolframesreceived_lo;
+	u32     rx_stat_mac_xpf_hi;
+	u32     rx_stat_mac_xpf_lo;
+	u32     rx_stat_mac_xcf_hi;
+	u32     rx_stat_mac_xcf_lo;
 
-    /* xoff_state_entered */
-    u32     rx_stat_xoffstateentered_hi;
-    u32     rx_stat_xoffstateentered_lo;
-    /* pause_xon_frames_received */
-    u32     rx_stat_xonpauseframesreceived_hi;
-    u32     rx_stat_xonpauseframesreceived_lo;
-    /* pause_xoff_frames_received */
-    u32     rx_stat_xoffpauseframesreceived_hi;
-    u32     rx_stat_xoffpauseframesreceived_lo;
-    /* pause_xon_frames_transmitted */
-    u32     tx_stat_outxonsent_hi;
-    u32     tx_stat_outxonsent_lo;
-    /* pause_xoff_frames_transmitted */
-    u32     tx_stat_outxoffsent_hi;
-    u32     tx_stat_outxoffsent_lo;
-    /* flow_control_done */
-    u32     tx_stat_flowcontroldone_hi;
-    u32     tx_stat_flowcontroldone_lo;
+	/* xoff_state_entered */
+	u32     rx_stat_xoffstateentered_hi;
+	u32     rx_stat_xoffstateentered_lo;
+	/* pause_xon_frames_received */
+	u32     rx_stat_xonpauseframesreceived_hi;
+	u32     rx_stat_xonpauseframesreceived_lo;
+	/* pause_xoff_frames_received */
+	u32     rx_stat_xoffpauseframesreceived_hi;
+	u32     rx_stat_xoffpauseframesreceived_lo;
+	/* pause_xon_frames_transmitted */
+	u32     tx_stat_outxonsent_hi;
+	u32     tx_stat_outxonsent_lo;
+	/* pause_xoff_frames_transmitted */
+	u32     tx_stat_outxoffsent_hi;
+	u32     tx_stat_outxoffsent_lo;
+	/* flow_control_done */
+	u32     tx_stat_flowcontroldone_hi;
+	u32     tx_stat_flowcontroldone_lo;
 
-    /* ether_stats_collisions */
-    u32     tx_stat_etherstatscollisions_hi;
-    u32     tx_stat_etherstatscollisions_lo;
-    /* single_collision_transmit_frames */
-    u32     tx_stat_dot3statssinglecollisionframes_hi;
-    u32     tx_stat_dot3statssinglecollisionframes_lo;
-    /* multiple_collision_transmit_frames */
-    u32     tx_stat_dot3statsmultiplecollisionframes_hi;
-    u32     tx_stat_dot3statsmultiplecollisionframes_lo;
-    /* deferred_transmissions */
-    u32     tx_stat_dot3statsdeferredtransmissions_hi;
-    u32     tx_stat_dot3statsdeferredtransmissions_lo;
-    /* excessive_collision_frames */
-    u32     tx_stat_dot3statsexcessivecollisions_hi;
-    u32     tx_stat_dot3statsexcessivecollisions_lo;
-    /* late_collision_frames */
-    u32     tx_stat_dot3statslatecollisions_hi;
-    u32     tx_stat_dot3statslatecollisions_lo;
+	/* ether_stats_collisions */
+	u32     tx_stat_etherstatscollisions_hi;
+	u32     tx_stat_etherstatscollisions_lo;
+	/* single_collision_transmit_frames */
+	u32     tx_stat_dot3statssinglecollisionframes_hi;
+	u32     tx_stat_dot3statssinglecollisionframes_lo;
+	/* multiple_collision_transmit_frames */
+	u32     tx_stat_dot3statsmultiplecollisionframes_hi;
+	u32     tx_stat_dot3statsmultiplecollisionframes_lo;
+	/* deferred_transmissions */
+	u32     tx_stat_dot3statsdeferredtransmissions_hi;
+	u32     tx_stat_dot3statsdeferredtransmissions_lo;
+	/* excessive_collision_frames */
+	u32     tx_stat_dot3statsexcessivecollisions_hi;
+	u32     tx_stat_dot3statsexcessivecollisions_lo;
+	/* late_collision_frames */
+	u32     tx_stat_dot3statslatecollisions_hi;
+	u32     tx_stat_dot3statslatecollisions_lo;
 
-    /* frames_transmitted_64_bytes */
-    u32     tx_stat_etherstatspkts64octets_hi;
-    u32     tx_stat_etherstatspkts64octets_lo;
-    /* frames_transmitted_65_127_bytes */
-    u32     tx_stat_etherstatspkts65octetsto127octets_hi;
-    u32     tx_stat_etherstatspkts65octetsto127octets_lo;
-    /* frames_transmitted_128_255_bytes */
-    u32     tx_stat_etherstatspkts128octetsto255octets_hi;
-    u32     tx_stat_etherstatspkts128octetsto255octets_lo;
-    /* frames_transmitted_256_511_bytes */
-    u32     tx_stat_etherstatspkts256octetsto511octets_hi;
-    u32     tx_stat_etherstatspkts256octetsto511octets_lo;
-    /* frames_transmitted_512_1023_bytes */
-    u32     tx_stat_etherstatspkts512octetsto1023octets_hi;
-    u32     tx_stat_etherstatspkts512octetsto1023octets_lo;
-    /* frames_transmitted_1024_1522_bytes */
-    u32     tx_stat_etherstatspkts1024octetsto1522octets_hi;
-    u32     tx_stat_etherstatspkts1024octetsto1522octets_lo;
-    /* frames_transmitted_1523_9022_bytes */
-    u32     tx_stat_etherstatspktsover1522octets_hi;
-    u32     tx_stat_etherstatspktsover1522octets_lo;
-    u32     tx_stat_bmac_2047_hi;
-    u32     tx_stat_bmac_2047_lo;
-    u32     tx_stat_bmac_4095_hi;
-    u32     tx_stat_bmac_4095_lo;
-    u32     tx_stat_bmac_9216_hi;
-    u32     tx_stat_bmac_9216_lo;
-    u32     tx_stat_bmac_16383_hi;
-    u32     tx_stat_bmac_16383_lo;
+	/* frames_transmitted_64_bytes */
+	u32     tx_stat_etherstatspkts64octets_hi;
+	u32     tx_stat_etherstatspkts64octets_lo;
+	/* frames_transmitted_65_127_bytes */
+	u32     tx_stat_etherstatspkts65octetsto127octets_hi;
+	u32     tx_stat_etherstatspkts65octetsto127octets_lo;
+	/* frames_transmitted_128_255_bytes */
+	u32     tx_stat_etherstatspkts128octetsto255octets_hi;
+	u32     tx_stat_etherstatspkts128octetsto255octets_lo;
+	/* frames_transmitted_256_511_bytes */
+	u32     tx_stat_etherstatspkts256octetsto511octets_hi;
+	u32     tx_stat_etherstatspkts256octetsto511octets_lo;
+	/* frames_transmitted_512_1023_bytes */
+	u32     tx_stat_etherstatspkts512octetsto1023octets_hi;
+	u32     tx_stat_etherstatspkts512octetsto1023octets_lo;
+	/* frames_transmitted_1024_1522_bytes */
+	u32     tx_stat_etherstatspkts1024octetsto1522octets_hi;
+	u32     tx_stat_etherstatspkts1024octetsto1522octets_lo;
+	/* frames_transmitted_1523_9022_bytes */
+	u32     tx_stat_etherstatspktsover1522octets_hi;
+	u32     tx_stat_etherstatspktsover1522octets_lo;
+	u32     tx_stat_mac_2047_hi;
+	u32     tx_stat_mac_2047_lo;
+	u32     tx_stat_mac_4095_hi;
+	u32     tx_stat_mac_4095_lo;
+	u32     tx_stat_mac_9216_hi;
+	u32     tx_stat_mac_9216_lo;
+	u32     tx_stat_mac_16383_hi;
+	u32     tx_stat_mac_16383_lo;
 
-    /* internal_mac_transmit_errors */
-    u32     tx_stat_dot3statsinternalmactransmiterrors_hi;
-    u32     tx_stat_dot3statsinternalmactransmiterrors_lo;
+	/* internal_mac_transmit_errors */
+	u32     tx_stat_dot3statsinternalmactransmiterrors_hi;
+	u32     tx_stat_dot3statsinternalmactransmiterrors_lo;
 
-    /* if_out_discards */
-    u32     tx_stat_bmac_ufl_hi;
-    u32     tx_stat_bmac_ufl_lo;
+	/* if_out_discards */
+	u32     tx_stat_mac_ufl_hi;
+	u32     tx_stat_mac_ufl_lo;
 };
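
All of these statistics are carried as _hi/_lo halves of 64-bit counters; recombining them is a plain shift-and-OR (illustrative helper):

/* Illustrative: rebuild a 64-bit counter from its hi/lo halves. */
static u64 mac_stx_u64(u32 hi, u32 lo)
{
	return ((u64)hi << 32) | lo;
}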
 
 
-#define MAC_STX_IDX_MAX 		    2
+#define MAC_STX_IDX_MAX                     2
 
 struct host_port_stats {
-    u32 	   host_port_stats_start;
+	u32            host_port_stats_start;
 
-    struct mac_stx mac_stx[MAC_STX_IDX_MAX];
+	struct mac_stx mac_stx[MAC_STX_IDX_MAX];
 
-    u32 	   brb_drop_hi;
-    u32 	   brb_drop_lo;
+	u32            brb_drop_hi;
+	u32            brb_drop_lo;
 
-    u32 	   host_port_stats_end;
+	u32            host_port_stats_end;
 };
 
 
 struct host_func_stats {
-    u32     host_func_stats_start;
+	u32     host_func_stats_start;
 
-    u32     total_bytes_received_hi;
-    u32     total_bytes_received_lo;
+	u32     total_bytes_received_hi;
+	u32     total_bytes_received_lo;
 
-    u32     total_bytes_transmitted_hi;
-    u32     total_bytes_transmitted_lo;
+	u32     total_bytes_transmitted_hi;
+	u32     total_bytes_transmitted_lo;
 
-    u32     total_unicast_packets_received_hi;
-    u32     total_unicast_packets_received_lo;
+	u32     total_unicast_packets_received_hi;
+	u32     total_unicast_packets_received_lo;
 
-    u32     total_multicast_packets_received_hi;
-    u32     total_multicast_packets_received_lo;
+	u32     total_multicast_packets_received_hi;
+	u32     total_multicast_packets_received_lo;
 
-    u32     total_broadcast_packets_received_hi;
-    u32     total_broadcast_packets_received_lo;
+	u32     total_broadcast_packets_received_hi;
+	u32     total_broadcast_packets_received_lo;
 
-    u32     total_unicast_packets_transmitted_hi;
-    u32     total_unicast_packets_transmitted_lo;
+	u32     total_unicast_packets_transmitted_hi;
+	u32     total_unicast_packets_transmitted_lo;
 
-    u32     total_multicast_packets_transmitted_hi;
-    u32     total_multicast_packets_transmitted_lo;
+	u32     total_multicast_packets_transmitted_hi;
+	u32     total_multicast_packets_transmitted_lo;
 
-    u32     total_broadcast_packets_transmitted_hi;
-    u32     total_broadcast_packets_transmitted_lo;
+	u32     total_broadcast_packets_transmitted_hi;
+	u32     total_broadcast_packets_transmitted_lo;
 
-    u32     valid_bytes_received_hi;
-    u32     valid_bytes_received_lo;
+	u32     valid_bytes_received_hi;
+	u32     valid_bytes_received_lo;
 
-    u32     host_func_stats_end;
+	u32     host_func_stats_end;
 };
 
+/* VIC definitions */
+#define VICSTATST_UIF_INDEX 2
 
-#define BCM_5710_FW_MAJOR_VERSION			6
-#define BCM_5710_FW_MINOR_VERSION			2
-#define BCM_5710_FW_REVISION_VERSION			9
-#define BCM_5710_FW_ENGINEERING_VERSION			0
+#define BCM_5710_FW_MAJOR_VERSION			7
+#define BCM_5710_FW_MINOR_VERSION			0
+#define BCM_5710_FW_REVISION_VERSION		20
+#define BCM_5710_FW_ENGINEERING_VERSION		0
 #define BCM_5710_FW_COMPILE_FLAGS			1
 
 
@@ -1948,6 +2555,115 @@
 
 
 /*
+ * The eth aggregative context of Cstorm
+ */
+struct cstorm_eth_ag_context {
+	u32 __reserved0[10];
+};
+
+
+/*
+ * dmae command structure
+ */
+struct dmae_command {
+	u32 opcode;
+#define DMAE_COMMAND_SRC (0x1<<0)
+#define DMAE_COMMAND_SRC_SHIFT 0
+#define DMAE_COMMAND_DST (0x3<<1)
+#define DMAE_COMMAND_DST_SHIFT 1
+#define DMAE_COMMAND_C_DST (0x1<<3)
+#define DMAE_COMMAND_C_DST_SHIFT 3
+#define DMAE_COMMAND_C_TYPE_ENABLE (0x1<<4)
+#define DMAE_COMMAND_C_TYPE_ENABLE_SHIFT 4
+#define DMAE_COMMAND_C_TYPE_CRC_ENABLE (0x1<<5)
+#define DMAE_COMMAND_C_TYPE_CRC_ENABLE_SHIFT 5
+#define DMAE_COMMAND_C_TYPE_CRC_OFFSET (0x7<<6)
+#define DMAE_COMMAND_C_TYPE_CRC_OFFSET_SHIFT 6
+#define DMAE_COMMAND_ENDIANITY (0x3<<9)
+#define DMAE_COMMAND_ENDIANITY_SHIFT 9
+#define DMAE_COMMAND_PORT (0x1<<11)
+#define DMAE_COMMAND_PORT_SHIFT 11
+#define DMAE_COMMAND_CRC_RESET (0x1<<12)
+#define DMAE_COMMAND_CRC_RESET_SHIFT 12
+#define DMAE_COMMAND_SRC_RESET (0x1<<13)
+#define DMAE_COMMAND_SRC_RESET_SHIFT 13
+#define DMAE_COMMAND_DST_RESET (0x1<<14)
+#define DMAE_COMMAND_DST_RESET_SHIFT 14
+#define DMAE_COMMAND_E1HVN (0x3<<15)
+#define DMAE_COMMAND_E1HVN_SHIFT 15
+#define DMAE_COMMAND_DST_VN (0x3<<17)
+#define DMAE_COMMAND_DST_VN_SHIFT 17
+#define DMAE_COMMAND_C_FUNC (0x1<<19)
+#define DMAE_COMMAND_C_FUNC_SHIFT 19
+#define DMAE_COMMAND_ERR_POLICY (0x3<<20)
+#define DMAE_COMMAND_ERR_POLICY_SHIFT 20
+#define DMAE_COMMAND_RESERVED0 (0x3FF<<22)
+#define DMAE_COMMAND_RESERVED0_SHIFT 22
+	u32 src_addr_lo;
+	u32 src_addr_hi;
+	u32 dst_addr_lo;
+	u32 dst_addr_hi;
+#if defined(__BIG_ENDIAN)
+	u16 opcode_iov;
+#define DMAE_COMMAND_SRC_VFID (0x3F<<0)
+#define DMAE_COMMAND_SRC_VFID_SHIFT 0
+#define DMAE_COMMAND_SRC_VFPF (0x1<<6)
+#define DMAE_COMMAND_SRC_VFPF_SHIFT 6
+#define DMAE_COMMAND_RESERVED1 (0x1<<7)
+#define DMAE_COMMAND_RESERVED1_SHIFT 7
+#define DMAE_COMMAND_DST_VFID (0x3F<<8)
+#define DMAE_COMMAND_DST_VFID_SHIFT 8
+#define DMAE_COMMAND_DST_VFPF (0x1<<14)
+#define DMAE_COMMAND_DST_VFPF_SHIFT 14
+#define DMAE_COMMAND_RESERVED2 (0x1<<15)
+#define DMAE_COMMAND_RESERVED2_SHIFT 15
+	u16 len;
+#elif defined(__LITTLE_ENDIAN)
+	u16 len;
+	u16 opcode_iov;
+#define DMAE_COMMAND_SRC_VFID (0x3F<<0)
+#define DMAE_COMMAND_SRC_VFID_SHIFT 0
+#define DMAE_COMMAND_SRC_VFPF (0x1<<6)
+#define DMAE_COMMAND_SRC_VFPF_SHIFT 6
+#define DMAE_COMMAND_RESERVED1 (0x1<<7)
+#define DMAE_COMMAND_RESERVED1_SHIFT 7
+#define DMAE_COMMAND_DST_VFID (0x3F<<8)
+#define DMAE_COMMAND_DST_VFID_SHIFT 8
+#define DMAE_COMMAND_DST_VFPF (0x1<<14)
+#define DMAE_COMMAND_DST_VFPF_SHIFT 14
+#define DMAE_COMMAND_RESERVED2 (0x1<<15)
+#define DMAE_COMMAND_RESERVED2_SHIFT 15
+#endif
+	u32 comp_addr_lo;
+	u32 comp_addr_hi;
+	u32 comp_val;
+	u32 crc32;
+	u32 crc32_c;
+#if defined(__BIG_ENDIAN)
+	u16 crc16_c;
+	u16 crc16;
+#elif defined(__LITTLE_ENDIAN)
+	u16 crc16;
+	u16 crc16_c;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 reserved3;
+	u16 crc_t10;
+#elif defined(__LITTLE_ENDIAN)
+	u16 crc_t10;
+	u16 reserved3;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 xsum8;
+	u16 xsum16;
+#elif defined(__LITTLE_ENDIAN)
+	u16 xsum16;
+	u16 xsum8;
+#endif
+};
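
The opcode word is built by OR-ing shifted field values; a minimal sketch using only the masks and shifts defined above (the field values are caller-supplied placeholders):

/* Illustrative: compose a DMAE opcode from a few of its fields. */
static u32 dmae_opcode(u32 dst, u32 endianity, u32 port)
{
	return ((dst << DMAE_COMMAND_DST_SHIFT) & DMAE_COMMAND_DST) |
	       ((endianity << DMAE_COMMAND_ENDIANITY_SHIFT) &
		DMAE_COMMAND_ENDIANITY) |
	       ((port << DMAE_COMMAND_PORT_SHIFT) & DMAE_COMMAND_PORT);
}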
+
+
+/*
  * common data for all protocols
  */
 struct doorbell_hdr {
@@ -1963,33 +2679,29 @@
 };
 
 /*
- * doorbell message sent to the chip
+ * Ethernet doorbell
  */
-struct doorbell {
+struct eth_tx_doorbell {
 #if defined(__BIG_ENDIAN)
-	u16 zero_fill2;
-	u8 zero_fill1;
-	struct doorbell_hdr header;
+	u16 npackets;
+	u8 params;
+#define ETH_TX_DOORBELL_NUM_BDS (0x3F<<0)
+#define ETH_TX_DOORBELL_NUM_BDS_SHIFT 0
+#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG (0x1<<6)
+#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG_SHIFT 6
+#define ETH_TX_DOORBELL_SPARE (0x1<<7)
+#define ETH_TX_DOORBELL_SPARE_SHIFT 7
+	struct doorbell_hdr hdr;
 #elif defined(__LITTLE_ENDIAN)
-	struct doorbell_hdr header;
-	u8 zero_fill1;
-	u16 zero_fill2;
-#endif
-};
-
-
-/*
- * doorbell message sent to the chip
- */
-struct doorbell_set_prod {
-#if defined(__BIG_ENDIAN)
-	u16 prod;
-	u8 zero_fill1;
-	struct doorbell_hdr header;
-#elif defined(__LITTLE_ENDIAN)
-	struct doorbell_hdr header;
-	u8 zero_fill1;
-	u16 prod;
+	struct doorbell_hdr hdr;
+	u8 params;
+#define ETH_TX_DOORBELL_NUM_BDS (0x3F<<0)
+#define ETH_TX_DOORBELL_NUM_BDS_SHIFT 0
+#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG (0x1<<6)
+#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG_SHIFT 6
+#define ETH_TX_DOORBELL_SPARE (0x1<<7)
+#define ETH_TX_DOORBELL_SPARE_SHIFT 7
+	u16 npackets;
 #endif
 };
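
The params byte carries the BD count in its low six bits; for example (illustrative):

/* Illustrative: encode the number of BDs into the doorbell params byte. */
static u8 tx_doorbell_params(u8 num_bds)
{
	return (num_bds << ETH_TX_DOORBELL_NUM_BDS_SHIFT) &
		ETH_TX_DOORBELL_NUM_BDS;
}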
 
@@ -2000,7 +2712,7 @@
 struct hc_status_block_e1x {
 	__le16 index_values[HC_SB_MAX_INDICES_E1X];
 	__le16 running_index[HC_SB_MAX_SM];
-	u32 rsrv;
+	__le32 rsrv[11];
 };
 
 /*
@@ -2017,7 +2729,7 @@
 struct hc_status_block_e2 {
 	__le16 index_values[HC_SB_MAX_INDICES_E2];
 	__le16 running_index[HC_SB_MAX_SM];
-	u32 reserved;
+	__le32 reserved[11];
 };
 
 /*
@@ -2138,6 +2850,16 @@
 
 
 /*
+ * IGU control commands
+ */
+enum igu_ctrl_cmd {
+	IGU_CTRL_CMD_TYPE_RD,
+	IGU_CTRL_CMD_TYPE_WR,
+	MAX_IGU_CTRL_CMD
+};
+
+
+/*
  * Control register for the IGU command register
  */
 struct igu_ctrl_reg {
@@ -2156,6 +2878,29 @@
 
 
 /*
+ * IGU interrupt command
+ */
+enum igu_int_cmd {
+	IGU_INT_ENABLE,
+	IGU_INT_DISABLE,
+	IGU_INT_NOP,
+	IGU_INT_NOP2,
+	MAX_IGU_INT_CMD
+};
+
+
+/*
+ * IGU segments
+ */
+enum igu_seg_access {
+	IGU_SEG_ACCESS_NORM,
+	IGU_SEG_ACCESS_DEF,
+	IGU_SEG_ACCESS_ATTN,
+	MAX_IGU_SEG_ACCESS
+};
+
+
+/*
  * Parser parsing flags field
  */
 struct parsing_flags {
@@ -2189,94 +2934,46 @@
 };
 
 
-struct regpair {
-	__le32 lo;
-	__le32 hi;
+/*
+ * Parsing flags for TCP ACK type
+ */
+enum prs_flags_ack_type {
+	PRS_FLAG_PUREACK_PIGGY,
+	PRS_FLAG_PUREACK_PURE,
+	MAX_PRS_FLAGS_ACK_TYPE
 };
 
 
 /*
- * dmae command structure
+ * Parsing flags for Ethernet address type
  */
-struct dmae_command {
-	u32 opcode;
-#define DMAE_COMMAND_SRC (0x1<<0)
-#define DMAE_COMMAND_SRC_SHIFT 0
-#define DMAE_COMMAND_DST (0x3<<1)
-#define DMAE_COMMAND_DST_SHIFT 1
-#define DMAE_COMMAND_C_DST (0x1<<3)
-#define DMAE_COMMAND_C_DST_SHIFT 3
-#define DMAE_COMMAND_C_TYPE_ENABLE (0x1<<4)
-#define DMAE_COMMAND_C_TYPE_ENABLE_SHIFT 4
-#define DMAE_COMMAND_C_TYPE_CRC_ENABLE (0x1<<5)
-#define DMAE_COMMAND_C_TYPE_CRC_ENABLE_SHIFT 5
-#define DMAE_COMMAND_C_TYPE_CRC_OFFSET (0x7<<6)
-#define DMAE_COMMAND_C_TYPE_CRC_OFFSET_SHIFT 6
-#define DMAE_COMMAND_ENDIANITY (0x3<<9)
-#define DMAE_COMMAND_ENDIANITY_SHIFT 9
-#define DMAE_COMMAND_PORT (0x1<<11)
-#define DMAE_COMMAND_PORT_SHIFT 11
-#define DMAE_COMMAND_CRC_RESET (0x1<<12)
-#define DMAE_COMMAND_CRC_RESET_SHIFT 12
-#define DMAE_COMMAND_SRC_RESET (0x1<<13)
-#define DMAE_COMMAND_SRC_RESET_SHIFT 13
-#define DMAE_COMMAND_DST_RESET (0x1<<14)
-#define DMAE_COMMAND_DST_RESET_SHIFT 14
-#define DMAE_COMMAND_E1HVN (0x3<<15)
-#define DMAE_COMMAND_E1HVN_SHIFT 15
-#define DMAE_COMMAND_DST_VN (0x3<<17)
-#define DMAE_COMMAND_DST_VN_SHIFT 17
-#define DMAE_COMMAND_C_FUNC (0x1<<19)
-#define DMAE_COMMAND_C_FUNC_SHIFT 19
-#define DMAE_COMMAND_ERR_POLICY (0x3<<20)
-#define DMAE_COMMAND_ERR_POLICY_SHIFT 20
-#define DMAE_COMMAND_RESERVED0 (0x3FF<<22)
-#define DMAE_COMMAND_RESERVED0_SHIFT 22
-	u32 src_addr_lo;
-	u32 src_addr_hi;
-	u32 dst_addr_lo;
-	u32 dst_addr_hi;
-#if defined(__BIG_ENDIAN)
-	u16 reserved1;
-	u16 len;
-#elif defined(__LITTLE_ENDIAN)
-	u16 len;
-	u16 reserved1;
-#endif
-	u32 comp_addr_lo;
-	u32 comp_addr_hi;
-	u32 comp_val;
-	u32 crc32;
-	u32 crc32_c;
-#if defined(__BIG_ENDIAN)
-	u16 crc16_c;
-	u16 crc16;
-#elif defined(__LITTLE_ENDIAN)
-	u16 crc16;
-	u16 crc16_c;
-#endif
-#if defined(__BIG_ENDIAN)
-	u16 reserved3;
-	u16 crc_t10;
-#elif defined(__LITTLE_ENDIAN)
-	u16 crc_t10;
-	u16 reserved3;
-#endif
-#if defined(__BIG_ENDIAN)
-	u16 xsum8;
-	u16 xsum16;
-#elif defined(__LITTLE_ENDIAN)
-	u16 xsum16;
-	u16 xsum8;
-#endif
+enum prs_flags_eth_addr_type {
+	PRS_FLAG_ETHTYPE_NON_UNICAST,
+	PRS_FLAG_ETHTYPE_UNICAST,
+	MAX_PRS_FLAGS_ETH_ADDR_TYPE
 };
 
 
-struct double_regpair {
-	u32 regpair0_lo;
-	u32 regpair0_hi;
-	u32 regpair1_lo;
-	u32 regpair1_hi;
+/*
+ * Parsing flags for over-ethernet protocol
+ */
+enum prs_flags_over_eth {
+	PRS_FLAG_OVERETH_UNKNOWN,
+	PRS_FLAG_OVERETH_IPV4,
+	PRS_FLAG_OVERETH_IPV6,
+	PRS_FLAG_OVERETH_LLCSNAP_UNKNOWN,
+	MAX_PRS_FLAGS_OVER_ETH
+};
+
+
+/*
+ * Parsing flags for over-IP protocol
+ */
+enum prs_flags_over_ip {
+	PRS_FLAG_OVERIP_UNKNOWN,
+	PRS_FLAG_OVERIP_TCP,
+	PRS_FLAG_OVERIP_UDP,
+	MAX_PRS_FLAGS_OVER_IP
 };
 
 
@@ -2297,55 +2994,24 @@
 #define SDM_OP_GEN_RESERVED_SHIFT 17
 };
 
-/*
- * The eth Rx Buffer Descriptor
- */
-struct eth_rx_bd {
-	__le32 addr_lo;
-	__le32 addr_hi;
-};
 
 /*
- * The eth Rx SGE Descriptor
+ * Timers connection context
  */
-struct eth_rx_sge {
-	__le32 addr_lo;
-	__le32 addr_hi;
+struct timers_block_context {
+	u32 __reserved_0;
+	u32 __reserved_1;
+	u32 __reserved_2;
+	u32 flags;
+#define __TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS (0x3<<0)
+#define __TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS_SHIFT 0
+#define TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG (0x1<<2)
+#define TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG_SHIFT 2
+#define __TIMERS_BLOCK_CONTEXT_RESERVED0 (0x1FFFFFFF<<3)
+#define __TIMERS_BLOCK_CONTEXT_RESERVED0_SHIFT 3
 };
 
 
-
-/*
- * The eth storm context of Ustorm
- */
-struct ustorm_eth_st_context {
-	u32 reserved0[48];
-};
-
-/*
- * The eth storm context of Tstorm
- */
-struct tstorm_eth_st_context {
-	u32 __reserved0[28];
-};
-
-/*
- * The eth aggregative context of Xstorm
- */
-struct xstorm_eth_ag_context {
-	u32 reserved0;
-#if defined(__BIG_ENDIAN)
-	u8 cdu_reserved;
-	u8 reserved2;
-	u16 reserved1;
-#elif defined(__LITTLE_ENDIAN)
-	u16 reserved1;
-	u8 reserved2;
-	u8 cdu_reserved;
-#endif
-	u32 reserved3[30];
-};
-
 /*
  * The eth aggregative context of Tstorm
  */
@@ -2355,14 +3021,6 @@
 
 
 /*
- * The eth aggregative context of Cstorm
- */
-struct cstorm_eth_ag_context {
-	u32 __reserved0[10];
-};
-
-
-/*
  * The eth aggregative context of Ustorm
  */
 struct ustorm_eth_ag_context {
@@ -2379,22 +3037,808 @@
 	u32 __reserved3[6];
 };
 
+
 /*
- * Timers connection context
+ * The eth aggregative context of Xstorm
  */
-struct timers_block_context {
-	u32 __reserved_0;
-	u32 __reserved_1;
-	u32 __reserved_2;
-	u32 flags;
-#define __TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS (0x3<<0)
-#define __TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS_SHIFT 0
-#define TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG (0x1<<2)
-#define TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG_SHIFT 2
-#define __TIMERS_BLOCK_CONTEXT_RESERVED0 (0x1FFFFFFF<<3)
-#define __TIMERS_BLOCK_CONTEXT_RESERVED0_SHIFT 3
+struct xstorm_eth_ag_context {
+	u32 reserved0;
+#if defined(__BIG_ENDIAN)
+	u8 cdu_reserved;
+	u8 reserved2;
+	u16 reserved1;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved1;
+	u8 reserved2;
+	u8 cdu_reserved;
+#endif
+	u32 reserved3[30];
 };
 
+
+/*
+ * doorbell message sent to the chip
+ */
+struct doorbell {
+#if defined(__BIG_ENDIAN)
+	u16 zero_fill2;
+	u8 zero_fill1;
+	struct doorbell_hdr header;
+#elif defined(__LITTLE_ENDIAN)
+	struct doorbell_hdr header;
+	u8 zero_fill1;
+	u16 zero_fill2;
+#endif
+};
+
+
+/*
+ * doorbell message sent to the chip
+ */
+struct doorbell_set_prod {
+#if defined(__BIG_ENDIAN)
+	u16 prod;
+	u8 zero_fill1;
+	struct doorbell_hdr header;
+#elif defined(__LITTLE_ENDIAN)
+	struct doorbell_hdr header;
+	u8 zero_fill1;
+	u16 prod;
+#endif
+};
+
+
+struct regpair {
+	__le32 lo;
+	__le32 hi;
+};
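
regpair is the usual carrier for 64-bit addresses handed to the firmware; a filling sketch, assuming the kernel's cpu_to_le32() helper:

/* Illustrative: split a 64-bit DMA address into a little-endian regpair. */
static void set_regpair(struct regpair *rp, u64 addr)
{
	rp->lo = cpu_to_le32((u32)addr);
	rp->hi = cpu_to_le32((u32)(addr >> 32));
}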
+
+
+/*
+ * Classify rule opcodes in E2/E3
+ */
+enum classify_rule {
+	CLASSIFY_RULE_OPCODE_MAC,
+	CLASSIFY_RULE_OPCODE_VLAN,
+	CLASSIFY_RULE_OPCODE_PAIR,
+	MAX_CLASSIFY_RULE
+};
+
+
+/*
+ * Classify rule types in E2/E3
+ */
+enum classify_rule_action_type {
+	CLASSIFY_RULE_REMOVE,
+	CLASSIFY_RULE_ADD,
+	MAX_CLASSIFY_RULE_ACTION_TYPE
+};
+
+
+/*
+ * client init ramrod data
+ */
+struct client_init_general_data {
+	u8 client_id;
+	u8 statistics_counter_id;
+	u8 statistics_en_flg;
+	u8 is_fcoe_flg;
+	u8 activate_flg;
+	u8 sp_client_id;
+	__le16 mtu;
+	u8 statistics_zero_flg;
+	u8 func_id;
+	u8 cos;
+	u8 traffic_type;
+	u32 reserved0;
+};
+
+
+/*
+ * client init rx data
+ */
+struct client_init_rx_data {
+	u8 tpa_en;
+#define CLIENT_INIT_RX_DATA_TPA_EN_IPV4 (0x1<<0)
+#define CLIENT_INIT_RX_DATA_TPA_EN_IPV4_SHIFT 0
+#define CLIENT_INIT_RX_DATA_TPA_EN_IPV6 (0x1<<1)
+#define CLIENT_INIT_RX_DATA_TPA_EN_IPV6_SHIFT 1
+#define CLIENT_INIT_RX_DATA_RESERVED5 (0x3F<<2)
+#define CLIENT_INIT_RX_DATA_RESERVED5_SHIFT 2
+	u8 vmqueue_mode_en_flg;
+	u8 extra_data_over_sgl_en_flg;
+	u8 cache_line_alignment_log_size;
+	u8 enable_dynamic_hc;
+	u8 max_sges_for_packet;
+	u8 client_qzone_id;
+	u8 drop_ip_cs_err_flg;
+	u8 drop_tcp_cs_err_flg;
+	u8 drop_ttl0_flg;
+	u8 drop_udp_cs_err_flg;
+	u8 inner_vlan_removal_enable_flg;
+	u8 outer_vlan_removal_enable_flg;
+	u8 status_block_id;
+	u8 rx_sb_index_number;
+	u8 reserved0;
+	u8 max_tpa_queues;
+	u8 silent_vlan_removal_flg;
+	__le16 max_bytes_on_bd;
+	__le16 sge_buff_size;
+	u8 approx_mcast_engine_id;
+	u8 rss_engine_id;
+	struct regpair bd_page_base;
+	struct regpair sge_page_base;
+	struct regpair cqe_page_base;
+	u8 is_leading_rss;
+	u8 is_approx_mcast;
+	__le16 max_agg_size;
+	__le16 state;
+#define CLIENT_INIT_RX_DATA_UCAST_DROP_ALL (0x1<<0)
+#define CLIENT_INIT_RX_DATA_UCAST_DROP_ALL_SHIFT 0
+#define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_ALL (0x1<<1)
+#define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_ALL_SHIFT 1
+#define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_UNMATCHED (0x1<<2)
+#define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_UNMATCHED_SHIFT 2
+#define CLIENT_INIT_RX_DATA_MCAST_DROP_ALL (0x1<<3)
+#define CLIENT_INIT_RX_DATA_MCAST_DROP_ALL_SHIFT 3
+#define CLIENT_INIT_RX_DATA_MCAST_ACCEPT_ALL (0x1<<4)
+#define CLIENT_INIT_RX_DATA_MCAST_ACCEPT_ALL_SHIFT 4
+#define CLIENT_INIT_RX_DATA_BCAST_ACCEPT_ALL (0x1<<5)
+#define CLIENT_INIT_RX_DATA_BCAST_ACCEPT_ALL_SHIFT 5
+#define CLIENT_INIT_RX_DATA_ACCEPT_ANY_VLAN (0x1<<6)
+#define CLIENT_INIT_RX_DATA_ACCEPT_ANY_VLAN_SHIFT 6
+#define CLIENT_INIT_RX_DATA_RESERVED2 (0x1FF<<7)
+#define CLIENT_INIT_RX_DATA_RESERVED2_SHIFT 7
+	__le16 cqe_pause_thr_low;
+	__le16 cqe_pause_thr_high;
+	__le16 bd_pause_thr_low;
+	__le16 bd_pause_thr_high;
+	__le16 sge_pause_thr_low;
+	__le16 sge_pause_thr_high;
+	__le16 rx_cos_mask;
+	__le16 silent_vlan_value;
+	__le16 silent_vlan_mask;
+	__le32 reserved6[2];
+};
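
For the tpa_en bits, a minimal configuration sketch (illustrative):

/* Illustrative: enable TPA for both IPv4 and IPv6 on this client. */
static void rx_init_enable_tpa(struct client_init_rx_data *rx)
{
	rx->tpa_en = CLIENT_INIT_RX_DATA_TPA_EN_IPV4 |
		     CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
}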
+
+/*
+ * client init tx data
+ */
+struct client_init_tx_data {
+	u8 enforce_security_flg;
+	u8 tx_status_block_id;
+	u8 tx_sb_index_number;
+	u8 tss_leading_client_id;
+	u8 tx_switching_flg;
+	u8 anti_spoofing_flg;
+	__le16 default_vlan;
+	struct regpair tx_bd_page_base;
+	__le16 state;
+#define CLIENT_INIT_TX_DATA_UCAST_ACCEPT_ALL (0x1<<0)
+#define CLIENT_INIT_TX_DATA_UCAST_ACCEPT_ALL_SHIFT 0
+#define CLIENT_INIT_TX_DATA_MCAST_ACCEPT_ALL (0x1<<1)
+#define CLIENT_INIT_TX_DATA_MCAST_ACCEPT_ALL_SHIFT 1
+#define CLIENT_INIT_TX_DATA_BCAST_ACCEPT_ALL (0x1<<2)
+#define CLIENT_INIT_TX_DATA_BCAST_ACCEPT_ALL_SHIFT 2
+#define CLIENT_INIT_TX_DATA_ACCEPT_ANY_VLAN (0x1<<3)
+#define CLIENT_INIT_TX_DATA_ACCEPT_ANY_VLAN_SHIFT 3
+#define CLIENT_INIT_TX_DATA_RESERVED1 (0xFFF<<4)
+#define CLIENT_INIT_TX_DATA_RESERVED1_SHIFT 4
+	u8 default_vlan_flg;
+	u8 reserved2;
+	__le32 reserved3;
+};
+
+/*
+ * client init ramrod data
+ */
+struct client_init_ramrod_data {
+	struct client_init_general_data general;
+	struct client_init_rx_data rx;
+	struct client_init_tx_data tx;
+};
+
+
+/*
+ * client update ramrod data
+ */
+struct client_update_ramrod_data {
+	u8 client_id;
+	u8 func_id;
+	u8 inner_vlan_removal_enable_flg;
+	u8 inner_vlan_removal_change_flg;
+	u8 outer_vlan_removal_enable_flg;
+	u8 outer_vlan_removal_change_flg;
+	u8 anti_spoofing_enable_flg;
+	u8 anti_spoofing_change_flg;
+	u8 activate_flg;
+	u8 activate_change_flg;
+	__le16 default_vlan;
+	u8 default_vlan_enable_flg;
+	u8 default_vlan_change_flg;
+	__le16 silent_vlan_value;
+	__le16 silent_vlan_mask;
+	u8 silent_vlan_removal_flg;
+	u8 silent_vlan_change_flg;
+	__le32 echo;
+};
+
+
+/*
+ * The eth storm context of Cstorm
+ */
+struct cstorm_eth_st_context {
+	u32 __reserved0[4];
+};
+
+
+struct double_regpair {
+	u32 regpair0_lo;
+	u32 regpair0_hi;
+	u32 regpair1_lo;
+	u32 regpair1_hi;
+};
+
+
+/*
+ * Ethernet address types used in ethernet tx BDs
+ */
+enum eth_addr_type {
+	UNKNOWN_ADDRESS,
+	UNICAST_ADDRESS,
+	MULTICAST_ADDRESS,
+	BROADCAST_ADDRESS,
+	MAX_ETH_ADDR_TYPE
+};
+
+
+/*
+ * header for eth classification command
+ */
+struct eth_classify_cmd_header {
+	u8 cmd_general_data;
+#define ETH_CLASSIFY_CMD_HEADER_RX_CMD (0x1<<0)
+#define ETH_CLASSIFY_CMD_HEADER_RX_CMD_SHIFT 0
+#define ETH_CLASSIFY_CMD_HEADER_TX_CMD (0x1<<1)
+#define ETH_CLASSIFY_CMD_HEADER_TX_CMD_SHIFT 1
+#define ETH_CLASSIFY_CMD_HEADER_OPCODE (0x3<<2)
+#define ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT 2
+#define ETH_CLASSIFY_CMD_HEADER_IS_ADD (0x1<<4)
+#define ETH_CLASSIFY_CMD_HEADER_IS_ADD_SHIFT 4
+#define ETH_CLASSIFY_CMD_HEADER_RESERVED0 (0x7<<5)
+#define ETH_CLASSIFY_CMD_HEADER_RESERVED0_SHIFT 5
+	u8 func_id;
+	u8 client_id;
+	u8 reserved1;
+};
+
+
+/*
+ * header for eth classification config ramrod
+ */
+struct eth_classify_header {
+	u8 rule_cnt;
+	u8 reserved0;
+	__le16 reserved1;
+	__le32 echo;
+};
+
+
+/*
+ * Command for adding/removing a MAC classification rule
+ */
+struct eth_classify_mac_cmd {
+	struct eth_classify_cmd_header header;
+	__le32 reserved0;
+	__le16 mac_lsb;
+	__le16 mac_mid;
+	__le16 mac_msb;
+	__le16 reserved1;
+};
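
The MAC is carried as three __le16 words (msb/mid/lsb); one plausible packing from a 6-byte address follows, where the byte order shown is an assumption rather than something stated in the patch:

/* Illustrative sketch: split a 6-byte MAC into the command's words.
 * Byte ordering is assumed, not taken from the patch. */
static void classify_set_mac(struct eth_classify_mac_cmd *cmd, const u8 *mac)
{
	cmd->mac_msb = cpu_to_le16((mac[0] << 8) | mac[1]);
	cmd->mac_mid = cpu_to_le16((mac[2] << 8) | mac[3]);
	cmd->mac_lsb = cpu_to_le16((mac[4] << 8) | mac[5]);
}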
+
+
+/*
+ * Command for adding/removing a MAC-VLAN pair classification rule
+ */
+struct eth_classify_pair_cmd {
+	struct eth_classify_cmd_header header;
+	__le32 reserved0;
+	__le16 mac_lsb;
+	__le16 mac_mid;
+	__le16 mac_msb;
+	__le16 vlan;
+};
+
+
+/*
+ * Command for adding/removing a VLAN classification rule
+ */
+struct eth_classify_vlan_cmd {
+	struct eth_classify_cmd_header header;
+	__le32 reserved0;
+	__le32 reserved1;
+	__le16 reserved2;
+	__le16 vlan;
+};
+
+/*
+ * union for eth classification rule
+ */
+union eth_classify_rule_cmd {
+	struct eth_classify_mac_cmd mac;
+	struct eth_classify_vlan_cmd vlan;
+	struct eth_classify_pair_cmd pair;
+};
+
+/*
+ * parameters for eth classification configuration ramrod
+ */
+struct eth_classify_rules_ramrod_data {
+	struct eth_classify_header header;
+	union eth_classify_rule_cmd rules[CLASSIFY_RULES_COUNT];
+};
+
+
+/*
+ * The data contains the client ID needed by the ramrod
+ */
+struct eth_common_ramrod_data {
+	__le32 client_id;
+	__le32 reserved1;
+};
+
+
+/*
+ * The eth storm context of Ustorm
+ */
+struct ustorm_eth_st_context {
+	u32 reserved0[52];
+};
+
+/*
+ * The eth storm context of Tstorm
+ */
+struct tstorm_eth_st_context {
+	u32 __reserved0[28];
+};
+
+/*
+ * The eth storm context of Xstorm
+ */
+struct xstorm_eth_st_context {
+	u32 reserved0[60];
+};
+
+/*
+ * Ethernet connection context
+ */
+struct eth_context {
+	struct ustorm_eth_st_context ustorm_st_context;
+	struct tstorm_eth_st_context tstorm_st_context;
+	struct xstorm_eth_ag_context xstorm_ag_context;
+	struct tstorm_eth_ag_context tstorm_ag_context;
+	struct cstorm_eth_ag_context cstorm_ag_context;
+	struct ustorm_eth_ag_context ustorm_ag_context;
+	struct timers_block_context timers_context;
+	struct xstorm_eth_st_context xstorm_st_context;
+	struct cstorm_eth_st_context cstorm_st_context;
+};
+
+
+/*
+ * union for sgl and raw data.
+ */
+union eth_sgl_or_raw_data {
+	__le16 sgl[8];
+	u32 raw_data[4];
+};
+
+/*
+ * eth FP end aggregation CQE parameters struct
+ */
+struct eth_end_agg_rx_cqe {
+	u8 type_error_flags;
+#define ETH_END_AGG_RX_CQE_TYPE (0x3<<0)
+#define ETH_END_AGG_RX_CQE_TYPE_SHIFT 0
+#define ETH_END_AGG_RX_CQE_SGL_RAW_SEL (0x1<<2)
+#define ETH_END_AGG_RX_CQE_SGL_RAW_SEL_SHIFT 2
+#define ETH_END_AGG_RX_CQE_RESERVED0 (0x1F<<3)
+#define ETH_END_AGG_RX_CQE_RESERVED0_SHIFT 3
+	u8 reserved1;
+	u8 queue_index;
+	u8 reserved2;
+	__le32 timestamp_delta;
+	__le16 num_of_coalesced_segs;
+	__le16 pkt_len;
+	u8 pure_ack_count;
+	u8 reserved3;
+	__le16 reserved4;
+	union eth_sgl_or_raw_data sgl_or_raw_data;
+	__le32 reserved5[8];
+};
+
+
+/*
+ * regular eth FP CQE parameters struct
+ */
+struct eth_fast_path_rx_cqe {
+	u8 type_error_flags;
+#define ETH_FAST_PATH_RX_CQE_TYPE (0x3<<0)
+#define ETH_FAST_PATH_RX_CQE_TYPE_SHIFT 0
+#define ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL (0x1<<2)
+#define ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL_SHIFT 2
+#define ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG (0x1<<3)
+#define ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG_SHIFT 3
+#define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG (0x1<<4)
+#define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG_SHIFT 4
+#define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG (0x1<<5)
+#define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG_SHIFT 5
+#define ETH_FAST_PATH_RX_CQE_RESERVED0 (0x3<<6)
+#define ETH_FAST_PATH_RX_CQE_RESERVED0_SHIFT 6
+	u8 status_flags;
+#define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE (0x7<<0)
+#define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE_SHIFT 0
+#define ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG (0x1<<3)
+#define ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG_SHIFT 3
+#define ETH_FAST_PATH_RX_CQE_BROADCAST_FLG (0x1<<4)
+#define ETH_FAST_PATH_RX_CQE_BROADCAST_FLG_SHIFT 4
+#define ETH_FAST_PATH_RX_CQE_MAC_MATCH_FLG (0x1<<5)
+#define ETH_FAST_PATH_RX_CQE_MAC_MATCH_FLG_SHIFT 5
+#define ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG (0x1<<6)
+#define ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG_SHIFT 6
+#define ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG (0x1<<7)
+#define ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG_SHIFT 7
+	u8 queue_index;
+	u8 placement_offset;
+	__le32 rss_hash_result;
+	__le16 vlan_tag;
+	__le16 pkt_len;
+	__le16 len_on_bd;
+	struct parsing_flags pars_flags;
+	union eth_sgl_or_raw_data sgl_or_raw_data;
+	__le32 reserved1[8];
+};
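
Checksum problems are reported per-CQE in type_error_flags; a consumer typically tests the bad-xsum bits directly (illustrative):

/* Illustrative: true if the CQE flags an IP or L4 checksum error. */
static bool cqe_csum_err(const struct eth_fast_path_rx_cqe *cqe)
{
	return cqe->type_error_flags &
	       (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
		ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG);
}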
+
+
+/*
+ * Command for setting classification flags for a client
+ */
+struct eth_filter_rules_cmd {
+	u8 cmd_general_data;
+#define ETH_FILTER_RULES_CMD_RX_CMD (0x1<<0)
+#define ETH_FILTER_RULES_CMD_RX_CMD_SHIFT 0
+#define ETH_FILTER_RULES_CMD_TX_CMD (0x1<<1)
+#define ETH_FILTER_RULES_CMD_TX_CMD_SHIFT 1
+#define ETH_FILTER_RULES_CMD_RESERVED0 (0x3F<<2)
+#define ETH_FILTER_RULES_CMD_RESERVED0_SHIFT 2
+	u8 func_id;
+	u8 client_id;
+	u8 reserved1;
+	__le16 state;
+#define ETH_FILTER_RULES_CMD_UCAST_DROP_ALL (0x1<<0)
+#define ETH_FILTER_RULES_CMD_UCAST_DROP_ALL_SHIFT 0
+#define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL (0x1<<1)
+#define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL_SHIFT 1
+#define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED (0x1<<2)
+#define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED_SHIFT 2
+#define ETH_FILTER_RULES_CMD_MCAST_DROP_ALL (0x1<<3)
+#define ETH_FILTER_RULES_CMD_MCAST_DROP_ALL_SHIFT 3
+#define ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL (0x1<<4)
+#define ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL_SHIFT 4
+#define ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL (0x1<<5)
+#define ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL_SHIFT 5
+#define ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN (0x1<<6)
+#define ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN_SHIFT 6
+#define ETH_FILTER_RULES_CMD_RESERVED2 (0x1FF<<7)
+#define ETH_FILTER_RULES_CMD_RESERVED2_SHIFT 7
+	__le16 reserved3;
+	struct regpair reserved4;
+};
+
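As an illustrative sketch (the helper is hypothetical), a caller would OR the per-bit state defines together and convert to little-endian, e.g. for a client that accepts all multicast and broadcast but drops unmatched unicast on RX:

/* Illustrative only: accept all mcast/bcast, drop unmatched ucast, RX side. */
static void example_fill_filter_cmd(struct eth_filter_rules_cmd *cmd)
{
	cmd->cmd_general_data = ETH_FILTER_RULES_CMD_RX_CMD;
	cmd->state = cpu_to_le16(ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
				 ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL |
				 ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL);
}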
+
+/*
+ * parameters for eth classification filters ramrod
+ */
+struct eth_filter_rules_ramrod_data {
+	struct eth_classify_header header;
+	struct eth_filter_rules_cmd rules[FILTER_RULES_COUNT];
+};
+
+
+/*
+ * parameters for eth classification configuration ramrod
+ */
+struct eth_general_rules_ramrod_data {
+	struct eth_classify_header header;
+	union eth_classify_rule_cmd rules[CLASSIFY_RULES_COUNT];
+};
+
+
+/*
+ * The data for Halt ramrod
+ */
+struct eth_halt_ramrod_data {
+	__le32 client_id;
+	__le32 reserved0;
+};
+
+
+/*
+ * Command for setting multicast classification for a client
+ */
+struct eth_multicast_rules_cmd {
+	u8 cmd_general_data;
+#define ETH_MULTICAST_RULES_CMD_RX_CMD (0x1<<0)
+#define ETH_MULTICAST_RULES_CMD_RX_CMD_SHIFT 0
+#define ETH_MULTICAST_RULES_CMD_TX_CMD (0x1<<1)
+#define ETH_MULTICAST_RULES_CMD_TX_CMD_SHIFT 1
+#define ETH_MULTICAST_RULES_CMD_IS_ADD (0x1<<2)
+#define ETH_MULTICAST_RULES_CMD_IS_ADD_SHIFT 2
+#define ETH_MULTICAST_RULES_CMD_RESERVED0 (0x1F<<3)
+#define ETH_MULTICAST_RULES_CMD_RESERVED0_SHIFT 3
+	u8 func_id;
+	u8 bin_id;
+	u8 engine_id;
+	__le32 reserved2;
+	struct regpair reserved3;
+};
+
+
+/*
+ * parameters for multicast classification ramrod
+ */
+struct eth_multicast_rules_ramrod_data {
+	struct eth_classify_header header;
+	struct eth_multicast_rules_cmd rules[MULTICAST_RULES_COUNT];
+};
+
+
+/*
+ * Place holder for ramrods protocol specific data
+ */
+struct ramrod_data {
+	__le32 data_lo;
+	__le32 data_hi;
+};
+
+/*
+ * union for ramrod data for Ethernet protocol (CQE) (force size of 16 bits)
+ */
+union eth_ramrod_data {
+	struct ramrod_data general;
+};
+
+
+/*
+ * RSS toeplitz hash type, as reported in CQE
+ */
+enum eth_rss_hash_type {
+	DEFAULT_HASH_TYPE,
+	IPV4_HASH_TYPE,
+	TCP_IPV4_HASH_TYPE,
+	IPV6_HASH_TYPE,
+	TCP_IPV6_HASH_TYPE,
+	VLAN_PRI_HASH_TYPE,
+	E1HOV_PRI_HASH_TYPE,
+	DSCP_HASH_TYPE,
+	MAX_ETH_RSS_HASH_TYPE
+};
+
+
+/*
+ * Ethernet RSS mode
+ */
+enum eth_rss_mode {
+	ETH_RSS_MODE_DISABLED,
+	ETH_RSS_MODE_REGULAR,
+	ETH_RSS_MODE_VLAN_PRI,
+	ETH_RSS_MODE_E1HOV_PRI,
+	ETH_RSS_MODE_IP_DSCP,
+	MAX_ETH_RSS_MODE
+};
+
+
+/*
+ * parameters for RSS update ramrod (E2)
+ */
+struct eth_rss_update_ramrod_data {
+	u8 rss_engine_id;
+	u8 capabilities;
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY (0x1<<0)
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY_SHIFT 0
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY (0x1<<1)
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY_SHIFT 1
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY (0x1<<2)
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY_SHIFT 2
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY (0x1<<3)
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY_SHIFT 3
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY (0x1<<4)
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY_SHIFT 4
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY (0x1<<5)
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY_SHIFT 5
+#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<6)
+#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 6
+#define __ETH_RSS_UPDATE_RAMROD_DATA_RESERVED0 (0x1<<7)
+#define __ETH_RSS_UPDATE_RAMROD_DATA_RESERVED0_SHIFT 7
+	u8 rss_result_mask;
+	u8 rss_mode;
+	__le32 __reserved2;
+	u8 indirection_table[T_ETH_INDIRECTION_TABLE_SIZE];
+	__le32 rss_key[T_ETH_RSS_KEY];
+	__le32 echo;
+	__le32 reserved3;
+};
+
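A hedged sketch of filling this ramrod (the helper is hypothetical; the 0x7f result mask assumes the usual 128-entry indirection table implied by T_ETH_INDIRECTION_TABLE_SIZE):

/* Illustrative only: enable IPv4 and IPv4/TCP hashing in regular RSS mode. */
static void example_fill_rss_update(struct eth_rss_update_ramrod_data *data)
{
	data->rss_mode = ETH_RSS_MODE_REGULAR;
	data->capabilities =
		ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY |
		ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
	data->rss_result_mask = 0x7f;	/* hash bits used to index the table */
}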
+
+/*
+ * The eth Rx Buffer Descriptor
+ */
+struct eth_rx_bd {
+	__le32 addr_lo;
+	__le32 addr_hi;
+};
+
+
+/*
+ * Eth Rx Cqe structure - general structure for ramrods
+ */
+struct common_ramrod_eth_rx_cqe {
+	u8 ramrod_type;
+#define COMMON_RAMROD_ETH_RX_CQE_TYPE (0x3<<0)
+#define COMMON_RAMROD_ETH_RX_CQE_TYPE_SHIFT 0
+#define COMMON_RAMROD_ETH_RX_CQE_ERROR (0x1<<2)
+#define COMMON_RAMROD_ETH_RX_CQE_ERROR_SHIFT 2
+#define COMMON_RAMROD_ETH_RX_CQE_RESERVED0 (0x1F<<3)
+#define COMMON_RAMROD_ETH_RX_CQE_RESERVED0_SHIFT 3
+	u8 conn_type;
+	__le16 reserved1;
+	__le32 conn_and_cmd_data;
+#define COMMON_RAMROD_ETH_RX_CQE_CID (0xFFFFFF<<0)
+#define COMMON_RAMROD_ETH_RX_CQE_CID_SHIFT 0
+#define COMMON_RAMROD_ETH_RX_CQE_CMD_ID (0xFF<<24)
+#define COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT 24
+	struct ramrod_data protocol_data;
+	__le32 echo;
+	__le32 reserved2[11];
+};
+
+/*
+ * Rx Last CQE in page (in ETH)
+ */
+struct eth_rx_cqe_next_page {
+	__le32 addr_lo;
+	__le32 addr_hi;
+	__le32 reserved[14];
+};
+
+/*
+ * union for all eth rx cqe types (fix their sizes)
+ */
+union eth_rx_cqe {
+	struct eth_fast_path_rx_cqe fast_path_cqe;
+	struct common_ramrod_eth_rx_cqe ramrod_cqe;
+	struct eth_rx_cqe_next_page next_page_cqe;
+	struct eth_end_agg_rx_cqe end_agg_cqe;
+};
+
+
+/*
+ * Values for RX ETH CQE type field
+ */
+enum eth_rx_cqe_type {
+	RX_ETH_CQE_TYPE_ETH_FASTPATH,
+	RX_ETH_CQE_TYPE_ETH_RAMROD,
+	RX_ETH_CQE_TYPE_ETH_START_AGG,
+	RX_ETH_CQE_TYPE_ETH_STOP_AGG,
+	MAX_ETH_RX_CQE_TYPE
+};
+
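Every variant in union eth_rx_cqe keeps the same 2-bit type field in its first byte, so a receiver can classify an entry before touching the variant-specific layout. A hedged sketch (the helper is illustrative):

/* Illustrative only: classify a CQE before touching the variant layout. */
static int example_cqe_is_ramrod(const union eth_rx_cqe *cqe)
{
	u8 type = (cqe->fast_path_cqe.type_error_flags &
		   ETH_FAST_PATH_RX_CQE_TYPE) >>
		  ETH_FAST_PATH_RX_CQE_TYPE_SHIFT;

	return type == RX_ETH_CQE_TYPE_ETH_RAMROD;
}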
+
+/*
+ * Type of SGL/Raw field in ETH RX fast path CQE
+ */
+enum eth_rx_fp_sel {
+	ETH_FP_CQE_REGULAR,
+	ETH_FP_CQE_RAW,
+	MAX_ETH_RX_FP_SEL
+};
+
+
+/*
+ * The eth Rx SGE Descriptor
+ */
+struct eth_rx_sge {
+	__le32 addr_lo;
+	__le32 addr_hi;
+};
+
+
+/*
+ * common data for all protocols
+ */
+struct spe_hdr {
+	__le32 conn_and_cmd_data;
+#define SPE_HDR_CID (0xFFFFFF<<0)
+#define SPE_HDR_CID_SHIFT 0
+#define SPE_HDR_CMD_ID (0xFF<<24)
+#define SPE_HDR_CMD_ID_SHIFT 24
+	__le16 type;
+#define SPE_HDR_CONN_TYPE (0xFF<<0)
+#define SPE_HDR_CONN_TYPE_SHIFT 0
+#define SPE_HDR_FUNCTION_ID (0xFF<<8)
+#define SPE_HDR_FUNCTION_ID_SHIFT 8
+	__le16 reserved1;
+};
+
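A sketch of packing the header (illustrative helper; connection-type values come from enum connection_type further down in this file):

/* Illustrative only: pack CID/command and connection/function ids. */
static void example_fill_spe_hdr(struct spe_hdr *hdr, u32 cid, u8 cmd_id,
				 u8 conn_type, u8 func_id)
{
	hdr->conn_and_cmd_data = cpu_to_le32((cid & 0xffffff) |
					     ((u32)cmd_id <<
					      SPE_HDR_CMD_ID_SHIFT));
	hdr->type = cpu_to_le16(conn_type |
				((u16)func_id << SPE_HDR_FUNCTION_ID_SHIFT));
}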
+/*
+ * specific data for ethernet slow path element
+ */
+union eth_specific_data {
+	u8 protocol_data[8];
+	struct regpair client_update_ramrod_data;
+	struct regpair client_init_ramrod_init_data;
+	struct eth_halt_ramrod_data halt_ramrod_data;
+	struct regpair update_data_addr;
+	struct eth_common_ramrod_data common_ramrod_data;
+	struct regpair classify_cfg_addr;
+	struct regpair filter_cfg_addr;
+	struct regpair mcast_cfg_addr;
+};
+
+/*
+ * Ethernet slow path element
+ */
+struct eth_spe {
+	struct spe_hdr hdr;
+	union eth_specific_data data;
+};
+
+
+/*
+ * Ethernet command ID for slow path elements
+ */
+enum eth_spqe_cmd_id {
+	RAMROD_CMD_ID_ETH_UNUSED,
+	RAMROD_CMD_ID_ETH_CLIENT_SETUP,
+	RAMROD_CMD_ID_ETH_HALT,
+	RAMROD_CMD_ID_ETH_FORWARD_SETUP,
+	RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP,
+	RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
+	RAMROD_CMD_ID_ETH_EMPTY,
+	RAMROD_CMD_ID_ETH_TERMINATE,
+	RAMROD_CMD_ID_ETH_TPA_UPDATE,
+	RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES,
+	RAMROD_CMD_ID_ETH_FILTER_RULES,
+	RAMROD_CMD_ID_ETH_MULTICAST_RULES,
+	RAMROD_CMD_ID_ETH_RSS_UPDATE,
+	RAMROD_CMD_ID_ETH_SET_MAC,
+	MAX_ETH_SPQE_CMD_ID
+};
+
+
+/*
+ * eth tpa update command
+ */
+enum eth_tpa_update_command {
+	TPA_UPDATE_NONE_COMMAND,
+	TPA_UPDATE_ENABLE_COMMAND,
+	TPA_UPDATE_DISABLE_COMMAND,
+	MAX_ETH_TPA_UPDATE_COMMAND
+};
+
+
+/*
+ * Tx regular BD structure
+ */
+struct eth_tx_bd {
+	__le32 addr_lo;
+	__le32 addr_hi;
+	__le16 total_pkt_bytes;
+	__le16 nbytes;
+	u8 reserved[4];
+};
+
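For illustration, pointing a regular TX BD at a DMA mapping splits the 64-bit bus address into the two little-endian halves (U64_LO/U64_HI are the driver's usual 64-bit split helpers; the function itself is hypothetical):

/* Illustrative only: fill a regular TX BD from a DMA mapping. */
static void example_fill_tx_bd(struct eth_tx_bd *bd, dma_addr_t mapping,
			       u16 len)
{
	bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	bd->nbytes = cpu_to_le16(len);
}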
+
 /*
  * structure for easy accessibility to assembler
  */
@@ -2427,24 +3871,17 @@
 	__le16 vlan_or_ethertype;
 	struct eth_tx_bd_flags bd_flags;
 	u8 general_data;
-#define ETH_TX_START_BD_HDR_NBDS (0x3F<<0)
+#define ETH_TX_START_BD_HDR_NBDS (0xF<<0)
 #define ETH_TX_START_BD_HDR_NBDS_SHIFT 0
+#define ETH_TX_START_BD_FORCE_VLAN_MODE (0x1<<4)
+#define ETH_TX_START_BD_FORCE_VLAN_MODE_SHIFT 4
+#define ETH_TX_START_BD_RESERVED (0x1<<5)
+#define ETH_TX_START_BD_RESERVED_SHIFT 5
 #define ETH_TX_START_BD_ETH_ADDR_TYPE (0x3<<6)
 #define ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT 6
 };
 
 /*
- * Tx regular BD structure
- */
-struct eth_tx_bd {
-	__le32 addr_lo;
-	__le32 addr_hi;
-	__le16 total_pkt_bytes;
-	__le16 nbytes;
-	u8 reserved[4];
-};
-
-/*
  * Tx parsing BD structure for ETH E1/E1h
  */
 struct eth_tx_parse_bd_e1x {
@@ -2526,336 +3963,6 @@
 	struct eth_tx_next_bd next_bd;
 };
 
-
-/*
- * The eth storm context of Xstorm
- */
-struct xstorm_eth_st_context {
-	u32 reserved0[60];
-};
-
-/*
- * The eth storm context of Cstorm
- */
-struct cstorm_eth_st_context {
-	u32 __reserved0[4];
-};
-
-/*
- * Ethernet connection context
- */
-struct eth_context {
-	struct ustorm_eth_st_context ustorm_st_context;
-	struct tstorm_eth_st_context tstorm_st_context;
-	struct xstorm_eth_ag_context xstorm_ag_context;
-	struct tstorm_eth_ag_context tstorm_ag_context;
-	struct cstorm_eth_ag_context cstorm_ag_context;
-	struct ustorm_eth_ag_context ustorm_ag_context;
-	struct timers_block_context timers_context;
-	struct xstorm_eth_st_context xstorm_st_context;
-	struct cstorm_eth_st_context cstorm_st_context;
-};
-
-
-/*
- * Ethernet doorbell
- */
-struct eth_tx_doorbell {
-#if defined(__BIG_ENDIAN)
-	u16 npackets;
-	u8 params;
-#define ETH_TX_DOORBELL_NUM_BDS (0x3F<<0)
-#define ETH_TX_DOORBELL_NUM_BDS_SHIFT 0
-#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG (0x1<<6)
-#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG_SHIFT 6
-#define ETH_TX_DOORBELL_SPARE (0x1<<7)
-#define ETH_TX_DOORBELL_SPARE_SHIFT 7
-	struct doorbell_hdr hdr;
-#elif defined(__LITTLE_ENDIAN)
-	struct doorbell_hdr hdr;
-	u8 params;
-#define ETH_TX_DOORBELL_NUM_BDS (0x3F<<0)
-#define ETH_TX_DOORBELL_NUM_BDS_SHIFT 0
-#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG (0x1<<6)
-#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG_SHIFT 6
-#define ETH_TX_DOORBELL_SPARE (0x1<<7)
-#define ETH_TX_DOORBELL_SPARE_SHIFT 7
-	u16 npackets;
-#endif
-};
-
-
-/*
- * client init fc data
- */
-struct client_init_fc_data {
-	__le16 cqe_pause_thr_low;
-	__le16 cqe_pause_thr_high;
-	__le16 bd_pause_thr_low;
-	__le16 bd_pause_thr_high;
-	__le16 sge_pause_thr_low;
-	__le16 sge_pause_thr_high;
-	__le16 rx_cos_mask;
-	u8 safc_group_num;
-	u8 safc_group_en_flg;
-	u8 traffic_type;
-	u8 reserved0;
-	__le16 reserved1;
-	__le32 reserved2;
-};
-
-
-/*
- * client init ramrod data
- */
-struct client_init_general_data {
-	u8 client_id;
-	u8 statistics_counter_id;
-	u8 statistics_en_flg;
-	u8 is_fcoe_flg;
-	u8 activate_flg;
-	u8 sp_client_id;
-	__le16 reserved0;
-	__le32 reserved1[2];
-};
-
-
-/*
- * client init rx data
- */
-struct client_init_rx_data {
-	u8 tpa_en_flg;
-	u8 vmqueue_mode_en_flg;
-	u8 extra_data_over_sgl_en_flg;
-	u8 cache_line_alignment_log_size;
-	u8 enable_dynamic_hc;
-	u8 max_sges_for_packet;
-	u8 client_qzone_id;
-	u8 drop_ip_cs_err_flg;
-	u8 drop_tcp_cs_err_flg;
-	u8 drop_ttl0_flg;
-	u8 drop_udp_cs_err_flg;
-	u8 inner_vlan_removal_enable_flg;
-	u8 outer_vlan_removal_enable_flg;
-	u8 status_block_id;
-	u8 rx_sb_index_number;
-	u8 reserved0[3];
-	__le16 bd_buff_size;
-	__le16 sge_buff_size;
-	__le16 mtu;
-	struct regpair bd_page_base;
-	struct regpair sge_page_base;
-	struct regpair cqe_page_base;
-	u8 is_leading_rss;
-	u8 is_approx_mcast;
-	__le16 max_agg_size;
-	__le32 reserved2[3];
-};
-
-/*
- * client init tx data
- */
-struct client_init_tx_data {
-	u8 enforce_security_flg;
-	u8 tx_status_block_id;
-	u8 tx_sb_index_number;
-	u8 reserved0;
-	__le16 mtu;
-	__le16 reserved1;
-	struct regpair tx_bd_page_base;
-	__le32 reserved2[2];
-};
-
-/*
- * client init ramrod data
- */
-struct client_init_ramrod_data {
-	struct client_init_general_data general;
-	struct client_init_rx_data rx;
-	struct client_init_tx_data tx;
-	struct client_init_fc_data fc;
-};
-
-
-/*
- * The data contain client ID need to the ramrod
- */
-struct eth_common_ramrod_data {
-	u32 client_id;
-	u32 reserved1;
-};
-
-
-/*
- * union for sgl and raw data.
- */
-union eth_sgl_or_raw_data {
-	__le16 sgl[8];
-	u32 raw_data[4];
-};
-
-/*
- * regular eth FP CQE parameters struct
- */
-struct eth_fast_path_rx_cqe {
-	u8 type_error_flags;
-#define ETH_FAST_PATH_RX_CQE_TYPE (0x1<<0)
-#define ETH_FAST_PATH_RX_CQE_TYPE_SHIFT 0
-#define ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG (0x1<<1)
-#define ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG_SHIFT 1
-#define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG (0x1<<2)
-#define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG_SHIFT 2
-#define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG (0x1<<3)
-#define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG_SHIFT 3
-#define ETH_FAST_PATH_RX_CQE_START_FLG (0x1<<4)
-#define ETH_FAST_PATH_RX_CQE_START_FLG_SHIFT 4
-#define ETH_FAST_PATH_RX_CQE_END_FLG (0x1<<5)
-#define ETH_FAST_PATH_RX_CQE_END_FLG_SHIFT 5
-#define ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL (0x3<<6)
-#define ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL_SHIFT 6
-	u8 status_flags;
-#define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE (0x7<<0)
-#define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE_SHIFT 0
-#define ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG (0x1<<3)
-#define ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG_SHIFT 3
-#define ETH_FAST_PATH_RX_CQE_BROADCAST_FLG (0x1<<4)
-#define ETH_FAST_PATH_RX_CQE_BROADCAST_FLG_SHIFT 4
-#define ETH_FAST_PATH_RX_CQE_MAC_MATCH_FLG (0x1<<5)
-#define ETH_FAST_PATH_RX_CQE_MAC_MATCH_FLG_SHIFT 5
-#define ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG (0x1<<6)
-#define ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG_SHIFT 6
-#define ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG (0x1<<7)
-#define ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG_SHIFT 7
-	u8 placement_offset;
-	u8 queue_index;
-	__le32 rss_hash_result;
-	__le16 vlan_tag;
-	__le16 pkt_len;
-	__le16 len_on_bd;
-	struct parsing_flags pars_flags;
-	union eth_sgl_or_raw_data sgl_or_raw_data;
-};
-
-
-/*
- * The data for RSS setup ramrod
- */
-struct eth_halt_ramrod_data {
-	u32 client_id;
-	u32 reserved0;
-};
-
-/*
- * The data for statistics query ramrod
- */
-struct common_query_ramrod_data {
-#if defined(__BIG_ENDIAN)
-	u8 reserved0;
-	u8 collect_port;
-	u16 drv_counter;
-#elif defined(__LITTLE_ENDIAN)
-	u16 drv_counter;
-	u8 collect_port;
-	u8 reserved0;
-#endif
-	u32 ctr_id_vector;
-};
-
-
-/*
- * Place holder for ramrods protocol specific data
- */
-struct ramrod_data {
-	__le32 data_lo;
-	__le32 data_hi;
-};
-
-/*
- * union for ramrod data for Ethernet protocol (CQE) (force size of 16 bits)
- */
-union eth_ramrod_data {
-	struct ramrod_data general;
-};
-
-
-/*
- * Eth Rx Cqe structure- general structure for ramrods
- */
-struct common_ramrod_eth_rx_cqe {
-	u8 ramrod_type;
-#define COMMON_RAMROD_ETH_RX_CQE_TYPE (0x1<<0)
-#define COMMON_RAMROD_ETH_RX_CQE_TYPE_SHIFT 0
-#define COMMON_RAMROD_ETH_RX_CQE_ERROR (0x1<<1)
-#define COMMON_RAMROD_ETH_RX_CQE_ERROR_SHIFT 1
-#define COMMON_RAMROD_ETH_RX_CQE_RESERVED0 (0x3F<<2)
-#define COMMON_RAMROD_ETH_RX_CQE_RESERVED0_SHIFT 2
-	u8 conn_type;
-	__le16 reserved1;
-	__le32 conn_and_cmd_data;
-#define COMMON_RAMROD_ETH_RX_CQE_CID (0xFFFFFF<<0)
-#define COMMON_RAMROD_ETH_RX_CQE_CID_SHIFT 0
-#define COMMON_RAMROD_ETH_RX_CQE_CMD_ID (0xFF<<24)
-#define COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT 24
-	struct ramrod_data protocol_data;
-	__le32 reserved2[4];
-};
-
-/*
- * Rx Last CQE in page (in ETH)
- */
-struct eth_rx_cqe_next_page {
-	__le32 addr_lo;
-	__le32 addr_hi;
-	__le32 reserved[6];
-};
-
-/*
- * union for all eth rx cqe types (fix their sizes)
- */
-union eth_rx_cqe {
-	struct eth_fast_path_rx_cqe fast_path_cqe;
-	struct common_ramrod_eth_rx_cqe ramrod_cqe;
-	struct eth_rx_cqe_next_page next_page_cqe;
-};
-
-
-/*
- * common data for all protocols
- */
-struct spe_hdr {
-	__le32 conn_and_cmd_data;
-#define SPE_HDR_CID (0xFFFFFF<<0)
-#define SPE_HDR_CID_SHIFT 0
-#define SPE_HDR_CMD_ID (0xFF<<24)
-#define SPE_HDR_CMD_ID_SHIFT 24
-	__le16 type;
-#define SPE_HDR_CONN_TYPE (0xFF<<0)
-#define SPE_HDR_CONN_TYPE_SHIFT 0
-#define SPE_HDR_FUNCTION_ID (0xFF<<8)
-#define SPE_HDR_FUNCTION_ID_SHIFT 8
-	__le16 reserved1;
-};
-
-/*
- * Ethernet slow path element
- */
-union eth_specific_data {
-	u8 protocol_data[8];
-	struct regpair client_init_ramrod_init_data;
-	struct eth_halt_ramrod_data halt_ramrod_data;
-	struct regpair update_data_addr;
-	struct eth_common_ramrod_data common_ramrod_data;
-};
-
-/*
- * Ethernet slow path element
- */
-struct eth_spe {
-	struct spe_hdr hdr;
-	union eth_specific_data data;
-};
-
-
 /*
  * array of 13 bds as appears in the eth xstorm context
  */
@@ -2865,86 +3972,25 @@
 
 
 /*
- * Common configuration parameters per function in Tstorm
+ * VLAN mode on TX BDs
  */
-struct tstorm_eth_function_common_config {
-#if defined(__BIG_ENDIAN)
-	u8 reserved1;
-	u8 rss_result_mask;
-	u16 config_flags;
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY (0x1<<0)
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY_SHIFT 0
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY (0x1<<1)
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY_SHIFT 1
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY (0x1<<2)
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY_SHIFT 2
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY (0x1<<3)
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY_SHIFT 3
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE (0x7<<4)
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT 4
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA (0x1<<7)
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA_SHIFT 7
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE (0x1<<8)
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE_SHIFT 8
-#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0 (0x7F<<9)
-#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0_SHIFT 9
-#elif defined(__LITTLE_ENDIAN)
-	u16 config_flags;
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY (0x1<<0)
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY_SHIFT 0
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY (0x1<<1)
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY_SHIFT 1
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY (0x1<<2)
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY_SHIFT 2
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY (0x1<<3)
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY_SHIFT 3
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE (0x7<<4)
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT 4
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA (0x1<<7)
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA_SHIFT 7
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE (0x1<<8)
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE_SHIFT 8
-#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0 (0x7F<<9)
-#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0_SHIFT 9
-	u8 rss_result_mask;
-	u8 reserved1;
-#endif
-	u16 vlan_id[2];
+enum eth_tx_vlan_type {
+	X_ETH_NO_VLAN,
+	X_ETH_OUTBAND_VLAN,
+	X_ETH_INBAND_VLAN,
+	X_ETH_FW_ADDED_VLAN,
+	MAX_ETH_TX_VLAN_TYPE
 };
 
-/*
- * RSS idirection table update configuration
- */
-struct rss_update_config {
-#if defined(__BIG_ENDIAN)
-	u16 toe_rss_bitmap;
-	u16 flags;
-#define RSS_UPDATE_CONFIG_ETH_UPDATE_ENABLE (0x1<<0)
-#define RSS_UPDATE_CONFIG_ETH_UPDATE_ENABLE_SHIFT 0
-#define RSS_UPDATE_CONFIG_TOE_UPDATE_ENABLE (0x1<<1)
-#define RSS_UPDATE_CONFIG_TOE_UPDATE_ENABLE_SHIFT 1
-#define __RSS_UPDATE_CONFIG_RESERVED0 (0x3FFF<<2)
-#define __RSS_UPDATE_CONFIG_RESERVED0_SHIFT 2
-#elif defined(__LITTLE_ENDIAN)
-	u16 flags;
-#define RSS_UPDATE_CONFIG_ETH_UPDATE_ENABLE (0x1<<0)
-#define RSS_UPDATE_CONFIG_ETH_UPDATE_ENABLE_SHIFT 0
-#define RSS_UPDATE_CONFIG_TOE_UPDATE_ENABLE (0x1<<1)
-#define RSS_UPDATE_CONFIG_TOE_UPDATE_ENABLE_SHIFT 1
-#define __RSS_UPDATE_CONFIG_RESERVED0 (0x3FFF<<2)
-#define __RSS_UPDATE_CONFIG_RESERVED0_SHIFT 2
-	u16 toe_rss_bitmap;
-#endif
-	u32 reserved1;
-};
 
 /*
- * parameters for eth update ramrod
+ * Ethernet VLAN filtering mode in E1x
  */
-struct eth_update_ramrod_data {
-	struct tstorm_eth_function_common_config func_config;
-	u8 indirectionTable[128];
-	struct rss_update_config rss_config;
+enum eth_vlan_filter_mode {
+	ETH_VLAN_FILTER_ANY_VLAN,
+	ETH_VLAN_FILTER_SPECIFIC_VLAN,
+	ETH_VLAN_FILTER_CLASSIFY,
+	MAX_ETH_VLAN_FILTER_MODE
 };
 
 
@@ -2954,9 +4000,8 @@
 struct mac_configuration_hdr {
 	u8 length;
 	u8 offset;
-	u16 client_id;
-	u16 echo;
-	u16 reserved1;
+	__le16 client_id;
+	__le32 echo;
 };
 
 /*
@@ -2981,8 +4026,8 @@
 #define MAC_CONFIGURATION_ENTRY_BROADCAST_SHIFT 5
 #define MAC_CONFIGURATION_ENTRY_RESERVED1 (0x3<<6)
 #define MAC_CONFIGURATION_ENTRY_RESERVED1_SHIFT 6
-	u16 reserved0;
-	u32 clients_bit_vector;
+	__le16 reserved0;
+	__le32 clients_bit_vector;
 };
 
 /*
@@ -2995,6 +4040,36 @@
 
 
 /*
+ * Set-MAC command type (in E1x)
+ */
+enum set_mac_action_type {
+	T_ETH_MAC_COMMAND_INVALIDATE,
+	T_ETH_MAC_COMMAND_SET,
+	MAX_SET_MAC_ACTION_TYPE
+};
+
+
+/*
+ * tpa update ramrod data
+ */
+struct tpa_update_ramrod_data {
+	u8 update_ipv4;
+	u8 update_ipv6;
+	u8 client_id;
+	u8 max_tpa_queues;
+	u8 max_sges_for_packet;
+	u8 complete_on_both_clients;
+	__le16 reserved1;
+	__le16 sge_buff_size;
+	__le16 max_agg_size;
+	__le32 sge_page_base_lo;
+	__le32 sge_page_base_hi;
+	__le16 sge_pause_thr_low;
+	__le16 sge_pause_thr_high;
+};
+
+
+/*
  * approximate-match multicast filtering for E1H per function in Tstorm
  */
 struct tstorm_eth_approximate_match_multicast_filtering {
@@ -3003,35 +4078,50 @@
 
 
 /*
- * MAC filtering configuration parameters per port in Tstorm
+ * Common configuration parameters per function in Tstorm
  */
-struct tstorm_eth_mac_filter_config {
-	u32 ucast_drop_all;
-	u32 ucast_accept_all;
-	u32 mcast_drop_all;
-	u32 mcast_accept_all;
-	u32 bcast_drop_all;
-	u32 bcast_accept_all;
-	u32 vlan_filter[2];
-	u32 unmatched_unicast;
-	u32 reserved;
+struct tstorm_eth_function_common_config {
+	__le16 config_flags;
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY (0x1<<0)
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY_SHIFT 0
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY (0x1<<1)
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY_SHIFT 1
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY (0x1<<2)
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY_SHIFT 2
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY (0x1<<3)
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY_SHIFT 3
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE (0x7<<4)
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT 4
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE (0x1<<7)
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE_SHIFT 7
+#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0 (0xFF<<8)
+#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0_SHIFT 8
+	u8 rss_result_mask;
+	u8 reserved1;
+	__le16 vlan_id[2];
 };
 
 
 /*
- * common flag to indicate existence of TPA.
+ * MAC filtering configuration parameters per port in Tstorm
  */
-struct tstorm_eth_tpa_exist {
-#if defined(__BIG_ENDIAN)
-	u16 reserved1;
-	u8 reserved0;
-	u8 tpa_exist;
-#elif defined(__LITTLE_ENDIAN)
-	u8 tpa_exist;
-	u8 reserved0;
-	u16 reserved1;
-#endif
-	u32 reserved2;
+struct tstorm_eth_mac_filter_config {
+	__le32 ucast_drop_all;
+	__le32 ucast_accept_all;
+	__le32 mcast_drop_all;
+	__le32 mcast_accept_all;
+	__le32 bcast_accept_all;
+	__le32 vlan_filter[2];
+	__le32 unmatched_unicast;
+};
+
+
+/*
+ * tx only queue init ramrod data
+ */
+struct tx_queue_init_ramrod_data {
+	struct client_init_general_data general;
+	struct client_init_tx_data tx;
 };
 
 
@@ -3061,10 +4151,8 @@
  */
 struct cfc_del_event_data {
 	u32 cid;
-	u8 error;
-	u8 reserved0;
-	u16 reserved1;
-	u32 reserved2;
+	u32 reserved0;
+	u32 reserved1;
 };
 
 
@@ -3072,22 +4160,18 @@
  * per-port SAFC demo variables
  */
 struct cmng_flags_per_port {
-	u8 con_number[NUM_OF_PROTOCOLS];
 	u32 cmng_enables;
 #define CMNG_FLAGS_PER_PORT_FAIRNESS_VN (0x1<<0)
 #define CMNG_FLAGS_PER_PORT_FAIRNESS_VN_SHIFT 0
 #define CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN (0x1<<1)
 #define CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN_SHIFT 1
-#define CMNG_FLAGS_PER_PORT_FAIRNESS_PROTOCOL (0x1<<2)
-#define CMNG_FLAGS_PER_PORT_FAIRNESS_PROTOCOL_SHIFT 2
-#define CMNG_FLAGS_PER_PORT_RATE_SHAPING_PROTOCOL (0x1<<3)
-#define CMNG_FLAGS_PER_PORT_RATE_SHAPING_PROTOCOL_SHIFT 3
-#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS (0x1<<4)
-#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_SHIFT 4
-#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_MODE (0x1<<5)
-#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_MODE_SHIFT 5
-#define __CMNG_FLAGS_PER_PORT_RESERVED0 (0x3FFFFFF<<6)
-#define __CMNG_FLAGS_PER_PORT_RESERVED0_SHIFT 6
+#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS (0x1<<2)
+#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_SHIFT 2
+#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_MODE (0x1<<3)
+#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_MODE_SHIFT 3
+#define __CMNG_FLAGS_PER_PORT_RESERVED0 (0xFFFFFFF<<4)
+#define __CMNG_FLAGS_PER_PORT_RESERVED0_SHIFT 4
+	u32 __reserved1;
 };
 
 
@@ -3106,6 +4190,7 @@
 	u32 upper_bound;
 	u32 fair_threshold;
 	u32 fairness_timeout;
+	u32 reserved0;
 };
 
 /*
@@ -3122,65 +4207,65 @@
 	u16 __reserved1;
 #endif
 	u8 cos_to_traffic_types[MAX_COS_NUMBER];
-	u32 __reserved2;
 	u16 cos_to_pause_mask[NUM_OF_SAFC_BITS];
 };
 
 /*
- * per-port PFC variables
- */
-struct pfc_struct_per_port {
-	u8 priority_to_traffic_types[MAX_PFC_PRIORITIES];
-#if defined(__BIG_ENDIAN)
-	u16 pfc_pause_quanta_in_nanosec;
-	u8 __reserved0;
-	u8 priority_non_pausable_mask;
-#elif defined(__LITTLE_ENDIAN)
-	u8 priority_non_pausable_mask;
-	u8 __reserved0;
-	u16 pfc_pause_quanta_in_nanosec;
-#endif
-};
-
-/*
- * Priority and cos
- */
-struct priority_cos {
-#if defined(__BIG_ENDIAN)
-	u16 reserved1;
-	u8 cos;
-	u8 priority;
-#elif defined(__LITTLE_ENDIAN)
-	u8 priority;
-	u8 cos;
-	u16 reserved1;
-#endif
-	u32 reserved2;
-};
-
-/*
  * Per-port congestion management variables
  */
 struct cmng_struct_per_port {
 	struct rate_shaping_vars_per_port rs_vars;
 	struct fairness_vars_per_port fair_vars;
 	struct safc_struct_per_port safc_vars;
-	struct pfc_struct_per_port pfc_vars;
-#if defined(__BIG_ENDIAN)
-	u16 __reserved1;
-	u8 dcb_enabled;
-	u8 llfc_mode;
-#elif defined(__LITTLE_ENDIAN)
-	u8 llfc_mode;
-	u8 dcb_enabled;
-	u16 __reserved1;
-#endif
-	struct priority_cos
-		traffic_type_to_priority_cos[MAX_PFC_TRAFFIC_TYPES];
 	struct cmng_flags_per_port flags;
 };
 
 
+/*
+ * Protocol-common command ID for slow path elements
+ */
+enum common_spqe_cmd_id {
+	RAMROD_CMD_ID_COMMON_UNUSED,
+	RAMROD_CMD_ID_COMMON_FUNCTION_START,
+	RAMROD_CMD_ID_COMMON_FUNCTION_STOP,
+	RAMROD_CMD_ID_COMMON_CFC_DEL,
+	RAMROD_CMD_ID_COMMON_CFC_DEL_WB,
+	RAMROD_CMD_ID_COMMON_STAT_QUERY,
+	RAMROD_CMD_ID_COMMON_STOP_TRAFFIC,
+	RAMROD_CMD_ID_COMMON_START_TRAFFIC,
+	RAMROD_CMD_ID_COMMON_RESERVED1,
+	RAMROD_CMD_ID_COMMON_RESERVED2,
+	MAX_COMMON_SPQE_CMD_ID
+};
+
+
+/*
+ * Per-protocol connection types
+ */
+enum connection_type {
+	ETH_CONNECTION_TYPE,
+	TOE_CONNECTION_TYPE,
+	RDMA_CONNECTION_TYPE,
+	ISCSI_CONNECTION_TYPE,
+	FCOE_CONNECTION_TYPE,
+	RESERVED_CONNECTION_TYPE_0,
+	RESERVED_CONNECTION_TYPE_1,
+	RESERVED_CONNECTION_TYPE_2,
+	NONE_CONNECTION_TYPE,
+	MAX_CONNECTION_TYPE
+};
+
+
+/*
+ * Cos modes
+ */
+enum cos_mode {
+	OVERRIDE_COS,
+	STATIC_COS,
+	FW_WRR,
+	MAX_COS_MODE
+};
+
 
 /*
  * Dynamic HC counters set by the driver
@@ -3197,10 +4282,58 @@
 	struct regpair reserved[2];
 };
 
+
 /*
- * Dynamic host coalescing init parameters
+ * Vf-PF channel data in cstorm ram (non-triggered zone)
  */
-struct dynamic_hc_config {
+struct vf_pf_channel_zone_data {
+	u32 msg_addr_lo;
+	u32 msg_addr_hi;
+};
+
+/*
+ * zone for VF non-triggered data
+ */
+struct non_trigger_vf_zone {
+	struct vf_pf_channel_zone_data vf_pf_channel;
+};
+
+/*
+ * Vf-PF channel trigger zone in cstorm ram
+ */
+struct vf_pf_channel_zone_trigger {
+	u8 addr_valid;
+};
+
+/*
+ * zone that triggers the in-bound interrupt
+ */
+struct trigger_vf_zone {
+#if defined(__BIG_ENDIAN)
+	u16 reserved1;
+	u8 reserved0;
+	struct vf_pf_channel_zone_trigger vf_pf_channel;
+#elif defined(__LITTLE_ENDIAN)
+	struct vf_pf_channel_zone_trigger vf_pf_channel;
+	u8 reserved0;
+	u16 reserved1;
+#endif
+	u32 reserved2;
+};
+
+/*
+ * zone B per-VF data
+ */
+struct cstorm_vf_zone_data {
+	struct non_trigger_vf_zone non_trigger;
+	struct trigger_vf_zone trigger;
+};
+
+
+/*
+ * Dynamic host coalescing init parameters, per state machine
+ */
+struct dynamic_hc_sm_config {
 	u32 threshold[3];
 	u8 shift_per_protocol[HC_SB_MAX_DYNAMIC_INDICES];
 	u8 hc_timeout0[HC_SB_MAX_DYNAMIC_INDICES];
@@ -3209,114 +4342,114 @@
 	u8 hc_timeout3[HC_SB_MAX_DYNAMIC_INDICES];
 };
 
-
 /*
- * Protocol-common statistics collected by the Xstorm (per client)
+ * Dynamic host coalescing init parameters
  */
-struct xstorm_per_client_stats {
-	__le32 reserved0;
-	__le32 unicast_pkts_sent;
-	struct regpair unicast_bytes_sent;
-	struct regpair multicast_bytes_sent;
-	__le32 multicast_pkts_sent;
-	__le32 broadcast_pkts_sent;
-	struct regpair broadcast_bytes_sent;
-	__le16 stats_counter;
-	__le16 reserved1;
-	__le32 reserved2;
+struct dynamic_hc_config {
+	struct dynamic_hc_sm_config sm_config[HC_SB_MAX_SM];
 };
 
-/*
- * Common statistics collected by the Xstorm (per port)
- */
-struct xstorm_common_stats {
-	struct xstorm_per_client_stats client_statistics[MAX_STAT_COUNTER_ID];
-};
 
-/*
- * Protocol-common statistics collected by the Tstorm (per port)
- */
-struct tstorm_per_port_stats {
-	__le32 mac_filter_discard;
-	__le32 xxoverflow_discard;
-	__le32 brb_truncate_discard;
-	__le32 mac_discard;
-};
-
-/*
- * Protocol-common statistics collected by the Tstorm (per client)
- */
-struct tstorm_per_client_stats {
-	struct regpair rcv_unicast_bytes;
-	struct regpair rcv_broadcast_bytes;
-	struct regpair rcv_multicast_bytes;
-	struct regpair rcv_error_bytes;
-	__le32 checksum_discard;
-	__le32 packets_too_big_discard;
-	__le32 rcv_unicast_pkts;
-	__le32 rcv_broadcast_pkts;
-	__le32 rcv_multicast_pkts;
-	__le32 no_buff_discard;
-	__le32 ttl0_discard;
-	__le16 stats_counter;
-	__le16 reserved0;
-};
-
-/*
- * Protocol-common statistics collected by the Tstorm
- */
-struct tstorm_common_stats {
-	struct tstorm_per_port_stats port_statistics;
-	struct tstorm_per_client_stats client_statistics[MAX_STAT_COUNTER_ID];
-};
-
-/*
- * Protocol-common statistics collected by the Ustorm (per client)
- */
-struct ustorm_per_client_stats {
-	struct regpair ucast_no_buff_bytes;
-	struct regpair mcast_no_buff_bytes;
-	struct regpair bcast_no_buff_bytes;
-	__le32 ucast_no_buff_pkts;
-	__le32 mcast_no_buff_pkts;
-	__le32 bcast_no_buff_pkts;
-	__le16 stats_counter;
-	__le16 reserved0;
-};
-
-/*
- * Protocol-common statistics collected by the Ustorm
- */
-struct ustorm_common_stats {
-	struct ustorm_per_client_stats client_statistics[MAX_STAT_COUNTER_ID];
-};
-
-/*
- * Eth statistics query structure for the eth_stats_query ramrod
- */
-struct eth_stats_query {
-	struct xstorm_common_stats xstorm_common;
-	struct tstorm_common_stats tstorm_common;
-	struct ustorm_common_stats ustorm_common;
+struct e2_integ_data {
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define E2_INTEG_DATA_TESTING_EN (0x1<<0)
+#define E2_INTEG_DATA_TESTING_EN_SHIFT 0
+#define E2_INTEG_DATA_LB_TX (0x1<<1)
+#define E2_INTEG_DATA_LB_TX_SHIFT 1
+#define E2_INTEG_DATA_COS_TX (0x1<<2)
+#define E2_INTEG_DATA_COS_TX_SHIFT 2
+#define E2_INTEG_DATA_OPPORTUNISTICQM (0x1<<3)
+#define E2_INTEG_DATA_OPPORTUNISTICQM_SHIFT 3
+#define E2_INTEG_DATA_DPMTESTRELEASEDQ (0x1<<4)
+#define E2_INTEG_DATA_DPMTESTRELEASEDQ_SHIFT 4
+#define E2_INTEG_DATA_RESERVED (0x7<<5)
+#define E2_INTEG_DATA_RESERVED_SHIFT 5
+	u8 cos;
+	u8 voq;
+	u8 pbf_queue;
+#elif defined(__LITTLE_ENDIAN)
+	u8 pbf_queue;
+	u8 voq;
+	u8 cos;
+	u8 flags;
+#define E2_INTEG_DATA_TESTING_EN (0x1<<0)
+#define E2_INTEG_DATA_TESTING_EN_SHIFT 0
+#define E2_INTEG_DATA_LB_TX (0x1<<1)
+#define E2_INTEG_DATA_LB_TX_SHIFT 1
+#define E2_INTEG_DATA_COS_TX (0x1<<2)
+#define E2_INTEG_DATA_COS_TX_SHIFT 2
+#define E2_INTEG_DATA_OPPORTUNISTICQM (0x1<<3)
+#define E2_INTEG_DATA_OPPORTUNISTICQM_SHIFT 3
+#define E2_INTEG_DATA_DPMTESTRELEASEDQ (0x1<<4)
+#define E2_INTEG_DATA_DPMTESTRELEASEDQ_SHIFT 4
+#define E2_INTEG_DATA_RESERVED (0x7<<5)
+#define E2_INTEG_DATA_RESERVED_SHIFT 5
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 reserved3;
+	u8 reserved2;
+	u8 ramEn;
+#elif defined(__LITTLE_ENDIAN)
+	u8 ramEn;
+	u8 reserved2;
+	u16 reserved3;
+#endif
 };
 
 
 /*
  * set mac event data
  */
-struct set_mac_event_data {
-	u16 echo;
-	u16 reserved0;
+struct eth_event_data {
+	u32 echo;
+	u32 reserved0;
 	u32 reserved1;
+};
+
+
+/*
+ * pf-vf event data
+ */
+struct vf_pf_event_data {
+	u8 vf_id;
+	u8 reserved0;
+	u16 reserved1;
+	u32 msg_addr_lo;
+	u32 msg_addr_hi;
+};
+
+/*
+ * VF FLR event data
+ */
+struct vf_flr_event_data {
+	u8 vf_id;
+	u8 reserved0;
+	u16 reserved1;
 	u32 reserved2;
+	u32 reserved3;
+};
+
+/*
+ * malicious VF event data
+ */
+struct malicious_vf_event_data {
+	u8 vf_id;
+	u8 reserved0;
+	u16 reserved1;
+	u32 reserved2;
+	u32 reserved3;
 };
 
 /*
  * union for all event ring message types
  */
 union event_data {
-	struct set_mac_event_data set_mac_event;
+	struct vf_pf_event_data vf_pf_event;
+	struct eth_event_data eth_event;
 	struct cfc_del_event_data cfc_del_event;
+	struct vf_flr_event_data vf_flr_event;
+	struct malicious_vf_event_data malicious_vf_event;
 };
 
 
@@ -3343,7 +4476,7 @@
  */
 struct event_ring_msg {
 	u8 opcode;
-	u8 reserved0;
+	u8 error;
 	u16 reserved1;
 	union event_data data;
 };
@@ -3366,32 +4499,82 @@
 
 
 /*
+ * Common event ring opcodes
+ */
+enum event_ring_opcode {
+	EVENT_RING_OPCODE_VF_PF_CHANNEL,
+	EVENT_RING_OPCODE_FUNCTION_START,
+	EVENT_RING_OPCODE_FUNCTION_STOP,
+	EVENT_RING_OPCODE_CFC_DEL,
+	EVENT_RING_OPCODE_CFC_DEL_WB,
+	EVENT_RING_OPCODE_STAT_QUERY,
+	EVENT_RING_OPCODE_STOP_TRAFFIC,
+	EVENT_RING_OPCODE_START_TRAFFIC,
+	EVENT_RING_OPCODE_VF_FLR,
+	EVENT_RING_OPCODE_MALICIOUS_VF,
+	EVENT_RING_OPCODE_FORWARD_SETUP,
+	EVENT_RING_OPCODE_RSS_UPDATE_RULES,
+	EVENT_RING_OPCODE_RESERVED1,
+	EVENT_RING_OPCODE_RESERVED2,
+	EVENT_RING_OPCODE_SET_MAC,
+	EVENT_RING_OPCODE_CLASSIFICATION_RULES,
+	EVENT_RING_OPCODE_FILTERS_RULES,
+	EVENT_RING_OPCODE_MULTICAST_RULES,
+	MAX_EVENT_RING_OPCODE
+};
+
+
+/*
+ * Modes for fairness algorithm
+ */
+enum fairness_mode {
+	FAIRNESS_COS_WRR_MODE,
+	FAIRNESS_COS_ETS_MODE,
+	MAX_FAIRNESS_MODE
+};
+
+
+/*
  * per-vnic fairness variables
  */
 struct fairness_vars_per_vn {
 	u32 cos_credit_delta[MAX_COS_NUMBER];
-	u32 protocol_credit_delta[NUM_OF_PROTOCOLS];
 	u32 vn_credit_delta;
 	u32 __reserved0;
 };
 
 
 /*
+ * Priority and cos
+ */
+struct priority_cos {
+	u8 priority;
+	u8 cos;
+	__le16 reserved1;
+};
+
+/*
  * The data for flow control configuration
  */
 struct flow_control_configuration {
-	struct priority_cos
-		traffic_type_to_priority_cos[MAX_PFC_TRAFFIC_TYPES];
-#if defined(__BIG_ENDIAN)
-	u16 reserved1;
-	u8 dcb_version;
-	u8 dcb_enabled;
-#elif defined(__LITTLE_ENDIAN)
+	struct priority_cos traffic_type_to_priority_cos[MAX_TRAFFIC_TYPES];
 	u8 dcb_enabled;
 	u8 dcb_version;
-	u16 reserved1;
-#endif
-	u32 reserved2;
+	u8 dont_add_pri_0_en;
+	u8 reserved1;
+	__le32 reserved2;
+};
+
+
+/*
+ * FUNCTION START ramrod data
+ */
+struct function_start_data {
+	__le16 function_mode;
+	__le16 sd_vlan_tag;
+	u16 reserved;
+	u8 path_id;
+	u8 network_cos_mode;
 };
 
 
@@ -3504,13 +4687,13 @@
 	struct pci_entity p_func;
 #if defined(__BIG_ENDIAN)
 	u8 rsrv0;
+	u8 state;
 	u8 dhc_qzone_id;
-	u8 __dynamic_hc_level;
 	u8 same_igu_sb_1b;
 #elif defined(__LITTLE_ENDIAN)
 	u8 same_igu_sb_1b;
-	u8 __dynamic_hc_level;
 	u8 dhc_qzone_id;
+	u8 state;
 	u8 rsrv0;
 #endif
 	struct regpair rsrv1[2];
@@ -3518,18 +4701,30 @@
 
 
 /*
+ * Segment types for host coalescing
+ */
+enum hc_segment {
+	HC_REGULAR_SEGMENT,
+	HC_DEFAULT_SEGMENT,
+	MAX_HC_SEGMENT
+};
+
+
+/*
  * The fast-path status block meta-data
  */
 struct hc_sp_status_block_data {
 	struct regpair host_sb_addr;
 #if defined(__BIG_ENDIAN)
-	u16 rsrv;
+	u8 rsrv1;
+	u8 state;
 	u8 igu_seg_id;
 	u8 igu_sb_id;
 #elif defined(__LITTLE_ENDIAN)
 	u8 igu_sb_id;
 	u8 igu_seg_id;
-	u16 rsrv;
+	u8 state;
+	u8 rsrv1;
 #endif
 	struct pci_entity p_func;
 };
@@ -3554,6 +4749,129 @@
 
 
 /*
+ * IGU block operation modes (in Everest2)
+ */
+enum igu_mode {
+	HC_IGU_BC_MODE,
+	HC_IGU_NBC_MODE,
+	MAX_IGU_MODE
+};
+
+
+/*
+ * IP versions
+ */
+enum ip_ver {
+	IP_V4,
+	IP_V6,
+	MAX_IP_VER
+};
+
+
+/*
+ * Multi-function modes
+ */
+enum mf_mode {
+	SINGLE_FUNCTION,
+	MULTI_FUNCTION_SD,
+	MULTI_FUNCTION_SI,
+	MULTI_FUNCTION_RESERVED,
+	MAX_MF_MODE
+};
+
+/*
+ * Protocol-common statistics collected by the Tstorm (per pf)
+ */
+struct tstorm_per_pf_stats {
+	struct regpair rcv_error_bytes;
+};
+
+/*
+ * per-PF statistics container
+ */
+struct per_pf_stats {
+	struct tstorm_per_pf_stats tstorm_pf_statistics;
+};
+
+
+/*
+ * Protocol-common statistics collected by the Tstorm (per port)
+ */
+struct tstorm_per_port_stats {
+	__le32 mac_discard;
+	__le32 mac_filter_discard;
+	__le32 brb_truncate_discard;
+	__le32 mf_tag_discard;
+	__le32 packet_drop;
+	__le32 reserved;
+};
+
+/*
+ * per-port statistics container
+ */
+struct per_port_stats {
+	struct tstorm_per_port_stats tstorm_port_statistics;
+};
+
+
+/*
+ * Protocol-common statistics collected by the Tstorm (per client)
+ */
+struct tstorm_per_queue_stats {
+	struct regpair rcv_ucast_bytes;
+	__le32 rcv_ucast_pkts;
+	__le32 checksum_discard;
+	struct regpair rcv_bcast_bytes;
+	__le32 rcv_bcast_pkts;
+	__le32 pkts_too_big_discard;
+	struct regpair rcv_mcast_bytes;
+	__le32 rcv_mcast_pkts;
+	__le32 ttl0_discard;
+	__le16 no_buff_discard;
+	__le16 reserved0;
+	__le32 reserved1;
+};
+
+/*
+ * Protocol-common statistics collected by the Ustorm (per client)
+ */
+struct ustorm_per_queue_stats {
+	struct regpair ucast_no_buff_bytes;
+	struct regpair mcast_no_buff_bytes;
+	struct regpair bcast_no_buff_bytes;
+	__le32 ucast_no_buff_pkts;
+	__le32 mcast_no_buff_pkts;
+	__le32 bcast_no_buff_pkts;
+	__le32 coalesced_pkts;
+	struct regpair coalesced_bytes;
+	__le32 coalesced_events;
+	__le32 coalesced_aborts;
+};
+
+/*
+ * Protocol-common statistics collected by the Xstorm (per client)
+ */
+struct xstorm_per_queue_stats {
+	struct regpair ucast_bytes_sent;
+	struct regpair mcast_bytes_sent;
+	struct regpair bcast_bytes_sent;
+	__le32 ucast_pkts_sent;
+	__le32 mcast_pkts_sent;
+	__le32 bcast_pkts_sent;
+	__le32 error_drop_pkts;
+};
+
+/*
+ * per-queue statistics container
+ */
+struct per_queue_stats {
+	struct tstorm_per_queue_stats tstorm_queue_statistics;
+	struct ustorm_per_queue_stats ustorm_queue_statistics;
+	struct xstorm_per_queue_stats xstorm_queue_statistics;
+};
+
+
+/*
  * FW version stored in first line of pram
  */
 struct pram_fw_version {
@@ -3582,7 +4900,6 @@
 	u8 protocol_data[8];
 	struct regpair phy_address;
 	struct regpair mac_config_addr;
-	struct common_query_ramrod_data query_ramrod_data;
 };
 
 /*
@@ -3613,7 +4930,6 @@
  * per-vnic rate shaping variables
  */
 struct rate_shaping_vars_per_vn {
-	struct rate_shaping_counter protocol_counters[NUM_OF_PROTOCOLS];
 	struct rate_shaping_counter vn_counter;
 };
 
@@ -3628,39 +4944,100 @@
 
 
 /*
- * eth/toe flags that indicate if to query
+ * Protocol-common statistics counter
  */
-struct stats_indication_flags {
-	u32 collect_eth;
-	u32 collect_toe;
+struct stats_counter {
+	__le16 xstats_counter;
+	__le16 reserved0;
+	__le32 reserved1;
+	__le16 tstats_counter;
+	__le16 reserved2;
+	__le32 reserved3;
+	__le16 ustats_counter;
+	__le16 reserved4;
+	__le32 reserved5;
+	__le16 cstats_counter;
+	__le16 reserved6;
+	__le32 reserved7;
 };
 
 
 /*
- * per-port PFC variables
+ * statistics query entry
  */
-struct storm_pfc_struct_per_port {
-#if defined(__BIG_ENDIAN)
-	u16 mid_mac_addr;
-	u16 msb_mac_addr;
-#elif defined(__LITTLE_ENDIAN)
-	u16 msb_mac_addr;
-	u16 mid_mac_addr;
-#endif
-#if defined(__BIG_ENDIAN)
-	u16 pfc_pause_quanta_in_nanosec;
-	u16 lsb_mac_addr;
-#elif defined(__LITTLE_ENDIAN)
-	u16 lsb_mac_addr;
-	u16 pfc_pause_quanta_in_nanosec;
-#endif
+struct stats_query_entry {
+	u8 kind;
+	u8 index;
+	__le16 funcID;
+	__le32 reserved;
+	struct regpair address;
 };
 
 /*
- * Per-port congestion management variables
+ * statistics command
  */
-struct storm_cmng_struct_per_port {
-	struct storm_pfc_struct_per_port pfc_vars;
+struct stats_query_cmd_group {
+	struct stats_query_entry query[STATS_QUERY_CMD_COUNT];
+};
+
+
+/*
+ * statistic command header
+ */
+struct stats_query_header {
+	u8 cmd_num;
+	u8 reserved0;
+	__le16 drv_stats_counter;
+	__le32 reserved1;
+	struct regpair stats_counters_addrs;
+};
+
+
+/*
+ * Types of statistics query entry
+ */
+enum stats_query_type {
+	STATS_TYPE_QUEUE,
+	STATS_TYPE_PORT,
+	STATS_TYPE_PF,
+	STATS_TYPE_TOE,
+	STATS_TYPE_FCOE,
+	MAX_STATS_QUERY_TYPE
+};
+
+
+/*
+ * Indicates the function status block state
+ */
+enum status_block_state {
+	SB_DISABLED,
+	SB_ENABLED,
+	SB_CLEANED,
+	MAX_STATUS_BLOCK_STATE
+};
+
+
+/*
+ * Storm IDs (including attentions for IGU related enums)
+ */
+enum storm_id {
+	USTORM_ID,
+	CSTORM_ID,
+	XSTORM_ID,
+	TSTORM_ID,
+	ATTENTION_ID,
+	MAX_STORM_ID
+};
+
+
+/*
+ * Traffic types used in ETS and flow control algorithms
+ */
+enum traffic_type {
+	LLFC_TRAFFIC_TYPE_NW,
+	LLFC_TRAFFIC_TYPE_FCOE,
+	LLFC_TRAFFIC_TYPE_ISCSI,
+	MAX_TRAFFIC_TYPE
 };
 
 
@@ -3715,6 +5092,16 @@
 
 
 /*
+ * State of VF-PF channel
+ */
+enum vf_pf_channel_state {
+	VF_PF_CHANNEL_STATE_READY,
+	VF_PF_CHANNEL_STATE_WAITING_FOR_ACK,
+	MAX_VF_PF_CHANNEL_STATE
+};
+
+
+/*
  * zone A per-queue data
  */
 struct xstorm_queue_zone_data {
diff --git a/drivers/net/bnx2x/bnx2x_init.h b/drivers/net/bnx2x/bnx2x_init.h
index d539920..df9f196 100644
--- a/drivers/net/bnx2x/bnx2x_init.h
+++ b/drivers/net/bnx2x/bnx2x_init.h
@@ -15,98 +15,34 @@
 #ifndef BNX2X_INIT_H
 #define BNX2X_INIT_H
 
-/* RAM0 size in bytes */
-#define STORM_INTMEM_SIZE_E1		0x5800
-#define STORM_INTMEM_SIZE_E1H		0x10000
-#define STORM_INTMEM_SIZE(bp) ((CHIP_IS_E1(bp) ? STORM_INTMEM_SIZE_E1 : \
-						    STORM_INTMEM_SIZE_E1H) / 4)
-
-
 /* Init operation types and structures */
-/* Common for both E1 and E1H */
-#define OP_RD			0x1 /* read single register */
-#define OP_WR			0x2 /* write single register */
-#define OP_IW			0x3 /* write single register using mailbox */
-#define OP_SW			0x4 /* copy a string to the device */
-#define OP_SI			0x5 /* copy a string using mailbox */
-#define OP_ZR			0x6 /* clear memory */
-#define OP_ZP			0x7 /* unzip then copy with DMAE */
-#define OP_WR_64		0x8 /* write 64 bit pattern */
-#define OP_WB			0x9 /* copy a string using DMAE */
+enum {
+	OP_RD = 0x1,	/* read a single register */
+	OP_WR,		/* write a single register */
+	OP_SW,		/* copy a string to the device */
+	OP_ZR,		/* clear memory */
+	OP_ZP,		/* unzip then copy with DMAE */
+	OP_WR_64,	/* write 64 bit pattern */
+	OP_WB,		/* copy a string using DMAE */
+	OP_WB_ZR,	/* clear a string using DMAE or indirect write */
+	/* Skip the following ops if all of the init modes don't match */
+	OP_IF_MODE_OR,
+	/* Skip the following ops if any of the init modes don't match */
+	OP_IF_MODE_AND,
+	OP_MAX
+};
 
-/* FPGA and EMUL specific operations */
-#define OP_WR_EMUL		0xa /* write single register on Emulation */
-#define OP_WR_FPGA		0xb /* write single register on FPGA */
-#define OP_WR_ASIC		0xc /* write single register on ASIC */
-
-/* Init stages */
-/* Never reorder stages !!! */
-#define COMMON_STAGE		0
-#define PORT0_STAGE		1
-#define PORT1_STAGE		2
-#define FUNC0_STAGE		3
-#define FUNC1_STAGE		4
-#define FUNC2_STAGE		5
-#define FUNC3_STAGE		6
-#define FUNC4_STAGE		7
-#define FUNC5_STAGE		8
-#define FUNC6_STAGE		9
-#define FUNC7_STAGE		10
-#define STAGE_IDX_MAX		11
-
-#define STAGE_START		0
-#define STAGE_END		1
-
-
-/* Indices of blocks */
-#define PRS_BLOCK		0
-#define SRCH_BLOCK		1
-#define TSDM_BLOCK		2
-#define TCM_BLOCK		3
-#define BRB1_BLOCK		4
-#define TSEM_BLOCK		5
-#define PXPCS_BLOCK		6
-#define EMAC0_BLOCK		7
-#define EMAC1_BLOCK		8
-#define DBU_BLOCK		9
-#define MISC_BLOCK		10
-#define DBG_BLOCK		11
-#define NIG_BLOCK		12
-#define MCP_BLOCK		13
-#define UPB_BLOCK		14
-#define CSDM_BLOCK		15
-#define USDM_BLOCK		16
-#define CCM_BLOCK		17
-#define UCM_BLOCK		18
-#define USEM_BLOCK		19
-#define CSEM_BLOCK		20
-#define XPB_BLOCK		21
-#define DQ_BLOCK		22
-#define TIMERS_BLOCK		23
-#define XSDM_BLOCK		24
-#define QM_BLOCK		25
-#define PBF_BLOCK		26
-#define XCM_BLOCK		27
-#define XSEM_BLOCK		28
-#define CDU_BLOCK		29
-#define DMAE_BLOCK		30
-#define PXP_BLOCK		31
-#define CFC_BLOCK		32
-#define HC_BLOCK		33
-#define PXP2_BLOCK		34
-#define MISC_AEU_BLOCK		35
-#define PGLUE_B_BLOCK		36
-#define IGU_BLOCK		37
-#define ATC_BLOCK		38
-#define QM_4PORT_BLOCK		39
-#define XSEM_4PORT_BLOCK		40
-
+enum {
+	STAGE_START,
+	STAGE_END,
+};
 
 /* Returns the index of start or end of a specific block stage in ops array*/
 #define BLOCK_OPS_IDX(block, stage, end) \
-			(2*(((block)*STAGE_IDX_MAX) + (stage)) + (end))
+	(2*(((block)*NUM_OF_INIT_PHASES) + (stage)) + (end))
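For example, with the init-phase and block enums added below (NUM_OF_INIT_PHASES = 11, BLOCK_PRS = 16), BLOCK_OPS_IDX(BLOCK_PRS, PHASE_COMMON, STAGE_START) = 2*(16*11 + 0) + 0 = 352, and the matching STAGE_END entry is index 353.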
 
 
+/* structs for the various opcodes */
 struct raw_op {
 	u32 op:8;
 	u32 offset:24;
@@ -116,7 +52,7 @@
 struct op_read {
 	u32 op:8;
 	u32 offset:24;
-	u32 pad;
+	u32 val;
 };
 
 struct op_write {
@@ -125,15 +61,15 @@
 	u32 val;
 };
 
-struct op_string_write {
+struct op_arr_write {
 	u32 op:8;
 	u32 offset:24;
-#ifdef __LITTLE_ENDIAN
-	u16 data_off;
-	u16 data_len;
-#else /* __BIG_ENDIAN */
+#ifdef __BIG_ENDIAN
 	u16 data_len;
 	u16 data_off;
+#else /* __LITTLE_ENDIAN */
+	u16 data_off;
+	u16 data_len;
 #endif
 };
 
@@ -143,14 +79,210 @@
 	u32 len;
 };
 
+struct op_if_mode {
+	u32 op:8;
+	u32 cmd_offset:24;
+	u32 mode_bit_map;
+};
+
+
 union init_op {
 	struct op_read		read;
 	struct op_write		write;
-	struct op_string_write	str_wr;
+	struct op_arr_write	arr_wr;
 	struct op_zero		zero;
 	struct raw_op		raw;
+	struct op_if_mode	if_mode;
 };
 
+
+/* Init Phases */
+enum {
+	PHASE_COMMON,
+	PHASE_PORT0,
+	PHASE_PORT1,
+	PHASE_PF0,
+	PHASE_PF1,
+	PHASE_PF2,
+	PHASE_PF3,
+	PHASE_PF4,
+	PHASE_PF5,
+	PHASE_PF6,
+	PHASE_PF7,
+	NUM_OF_INIT_PHASES
+};
+
+/* Init Modes */
+enum {
+	MODE_ASIC                      = 0x00000001,
+	MODE_FPGA                      = 0x00000002,
+	MODE_EMUL                      = 0x00000004,
+	MODE_E2                        = 0x00000008,
+	MODE_E3                        = 0x00000010,
+	MODE_PORT2                     = 0x00000020,
+	MODE_PORT4                     = 0x00000040,
+	MODE_SF                        = 0x00000080,
+	MODE_MF                        = 0x00000100,
+	MODE_MF_SD                     = 0x00000200,
+	MODE_MF_SI                     = 0x00000400,
+	MODE_MF_NIV                    = 0x00000800,
+	MODE_E3_A0                     = 0x00001000,
+	MODE_E3_B0                     = 0x00002000,
+	MODE_COS_BC                    = 0x00004000,
+	MODE_COS3                      = 0x00008000,
+	MODE_COS6                      = 0x00010000,
+	MODE_LITTLE_ENDIAN             = 0x00020000,
+	MODE_BIG_ENDIAN                = 0x00040000,
+};
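These mode flags are OR-ed into a single bitmap that the OP_IF_MODE_OR/OP_IF_MODE_AND ops test against. A hedged sketch of one such bitmap (the function is illustrative, not part of the patch):

/* Illustrative only: the init-mode bitmap an E2 ASIC in single-function,
 * 2-port mode on a little-endian host would present. */
static u32 example_init_mode_flags(void)
{
	return MODE_ASIC | MODE_E2 | MODE_PORT2 | MODE_SF |
	       MODE_LITTLE_ENDIAN;
}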
+
+/* Init Blocks */
+enum {
+	BLOCK_ATC,
+	BLOCK_BRB1,
+	BLOCK_CCM,
+	BLOCK_CDU,
+	BLOCK_CFC,
+	BLOCK_CSDM,
+	BLOCK_CSEM,
+	BLOCK_DBG,
+	BLOCK_DMAE,
+	BLOCK_DORQ,
+	BLOCK_HC,
+	BLOCK_IGU,
+	BLOCK_MISC,
+	BLOCK_NIG,
+	BLOCK_PBF,
+	BLOCK_PGLUE_B,
+	BLOCK_PRS,
+	BLOCK_PXP2,
+	BLOCK_PXP,
+	BLOCK_QM,
+	BLOCK_SRC,
+	BLOCK_TCM,
+	BLOCK_TM,
+	BLOCK_TSDM,
+	BLOCK_TSEM,
+	BLOCK_UCM,
+	BLOCK_UPB,
+	BLOCK_USDM,
+	BLOCK_USEM,
+	BLOCK_XCM,
+	BLOCK_XPB,
+	BLOCK_XSDM,
+	BLOCK_XSEM,
+	BLOCK_MISC_AEU,
+	NUM_OF_INIT_BLOCKS
+};
+
+/* QM queue numbers */
+#define BNX2X_ETH_Q		0
+#define BNX2X_TOE_Q		3
+#define BNX2X_TOE_ACK_Q		6
+#define BNX2X_ISCSI_Q		9
+#define BNX2X_ISCSI_ACK_Q	8
+#define BNX2X_FCOE_Q		10
+
+/* Vnics per mode */
+#define BNX2X_PORT2_MODE_NUM_VNICS 4
+#define BNX2X_PORT4_MODE_NUM_VNICS 2
+
+/* COS offset for port1 in E3 B0 4port mode */
+#define BNX2X_E3B0_PORT1_COS_OFFSET 3
+
+/* QM Register addresses */
+#define BNX2X_Q_VOQ_REG_ADDR(pf_q_num)\
+	(QM_REG_QVOQIDX_0 + 4 * (pf_q_num))
+#define BNX2X_VOQ_Q_REG_ADDR(cos, pf_q_num)\
+	(QM_REG_VOQQMASK_0_LSB + 4 * ((cos) * 2 + ((pf_q_num) >> 5)))
+#define BNX2X_Q_CMDQ_REG_ADDR(pf_q_num)\
+	(QM_REG_BYTECRDCMDQ_0 + 4 * ((pf_q_num) >> 4))
+
+/* extracts the QM queue number for the specified port and vnic */
+#define BNX2X_PF_Q_NUM(q_num, port, vnic)\
+	((((port) << 1) | (vnic)) * 16 + (q_num))
+
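For example, BNX2X_PF_Q_NUM(BNX2X_ETH_Q, 1, 1) = ((1<<1 | 1) * 16) + 0 = 48, i.e. the ETH queue of vnic 1 on port 1 is QM queue 48.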
+
+/* Maps the specified queue to the specified COS */
+static inline void bnx2x_map_q_cos(struct bnx2x *bp, u32 q_num, u32 new_cos)
+{
+	/* find current COS mapping */
+	u32 curr_cos = REG_RD(bp, QM_REG_QVOQIDX_0 + q_num * 4);
+
+	/* check if queue->COS mapping has changed */
+	if (curr_cos != new_cos) {
+		u32 num_vnics = BNX2X_PORT2_MODE_NUM_VNICS;
+		u32 reg_addr, reg_bit_map, vnic;
+
+		/* update parameters for 4port mode */
+		if (INIT_MODE_FLAGS(bp) & MODE_PORT4) {
+			num_vnics = BNX2X_PORT4_MODE_NUM_VNICS;
+			if (BP_PORT(bp)) {
+				curr_cos += BNX2X_E3B0_PORT1_COS_OFFSET;
+				new_cos += BNX2X_E3B0_PORT1_COS_OFFSET;
+			}
+		}
+
+		/* change queue mapping for each VNIC */
+		for (vnic = 0; vnic < num_vnics; vnic++) {
+			u32 pf_q_num =
+				BNX2X_PF_Q_NUM(q_num, BP_PORT(bp), vnic);
+			u32 q_bit_map = 1 << (pf_q_num & 0x1f);
+
+			/* overwrite queue->VOQ mapping */
+			REG_WR(bp, BNX2X_Q_VOQ_REG_ADDR(pf_q_num), new_cos);
+
+			/* clear queue bit from current COS bit map */
+			reg_addr = BNX2X_VOQ_Q_REG_ADDR(curr_cos, pf_q_num);
+			reg_bit_map = REG_RD(bp, reg_addr);
+			REG_WR(bp, reg_addr, reg_bit_map & (~q_bit_map));
+
+			/* set queue bit in new COS bit map */
+			reg_addr = BNX2X_VOQ_Q_REG_ADDR(new_cos, pf_q_num);
+			reg_bit_map = REG_RD(bp, reg_addr);
+			REG_WR(bp, reg_addr, reg_bit_map | q_bit_map);
+
+			/* set/clear queue bit in command-queue bit map
+			(E2/E3A0 only, valid COS values are 0/1) */
+			if (!(INIT_MODE_FLAGS(bp) & MODE_E3_B0)) {
+				reg_addr = BNX2X_Q_CMDQ_REG_ADDR(pf_q_num);
+				reg_bit_map = REG_RD(bp, reg_addr);
+				q_bit_map = 1 << (2 * (pf_q_num & 0xf));
+				reg_bit_map = new_cos ?
+					      (reg_bit_map | q_bit_map) :
+					      (reg_bit_map & (~q_bit_map));
+				REG_WR(bp, reg_addr, reg_bit_map);
+			}
+		}
+	}
+}
+
+/* Configures the QM according to the specified per-traffic-type COSes */
+static inline void bnx2x_dcb_config_qm(struct bnx2x *bp,
+				       struct priority_cos *traffic_cos)
+{
+	bnx2x_map_q_cos(bp, BNX2X_FCOE_Q,
+			traffic_cos[LLFC_TRAFFIC_TYPE_FCOE].cos);
+	bnx2x_map_q_cos(bp, BNX2X_ISCSI_Q,
+			traffic_cos[LLFC_TRAFFIC_TYPE_ISCSI].cos);
+	if (INIT_MODE_FLAGS(bp) & MODE_COS_BC) {
+		/* required only in backward compatible COS mode */
+		bnx2x_map_q_cos(bp, BNX2X_ETH_Q,
+				traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos);
+		bnx2x_map_q_cos(bp, BNX2X_TOE_Q,
+				traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos);
+		bnx2x_map_q_cos(bp, BNX2X_TOE_ACK_Q,
+				traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos);
+		bnx2x_map_q_cos(bp, BNX2X_ISCSI_ACK_Q,
+				traffic_cos[LLFC_TRAFFIC_TYPE_ISCSI].cos);
+	}
+}
+
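A hedged usage sketch: once DCBX negotiation has produced a traffic-type-to-COS table (for instance the traffic_type_to_priority_cos array of struct flow_control_configuration above), the QM queues can be re-mapped in one call (the wrapper below is illustrative):

/* Illustrative usage only: fc_cfg is assumed to hold negotiated DCBX data. */
static void example_apply_dcb_cos(struct bnx2x *bp,
				  struct flow_control_configuration *fc_cfg)
{
	bnx2x_dcb_config_qm(bp, fc_cfg->traffic_type_to_priority_cos);
}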
+
+
 #define INITOP_SET		0	/* set the HW directly */
 #define INITOP_CLEAR		1	/* clear the HW directly */
 #define INITOP_INIT		2	/* set the init-value array */
@@ -245,12 +377,15 @@
 	BLOCK_PRTY_INFO_0(PXP2,	0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff),
 	BLOCK_PRTY_INFO_1(PXP2,	0x7ff, 0x7f, 0x7f, 0x7ff),
 	BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0),
+	BLOCK_PRTY_INFO(NIG, 0xffffffff, 0x3fffffff, 0xffffffff, 0),
+	BLOCK_PRTY_INFO_0(NIG,	0xffffffff, 0, 0, 0xffffffff),
+	BLOCK_PRTY_INFO_1(NIG,	0xffff, 0, 0, 0xffff),
 	BLOCK_PRTY_INFO(IGU, 0x7ff, 0, 0, 0x7ff),
 	BLOCK_PRTY_INFO(MISC, 0x1, 0x1, 0x1, 0x1),
 	BLOCK_PRTY_INFO(QM, 0, 0x1ff, 0xfff, 0xfff),
 	BLOCK_PRTY_INFO(DORQ, 0, 0x3, 0x3, 0x3),
 	{GRCBASE_UPB + PB_REG_PB_PRTY_MASK,
-		GRCBASE_UPB + PB_REG_PB_PRTY_STS_CLR, 0,
+		GRCBASE_UPB + PB_REG_PB_PRTY_STS_CLR, 0xf,
 		{0xf, 0xf, 0xf}, "UPB"},
 	{GRCBASE_XPB + PB_REG_PB_PRTY_MASK,
 		GRCBASE_XPB + PB_REG_PB_PRTY_STS_CLR, 0,
@@ -262,10 +397,16 @@
 	BLOCK_PRTY_INFO(DMAE, 0, 0xf, 0xf, 0xf),
 	BLOCK_PRTY_INFO(BRB1, 0, 0xf, 0xf, 0xf),
 	BLOCK_PRTY_INFO(PRS, (1<<6), 0xff, 0xff, 0xff),
+	BLOCK_PRTY_INFO(PBF, 0, 0, 0x3ffff, 0xfffffff),
+	BLOCK_PRTY_INFO(TM, 0, 0, 0x7f, 0x7f),
 	BLOCK_PRTY_INFO(TSDM, 0x18, 0x7ff, 0x7ff, 0x7ff),
 	BLOCK_PRTY_INFO(CSDM, 0x8, 0x7ff, 0x7ff, 0x7ff),
 	BLOCK_PRTY_INFO(USDM, 0x38, 0x7ff, 0x7ff, 0x7ff),
 	BLOCK_PRTY_INFO(XSDM, 0x8, 0x7ff, 0x7ff, 0x7ff),
+	BLOCK_PRTY_INFO(TCM, 0, 0, 0x7ffffff, 0x7ffffff),
+	BLOCK_PRTY_INFO(CCM, 0, 0, 0x7ffffff, 0x7ffffff),
+	BLOCK_PRTY_INFO(UCM, 0, 0, 0x7ffffff, 0x7ffffff),
+	BLOCK_PRTY_INFO(XCM, 0, 0, 0x3fffffff, 0x3fffffff),
 	BLOCK_PRTY_INFO_0(TSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff),
 	BLOCK_PRTY_INFO_1(TSEM, 0, 0x3, 0x1f, 0x3f),
 	BLOCK_PRTY_INFO_0(USEM, 0, 0xffffffff, 0xffffffff, 0xffffffff),
diff --git a/drivers/net/bnx2x/bnx2x_init_ops.h b/drivers/net/bnx2x/bnx2x_init_ops.h
index aafd023..7ec1724 100644
--- a/drivers/net/bnx2x/bnx2x_init_ops.h
+++ b/drivers/net/bnx2x/bnx2x_init_ops.h
@@ -15,13 +15,39 @@
 #ifndef BNX2X_INIT_OPS_H
 #define BNX2X_INIT_OPS_H
 
+
+#ifndef BP_ILT
+#define BP_ILT(bp)	NULL
+#endif
+
+#ifndef BP_FUNC
+#define BP_FUNC(bp)	0
+#endif
+
+#ifndef BP_PORT
+#define BP_PORT(bp)	0
+#endif
+
+#ifndef BNX2X_ILT_FREE
+#define BNX2X_ILT_FREE(x, y, sz)
+#endif
+
+#ifndef BNX2X_ILT_ZALLOC
+#define BNX2X_ILT_ZALLOC(x, y, sz)
+#endif
+
+#ifndef ILOG2
+#define ILOG2(x)	x
+#endif
+
 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len);
 static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val);
-static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
-				      u32 addr, u32 len);
+static void bnx2x_write_dmae_phys_len(struct bnx2x *bp,
+				      dma_addr_t phys_addr, u32 addr,
+				      u32 len);
 
-static void bnx2x_init_str_wr(struct bnx2x *bp, u32 addr, const u32 *data,
-			      u32 len)
+static void bnx2x_init_str_wr(struct bnx2x *bp, u32 addr,
+			      const u32 *data, u32 len)
 {
 	u32 i;
 
@@ -29,24 +55,32 @@
 		REG_WR(bp, addr + i*4, data[i]);
 }
 
-static void bnx2x_init_ind_wr(struct bnx2x *bp, u32 addr, const u32 *data,
-			      u32 len)
+static void bnx2x_init_ind_wr(struct bnx2x *bp, u32 addr,
+			      const u32 *data, u32 len)
 {
 	u32 i;
 
 	for (i = 0; i < len; i++)
-		REG_WR_IND(bp, addr + i*4, data[i]);
+		bnx2x_reg_wr_ind(bp, addr + i*4, data[i]);
 }
 
-static void bnx2x_write_big_buf(struct bnx2x *bp, u32 addr, u32 len)
+static void bnx2x_write_big_buf(struct bnx2x *bp, u32 addr, u32 len,
+				u8 wb)
 {
 	if (bp->dmae_ready)
 		bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len);
+	else if (wb)
+		/*
+		 * Wide bus registers with no dmae need to be written
+		 * using indirect write.
+		 */
+		bnx2x_init_ind_wr(bp, addr, GUNZIP_BUF(bp), len);
 	else
 		bnx2x_init_str_wr(bp, addr, GUNZIP_BUF(bp), len);
 }
 
-static void bnx2x_init_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
+static void bnx2x_init_fill(struct bnx2x *bp, u32 addr, int fill,
+			    u32 len, u8 wb)
 {
 	u32 buf_len = (((len*4) > FW_BUF_SIZE) ? FW_BUF_SIZE : (len*4));
 	u32 buf_len32 = buf_len/4;
@@ -57,12 +91,20 @@
 	for (i = 0; i < len; i += buf_len32) {
 		u32 cur_len = min(buf_len32, len - i);
 
-		bnx2x_write_big_buf(bp, addr + i*4, cur_len);
+		bnx2x_write_big_buf(bp, addr + i*4, cur_len, wb);
 	}
 }
 
-static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr, const u32 *data,
-			     u32 len64)
+static void bnx2x_write_big_buf_wb(struct bnx2x *bp, u32 addr, u32 len)
+{
+	if (bp->dmae_ready)
+		bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len);
+	else
+		bnx2x_init_ind_wr(bp, addr, GUNZIP_BUF(bp), len);
+}
+
+static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr,
+			     const u32 *data, u32 len64)
 {
 	u32 buf_len32 = FW_BUF_SIZE/4;
 	u32 len = len64*2;
@@ -82,7 +124,7 @@
 	for (i = 0; i < len; i += buf_len32) {
 		u32 cur_len = min(buf_len32, len - i);
 
-		bnx2x_write_big_buf(bp, addr + i*4, cur_len);
+		bnx2x_write_big_buf_wb(bp, addr + i*4, cur_len);
 	}
 }
 
@@ -100,7 +142,8 @@
 #define IF_IS_PRAM_ADDR(base, addr) \
 			if (((base) <= (addr)) && ((base) + 0x40000 >= (addr)))
 
-static const u8 *bnx2x_sel_blob(struct bnx2x *bp, u32 addr, const u8 *data)
+static const u8 *bnx2x_sel_blob(struct bnx2x *bp, u32 addr,
+				const u8 *data)
 {
 	IF_IS_INT_TABLE_ADDR(TSEM_REG_INT_TABLE, addr)
 		data = INIT_TSEM_INT_TABLE_DATA(bp);
@@ -129,31 +172,17 @@
 	return data;
 }
 
-static void bnx2x_write_big_buf_wb(struct bnx2x *bp, u32 addr, u32 len)
+static void bnx2x_init_wr_wb(struct bnx2x *bp, u32 addr,
+			     const u32 *data, u32 len)
 {
 	if (bp->dmae_ready)
-		bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len);
+		VIRT_WR_DMAE_LEN(bp, data, addr, len, 0);
 	else
-		bnx2x_init_ind_wr(bp, addr, GUNZIP_BUF(bp), len);
-}
-
-static void bnx2x_init_wr_wb(struct bnx2x *bp, u32 addr, const u32 *data,
-			     u32 len)
-{
-	const u32 *old_data = data;
-
-	data = (const u32 *)bnx2x_sel_blob(bp, addr, (const u8 *)data);
-
-	if (bp->dmae_ready) {
-		if (old_data != data)
-			VIRT_WR_DMAE_LEN(bp, data, addr, len, 1);
-		else
-			VIRT_WR_DMAE_LEN(bp, data, addr, len, 0);
-	} else
 		bnx2x_init_ind_wr(bp, addr, data, len);
 }
 
-static void bnx2x_wr_64(struct bnx2x *bp, u32 reg, u32 val_lo, u32 val_hi)
+static void bnx2x_wr_64(struct bnx2x *bp, u32 reg, u32 val_lo,
+			u32 val_hi)
 {
 	u32 wb_write[2];
 
@@ -161,8 +190,8 @@
 	wb_write[1] = val_hi;
 	REG_WR_DMAE_LEN(bp, reg, wb_write, 2);
 }
-
-static void bnx2x_init_wr_zp(struct bnx2x *bp, u32 addr, u32 len, u32 blob_off)
+static void bnx2x_init_wr_zp(struct bnx2x *bp, u32 addr, u32 len,
+			     u32 blob_off)
 {
 	const u8 *data = NULL;
 	int rc;
@@ -186,39 +215,33 @@
 static void bnx2x_init_block(struct bnx2x *bp, u32 block, u32 stage)
 {
 	u16 op_start =
-		INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage, STAGE_START)];
+		INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage,
+						     STAGE_START)];
 	u16 op_end =
-		INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage, STAGE_END)];
+		INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage,
+						     STAGE_END)];
 	union init_op *op;
-	int hw_wr;
-	u32 i, op_type, addr, len;
+	u32 op_idx, op_type, addr, len;
 	const u32 *data, *data_base;
 
 	/* If empty block */
 	if (op_start == op_end)
 		return;
 
-	if (CHIP_REV_IS_FPGA(bp))
-		hw_wr = OP_WR_FPGA;
-	else if (CHIP_REV_IS_EMUL(bp))
-		hw_wr = OP_WR_EMUL;
-	else
-		hw_wr = OP_WR_ASIC;
-
 	data_base = INIT_DATA(bp);
 
-	for (i = op_start; i < op_end; i++) {
+	for (op_idx = op_start; op_idx < op_end; op_idx++) {
 
-		op = (union init_op *)&(INIT_OPS(bp)[i]);
-
-		op_type = op->str_wr.op;
-		addr = op->str_wr.offset;
-		len = op->str_wr.data_len;
-		data = data_base + op->str_wr.data_off;
-
-		/* HW/EMUL specific */
-		if ((op_type > OP_WB) && (op_type == hw_wr))
-			op_type = OP_WR;
+		op = (union init_op *)&(INIT_OPS(bp)[op_idx]);
+		/* Get generic data */
+		op_type = op->raw.op;
+		addr = op->raw.offset;
+		/* Get data that's used for OP_SW, OP_WB, OP_FW, OP_ZP and
+		 * OP_WR64 (we assume that op_arr_write and op_write have the
+		 * same structure).
+		 */
+		len = op->arr_wr.data_len;
+		data = data_base + op->arr_wr.data_off;
 
 		switch (op_type) {
 		case OP_RD:
@@ -233,21 +256,39 @@
 		case OP_WB:
 			bnx2x_init_wr_wb(bp, addr, data, len);
 			break;
-		case OP_SI:
-			bnx2x_init_ind_wr(bp, addr, data, len);
-			break;
 		case OP_ZR:
-			bnx2x_init_fill(bp, addr, 0, op->zero.len);
+			bnx2x_init_fill(bp, addr, 0, op->zero.len, 0);
+			break;
+		case OP_WB_ZR:
+			bnx2x_init_fill(bp, addr, 0, op->zero.len, 1);
 			break;
 		case OP_ZP:
 			bnx2x_init_wr_zp(bp, addr, len,
-					 op->str_wr.data_off);
+					 op->arr_wr.data_off);
 			break;
 		case OP_WR_64:
 			bnx2x_init_wr_64(bp, addr, data, len);
 			break;
+		case OP_IF_MODE_AND:
+			/* If any of the required mode flags is missing,
+			 * skip the conditional block.
+			 */
+			if ((INIT_MODE_FLAGS(bp) &
+				op->if_mode.mode_bit_map) !=
+				op->if_mode.mode_bit_map)
+				op_idx += op->if_mode.cmd_offset;
+			break;
+		case OP_IF_MODE_OR:
+			/* If none of the mode flags is set, skip the
+			 * conditional block.
+			 */
+			if ((INIT_MODE_FLAGS(bp) &
+				op->if_mode.mode_bit_map) == 0)
+				op_idx += op->if_mode.cmd_offset;
+			break;
 		default:
-			/* happens whenever an op is of a diff HW */
+			/* Should never get here! */
 			break;
 		}
 	}
@@ -417,7 +458,8 @@
 		PXP2_REG_RQ_BW_WR_UBOUND30}
 };
 
-static void bnx2x_init_pxp_arb(struct bnx2x *bp, int r_order, int w_order)
+static void bnx2x_init_pxp_arb(struct bnx2x *bp, int r_order,
+			       int w_order)
 {
 	u32 val, i;
 
@@ -491,19 +533,21 @@
 	if ((CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) && (r_order == MAX_RD_ORD))
 		REG_WR(bp, PXP2_REG_RQ_PDR_LIMIT, 0xe00);
 
-	if (CHIP_IS_E2(bp))
+	if (CHIP_IS_E3(bp))
+		REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x4 << w_order));
+	else if (CHIP_IS_E2(bp))
 		REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x8 << w_order));
 	else
 		REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x18 << w_order));
 
-	if (CHIP_IS_E1H(bp) || CHIP_IS_E2(bp)) {
+	if (!CHIP_IS_E1(bp)) {
 		/*    MPS      w_order     optimal TH      presently TH
 		 *    128         0             0               2
 		 *    256         1             1               3
 		 *    >=512       2             2               3
 		 */
 		/* DMAE is special */
-		if (CHIP_IS_E2(bp)) {
+		if (!CHIP_IS_E1H(bp)) {
 			/* E2 can use optimal TH */
 			val = w_order;
 			REG_WR(bp, PXP2_REG_WR_DMAE_MPS, val);
@@ -557,8 +601,8 @@
 #define ILT_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
 #define ILT_RANGE(f, l)		(((l) << 10) | f)
 
-static int bnx2x_ilt_line_mem_op(struct bnx2x *bp, struct ilt_line *line,
-				 u32 size, u8 memop)
+static int bnx2x_ilt_line_mem_op(struct bnx2x *bp,
+				 struct ilt_line *line, u32 size, u8 memop)
 {
 	if (memop == ILT_MEMOP_FREE) {
 		BNX2X_ILT_FREE(line->page, line->page_mapping, line->size);
@@ -572,7 +616,8 @@
 }
 
 
-static int bnx2x_ilt_client_mem_op(struct bnx2x *bp, int cli_num, u8 memop)
+static int bnx2x_ilt_client_mem_op(struct bnx2x *bp, int cli_num,
+				   u8 memop)
 {
 	int i, rc;
 	struct bnx2x_ilt *ilt = BP_ILT(bp);
@@ -617,8 +662,8 @@
 	bnx2x_wr_64(bp, reg, ILT_ADDR1(page_mapping), ILT_ADDR2(page_mapping));
 }
 
-static void bnx2x_ilt_line_init_op(struct bnx2x *bp, struct bnx2x_ilt *ilt,
-				   int idx, u8 initop)
+static void bnx2x_ilt_line_init_op(struct bnx2x *bp,
+				   struct bnx2x_ilt *ilt, int idx, u8 initop)
 {
 	dma_addr_t	null_mapping;
 	int abs_idx = ilt->start_line + idx;
@@ -733,7 +778,7 @@
 }
 
 static void bnx2x_ilt_init_client_psz(struct bnx2x *bp, int cli_num,
-					    u32 psz_reg, u8 initop)
+				      u32 psz_reg, u8 initop)
 {
 	struct bnx2x_ilt *ilt = BP_ILT(bp);
 	struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];
@@ -848,7 +893,8 @@
 
 	/* Initialize T2 */
 	for (i = 0; i < src_cid_count-1; i++)
-		t2[i].next = (u64)(t2_mapping + (i+1)*sizeof(struct src_ent));
+		t2[i].next = (u64)(t2_mapping +
+			     (i+1)*sizeof(struct src_ent));
 
 	/* tell the searcher where the T2 table is */
 	REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, src_cid_count);
diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c
index 076e11f..8363636 100644
--- a/drivers/net/bnx2x/bnx2x_link.c
+++ b/drivers/net/bnx2x/bnx2x_link.c
@@ -25,6 +25,8 @@
 #include <linux/mutex.h>
 
 #include "bnx2x.h"
+#include "bnx2x_cmn.h"
 
 /********************************************************/
 #define ETH_HLEN			14
@@ -35,6 +37,13 @@
 #define ETH_MAX_JUMBO_PACKET_SIZE	9600
 #define MDIO_ACCESS_TIMEOUT		1000
 #define BMAC_CONTROL_RX_ENABLE		2
+#define WC_LANE_MAX			4
+#define I2C_SWITCH_WIDTH		2
+#define I2C_BSC0			0
+#define I2C_BSC1			1
+#define I2C_WA_RETRY_CNT		3
+#define MCPR_IMC_COMMAND_READ_OP	1
+#define MCPR_IMC_COMMAND_WRITE_OP	2
 
 /***********************************************************/
 /*			Shortcut definitions		   */
@@ -103,16 +112,13 @@
 			MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_HIG
 #define GP_STATUS_10G_CX4 \
 			MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_CX4
-#define GP_STATUS_12G_HIG \
-			MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_12G_HIG
-#define GP_STATUS_12_5G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_12_5G
-#define GP_STATUS_13G	MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_13G
-#define GP_STATUS_15G	MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_15G
-#define GP_STATUS_16G	MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_16G
 #define GP_STATUS_1G_KX MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G_KX
 #define GP_STATUS_10G_KX4 \
 			MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4
-
+#define GP_STATUS_10G_KR MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KR
+#define GP_STATUS_10G_XFI   MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_XFI
+#define GP_STATUS_20G_DXGXS MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_DXGXS
+#define GP_STATUS_10G_SFI   MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_SFI
 #define LINK_10THD		LINK_STATUS_SPEED_AND_DUPLEX_10THD
 #define LINK_10TFD		LINK_STATUS_SPEED_AND_DUPLEX_10TFD
 #define LINK_100TXHD		LINK_STATUS_SPEED_AND_DUPLEX_100TXHD
@@ -126,20 +132,10 @@
 #define LINK_2500XFD		LINK_STATUS_SPEED_AND_DUPLEX_2500XFD
 #define LINK_10GTFD		LINK_STATUS_SPEED_AND_DUPLEX_10GTFD
 #define LINK_10GXFD		LINK_STATUS_SPEED_AND_DUPLEX_10GXFD
-#define LINK_12GTFD		LINK_STATUS_SPEED_AND_DUPLEX_12GTFD
-#define LINK_12GXFD		LINK_STATUS_SPEED_AND_DUPLEX_12GXFD
-#define LINK_12_5GTFD		LINK_STATUS_SPEED_AND_DUPLEX_12_5GTFD
-#define LINK_12_5GXFD		LINK_STATUS_SPEED_AND_DUPLEX_12_5GXFD
-#define LINK_13GTFD		LINK_STATUS_SPEED_AND_DUPLEX_13GTFD
-#define LINK_13GXFD		LINK_STATUS_SPEED_AND_DUPLEX_13GXFD
-#define LINK_15GTFD		LINK_STATUS_SPEED_AND_DUPLEX_15GTFD
-#define LINK_15GXFD		LINK_STATUS_SPEED_AND_DUPLEX_15GXFD
-#define LINK_16GTFD		LINK_STATUS_SPEED_AND_DUPLEX_16GTFD
-#define LINK_16GXFD		LINK_STATUS_SPEED_AND_DUPLEX_16GXFD
+#define LINK_20GTFD		LINK_STATUS_SPEED_AND_DUPLEX_20GTFD
+#define LINK_20GXFD		LINK_STATUS_SPEED_AND_DUPLEX_20GXFD
 
-#define PHY_XGXS_FLAG			0x1
-#define PHY_SGMII_FLAG			0x2
-#define PHY_SERDES_FLAG			0x4
+
 /* */
 #define SFP_EEPROM_CON_TYPE_ADDR		0x2
@@ -165,8 +161,104 @@
 #define EDC_MODE_PASSIVE_DAC			0x0055
 
 
+/* BRB thresholds for E2 */
+#define PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE		170
+#define PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE		0
+
+#define PFC_E2_BRB_MAC_PAUSE_XON_THR_PAUSE		250
+#define PFC_E2_BRB_MAC_PAUSE_XON_THR_NON_PAUSE		0
+
+#define PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE		10
+#define PFC_E2_BRB_MAC_FULL_XOFF_THR_NON_PAUSE		90
+
+#define PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE			50
+#define PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE		250
+
+/* BRB thresholds for E3A0 */
+#define PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE		290
+#define PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE		0
+
+#define PFC_E3A0_BRB_MAC_PAUSE_XON_THR_PAUSE		410
+#define PFC_E3A0_BRB_MAC_PAUSE_XON_THR_NON_PAUSE		0
+
+#define PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE		10
+#define PFC_E3A0_BRB_MAC_FULL_XOFF_THR_NON_PAUSE		170
+
+#define PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE		50
+#define PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE		410
+
+
+/* BRB thresholds for E3B0 2 port mode */
+#define PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_PAUSE		1025
+#define PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE	0
+
+#define PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_PAUSE		1025
+#define PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE	0
+
+#define PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE		10
+#define PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE	1025
+
+#define PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE		50
+#define PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_NON_PAUSE	1025
+
+/* Only for E3B0 */
+#define PFC_E3B0_2P_BRB_FULL_LB_XOFF_THR			1025
+#define PFC_E3B0_2P_BRB_FULL_LB_XON_THR			1025
+
+/* Lossy + Lossless guaranteed == GUART */
+#define PFC_E3B0_2P_MIX_PAUSE_LB_GUART			284
+/* Lossless + Lossless */
+#define PFC_E3B0_2P_PAUSE_LB_GUART			236
+/* Lossy + Lossy */
+#define PFC_E3B0_2P_NON_PAUSE_LB_GUART			342
+
+/* Lossy + Lossless */
+#define PFC_E3B0_2P_MIX_PAUSE_MAC_0_CLASS_T_GUART		284
+/* Lossless + Lossless */
+#define PFC_E3B0_2P_PAUSE_MAC_0_CLASS_T_GUART		236
+/* Lossy + Lossy */
+#define PFC_E3B0_2P_NON_PAUSE_MAC_0_CLASS_T_GUART		336
+#define PFC_E3B0_2P_BRB_MAC_0_CLASS_T_GUART_HYST		80
+
+#define PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART		0
+#define PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART_HYST		0
+
+/* BRB thresholds for E3B0 4 port mode */
+#define PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_PAUSE		304
+#define PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE	0
+
+#define PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_PAUSE		384
+#define PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE	0
+
+#define PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE		10
+#define PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE	304
+
+#define PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE		50
+#define PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_NON_PAUSE	384
+
+
+/* Only for E3B0 */
+#define PFC_E3B0_4P_BRB_FULL_LB_XOFF_THR			304
+#define PFC_E3B0_4P_BRB_FULL_LB_XON_THR			384
+#define PFC_E3B0_4P_LB_GUART				120
+
+#define PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART		120
+#define PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST		80
+
+#define PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART		80
+#define PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST		120
+
+#define DCBX_INVALID_COS					(0xFF)
+
 #define ETS_BW_LIMIT_CREDIT_UPPER_BOUND		(0x5000)
 #define ETS_BW_LIMIT_CREDIT_WEIGHT		(0x5000)
+#define ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS		(1360)
+#define ETS_E3B0_NIG_MIN_W_VAL_20GBPS			(2720)
+#define ETS_E3B0_PBF_MIN_W_VAL				(10000)
+
+#define MAX_PACKET_SIZE					(9700)
+#define WC_UC_TIMEOUT					100
+
 /**********************************************************/
 /*                     INTERFACE                          */
 /**********************************************************/
@@ -202,14 +294,86 @@
 }
 
 /******************************************************************/
+/*			EPIO/GPIO section			  */
+/******************************************************************/
+static void bnx2x_get_epio(struct bnx2x *bp, u32 epio_pin, u32 *en)
+{
+	u32 epio_mask, gp_oenable;
+	*en = 0;
+	/* Sanity check */
+	if (epio_pin > 31) {
+		DP(NETIF_MSG_LINK, "Invalid EPIO pin %d to get\n", epio_pin);
+		return;
+	}
+
+	epio_mask = 1 << epio_pin;
+	/* Set this EPIO to input (clear its output-enable bit) */
+	gp_oenable = REG_RD(bp, MCP_REG_MCPR_GP_OENABLE);
+	REG_WR(bp, MCP_REG_MCPR_GP_OENABLE, gp_oenable & ~epio_mask);
+
+	*en = (REG_RD(bp, MCP_REG_MCPR_GP_INPUTS) & epio_mask) >> epio_pin;
+}
+
+static void bnx2x_set_epio(struct bnx2x *bp, u32 epio_pin, u32 en)
+{
+	u32 epio_mask, gp_output, gp_oenable;
+
+	/* Sanity check */
+	if (epio_pin > 31) {
+		DP(NETIF_MSG_LINK, "Invalid EPIO pin %d to set\n", epio_pin);
+		return;
+	}
+	DP(NETIF_MSG_LINK, "Setting EPIO pin %d to %d\n", epio_pin, en);
+	epio_mask = 1 << epio_pin;
+	/* Set the output value for this EPIO */
+	gp_output = REG_RD(bp, MCP_REG_MCPR_GP_OUTPUTS);
+	if (en)
+		gp_output |= epio_mask;
+	else
+		gp_output &= ~epio_mask;
+
+	REG_WR(bp, MCP_REG_MCPR_GP_OUTPUTS, gp_output);
+
+	/* Now set this EPIO to output */
+	gp_oenable = REG_RD(bp, MCP_REG_MCPR_GP_OENABLE);
+	REG_WR(bp, MCP_REG_MCPR_GP_OENABLE, gp_oenable | epio_mask);
+}
+
+static void bnx2x_set_cfg_pin(struct bnx2x *bp, u32 pin_cfg, u32 val)
+{
+	if (pin_cfg == PIN_CFG_NA)
+		return;
+	if (pin_cfg >= PIN_CFG_EPIO0) {
+		bnx2x_set_epio(bp, pin_cfg - PIN_CFG_EPIO0, val);
+	} else {
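+		/*
+		 * GPIO pins are enumerated four per port: the low two bits
+		 * select the pin, the remaining bits select the port.
+		 */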
+		u8 gpio_num = (pin_cfg - PIN_CFG_GPIO0_P0) & 0x3;
+		u8 gpio_port = (pin_cfg - PIN_CFG_GPIO0_P0) >> 2;
+		bnx2x_set_gpio(bp, gpio_num, (u8)val, gpio_port);
+	}
+}
+
+static int bnx2x_get_cfg_pin(struct bnx2x *bp, u32 pin_cfg, u32 *val)
+{
+	if (pin_cfg == PIN_CFG_NA)
+		return -EINVAL;
+	if (pin_cfg >= PIN_CFG_EPIO0) {
+		bnx2x_get_epio(bp, pin_cfg - PIN_CFG_EPIO0, val);
+	} else {
+		u8 gpio_num = (pin_cfg - PIN_CFG_GPIO0_P0) & 0x3;
+		u8 gpio_port = (pin_cfg - PIN_CFG_GPIO0_P0) >> 2;
+		*val = bnx2x_get_gpio(bp, gpio_num, gpio_port);
+	}
+	return 0;
+}
+
+/******************************************************************/
 /*				ETS section			  */
 /******************************************************************/
-void bnx2x_ets_disabled(struct link_params *params)
+static void bnx2x_ets_e2e3a0_disabled(struct link_params *params)
 {
 	/* ETS disabled configuration*/
 	struct bnx2x *bp = params->bp;
 
-	DP(NETIF_MSG_LINK, "ETS disabled configuration\n");
+	DP(NETIF_MSG_LINK, "ETS E2E3 disabled configuration\n");
 
 	/*
 	 * mapping between entry  priority to client number (0,1,2 -debug and
@@ -262,7 +426,756 @@
 	/* Defines the number of consecutive slots for the strict priority */
 	REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
 }
+
+/******************************************************************************
+* Description:
+*	min_w_val is set according to the line speed.
+******************************************************************************/
+static u32 bnx2x_ets_get_min_w_val_nig(const struct link_vars *vars)
+{
+	u32 min_w_val = 0;
+	/* Calculate min_w_val */
+	if (vars->link_up) {
+		if (SPEED_20000 == vars->line_speed)
+			min_w_val = ETS_E3B0_NIG_MIN_W_VAL_20GBPS;
+		else
+			min_w_val = ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS;
+	} else
+		/*
+		 * If the link isn't up (e.g. static configuration), assume
+		 * 20Gbps.
+		 */
+		min_w_val = ETS_E3B0_NIG_MIN_W_VAL_20GBPS;
+
+	return min_w_val;
+}
+
+/******************************************************************************
+* Description:
+*	Get the credit upper bound from min_w_val.
+******************************************************************************/
+static u32 bnx2x_ets_get_credit_upper_bound(const u32 min_w_val)
+{
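+	/*
+	 * The bound must cover at least one maximum-size packet; beyond
+	 * that it scales with the speed-dependent minimum weight.
+	 */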
+	const u32 credit_upper_bound = (u32)MAXVAL((150 * min_w_val),
+						MAX_PACKET_SIZE);
+	return credit_upper_bound;
+}
+
+/******************************************************************************
+* Description:
+*	Set credit upper bound for NIG.
+******************************************************************************/
+static void bnx2x_ets_e3b0_set_credit_upper_bound_nig(
+	const struct link_params *params,
+	const u32 min_w_val)
+{
+	struct bnx2x *bp = params->bp;
+	const u8 port = params->port;
+	const u32 credit_upper_bound =
+	    bnx2x_ets_get_credit_upper_bound(min_w_val);
 
+	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_0 :
+		NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0, credit_upper_bound);
+	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_1 :
+		   NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1, credit_upper_bound);
+	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_2 :
+		   NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_2, credit_upper_bound);
+	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_3 :
+		   NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_3, credit_upper_bound);
+	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_4 :
+		   NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_4, credit_upper_bound);
+	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_5 :
+		   NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_5, credit_upper_bound);
+
+	if (0 == port) {
+		REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_6,
+			credit_upper_bound);
+		REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_7,
+			credit_upper_bound);
+		REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_8,
+			credit_upper_bound);
+	}
+}
+
+/******************************************************************************
+* Description:
+*	Return the NIG ETS registers to their init values, except
+*	credit_upper_bound, which isn't used in this configuration (no WFQ
+*	is enabled) and is configured according to spec.
+******************************************************************************/
+static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params,
+					const struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	const u8 port = params->port;
+	const u32 min_w_val = bnx2x_ets_get_min_w_val_nig(vars);
+	/*
+	 * Mapping between entry priority and client number (0,1,2 - debug and
+	 * management clients, 3 - COS0 client, 4 - COS1, ... 8 - COS5)
+	 * (HIGHEST), 4 bits per client.
+	 * TODO_ETS - Should be done by reset value or init tool.
+	 */
+	if (port) {
+		REG_WR(bp, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB, 0x543210);
+		REG_WR(bp, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_MSB, 0x0);
+	} else {
+		REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB, 0x76543210);
+		REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB, 0x8);
+	}
+	/*
+	 * For strict priority entries, defines the number of consecutive
+	 * slots for the highest priority.
+	 */
+	/* TODO_ETS - Should be done by reset value or init tool */
+	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS :
+		   NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
+	/*
+	 * Mapping between the CREDIT_WEIGHT registers and actual client
+	 * numbers.
+	 */
+	/* TODO_ETS - Should be done by reset value or init tool */
+	if (port) {
+		/*Port 1 has 6 COS*/
+		REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_LSB, 0x210543);
+		REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_MSB, 0x0);
+	} else {
+		/*Port 0 has 9 COS*/
+		REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_LSB,
+		       0x43210876);
+		REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_MSB, 0x5);
+	}
+
+	/**
+	 * Bitmap of 5bits length. Each bit specifies whether the entry behaves
+	 * as strict.  Bits 0,1,2 - debug and management entries, 3 -
+	 * COS0 entry, 4 - COS1 entry.
+	 * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
+	 * bit4   bit3	  bit2   bit1	  bit0
+	 * MCP and debug are strict
+	 */
+	if (port)
+		REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT, 0x3f);
+	else
+		REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1ff);
+	/* defines which entries (clients) are subjected to WFQ arbitration */
+	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ :
+		   NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0);
+
+	/*
+	 * Please note the register addresses are not contiguous, so a
+	 * for loop here is not appropriate. In 2 port mode only COS0-5
+	 * of port0 can be used; in 4 port mode only COS0-2 of port1 can
+	 * be used. DEBUG0, DEBUG1 and MGMT are never used for WFQ.
+	 */
+	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 :
+		   NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, 0x0);
+	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 :
+		   NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, 0x0);
+	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 :
+		   NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2, 0x0);
+	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_3 :
+		   NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3, 0x0);
+	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_4 :
+		   NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4, 0x0);
+	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_5 :
+		   NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5, 0x0);
+	if (0 == port) {
+		REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_6, 0x0);
+		REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_7, 0x0);
+		REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_8, 0x0);
+	}
+
+	bnx2x_ets_e3b0_set_credit_upper_bound_nig(params, min_w_val);
+}
+
+/******************************************************************************
+* Description:
+*	Set credit upper bound for PBF.
+******************************************************************************/
+static void bnx2x_ets_e3b0_set_credit_upper_bound_pbf(
+	const struct link_params *params,
+	const u32 min_w_val)
+{
+	struct bnx2x *bp = params->bp;
+	const u32 credit_upper_bound =
+	    bnx2x_ets_get_credit_upper_bound(min_w_val);
+	const u8 port = params->port;
+	u32 base_upper_bound = 0;
+	u8 max_cos = 0;
+	u8 i = 0;
+	/*
+	 * In 2 port mode port0 has COS0-5 that can be used for WFQ. In 4
+	 * port mode port1 has COS0-2 that can be used for WFQ.
+	 */
+	if (0 == port) {
+		base_upper_bound = PBF_REG_COS0_UPPER_BOUND_P0;
+		max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0;
+	} else {
+		base_upper_bound = PBF_REG_COS0_UPPER_BOUND_P1;
+		max_cos = DCBX_E3B0_MAX_NUM_COS_PORT1;
+	}
+
+	for (i = 0; i < max_cos; i++)
+		REG_WR(bp, base_upper_bound + (i << 2), credit_upper_bound);
+}
+
+/******************************************************************************
+* Description:
+*	Return the PBF ETS registers to their init values, except
+*	credit_upper_bound, which isn't used in this configuration (no WFQ
+*	is enabled) and is configured according to spec.
+******************************************************************************/
+static void bnx2x_ets_e3b0_pbf_disabled(const struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	const u8 port = params->port;
+	const u32 min_w_val_pbf = ETS_E3B0_PBF_MIN_W_VAL;
+	u8 i = 0;
+	u32 base_weight = 0;
+	u8 max_cos = 0;
+
+	/*
+	 * Mapping between entry priority and client number: 0 - COS0
+	 * client, 1 - COS1, ... 5 - COS5 (HIGHEST), 4 bits per client.
+	 * TODO_ETS - Should be done by reset value or init tool.
+	 */
+	if (port)
+		/* 0x688 (|011|0 10|00 1|000) */
+		REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1, 0x688);
+	else
+		/* 0x2C688 (10 1|100 |011|0 10|00 1|000) */
+		REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0, 0x2C688);
+
+	/* TODO_ETS - Should be done by reset value or init tool */
+	if (port)
+		/* 0x688 (|011|0 10|00 1|000) */
+		REG_WR(bp, PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P1, 0x688);
+	else
+		/* 0x2C688 (10 1|100 |011|0 10|00 1|000) */
+		REG_WR(bp, PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P0, 0x2C688);
+
+	REG_WR(bp, (port) ? PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P1 :
+		   PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P0, 0x100);
+
+	REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P1 :
+		   PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0, 0);
+
+	REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 :
+		   PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0, 0);
+	/*
+	 * In 2 port mode port0 has COS0-5 that can be used for WFQ.
+	 * In 4 port mode port1 has COS0-2 that can be used for WFQ.
+	 */
+	if (0 == port) {
+		base_weight = PBF_REG_COS0_WEIGHT_P0;
+		max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0;
+	} else {
+		base_weight = PBF_REG_COS0_WEIGHT_P1;
+		max_cos = DCBX_E3B0_MAX_NUM_COS_PORT1;
+	}
+
+	for (i = 0; i < max_cos; i++)
+		REG_WR(bp, base_weight + (0x4 * i), 0);
+
+	bnx2x_ets_e3b0_set_credit_upper_bound_pbf(params, min_w_val_pbf);
+}
+
+/******************************************************************************
+* Description:
+*	E3B0 disable basically returns the values to their init values.
+******************************************************************************/
+static int bnx2x_ets_e3b0_disabled(const struct link_params *params,
+				   const struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+
+	if (!CHIP_IS_E3B0(bp)) {
+		DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_disabled the chip isn't E3B0"
+				   "\n");
+		return -EINVAL;
+	}
+
+	bnx2x_ets_e3b0_nig_disabled(params, vars);
+
+	bnx2x_ets_e3b0_pbf_disabled(params);
+
+	return 0;
+}
+
+/******************************************************************************
+* Description:
+*	Disable basically returns the values to their init values.
+******************************************************************************/
+int bnx2x_ets_disabled(struct link_params *params,
+		      struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	int bnx2x_status = 0;
+
+	if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp))
+		bnx2x_ets_e2e3a0_disabled(params);
+	else if (CHIP_IS_E3B0(bp))
+		bnx2x_status = bnx2x_ets_e3b0_disabled(params, vars);
+	else {
+		DP(NETIF_MSG_LINK, "bnx2x_ets_disabled - chip not supported\n");
+		return -EINVAL;
+	}
+
+	return bnx2x_status;
+}
+
+/******************************************************************************
+* Description:
+*	Set the COS mapping to SP and BW. Up to this point none of the COS
+*	entries are set as SP or BW.
+******************************************************************************/
+static int bnx2x_ets_e3b0_cli_map(const struct link_params *params,
+				  const struct bnx2x_ets_params *ets_params,
+				  const u8 cos_sp_bitmap,
+				  const u8 cos_bw_bitmap)
+{
+	struct bnx2x *bp = params->bp;
+	const u8 port = params->port;
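+	/*
+	 * Clients 0-2 of the NIG are the management/debug clients and are
+	 * always strict, so the COS bitmaps are shifted up by 3 for the NIG;
+	 * the PBF has no such offset.
+	 */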
+	const u8 nig_cli_sp_bitmap = 0x7 | (cos_sp_bitmap << 3);
+	const u8 pbf_cli_sp_bitmap = cos_sp_bitmap;
+	const u8 nig_cli_subject2wfq_bitmap = cos_bw_bitmap << 3;
+	const u8 pbf_cli_subject2wfq_bitmap = cos_bw_bitmap;
+
+	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT :
+	       NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, nig_cli_sp_bitmap);
+
+	REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P1 :
+	       PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0, pbf_cli_sp_bitmap);
+
+	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ :
+	       NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ,
+	       nig_cli_subject2wfq_bitmap);
+
+	REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 :
+	       PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0,
+	       pbf_cli_subject2wfq_bitmap);
+
+	return 0;
+}
+
+/******************************************************************************
+* Description:
+*	This function is needed because the NIG ARB_CREDIT_WEIGHT_X registers
+*	are not contiguous, so ARB_CREDIT_WEIGHT_0 + offset is not suitable.
+******************************************************************************/
+static int bnx2x_ets_e3b0_set_cos_bw(struct bnx2x *bp,
+				     const u8 cos_entry,
+				     const u32 min_w_val_nig,
+				     const u32 min_w_val_pbf,
+				     const u16 total_bw,
+				     const u8 bw,
+				     const u8 port)
+{
+	u32 nig_reg_adress_crd_weight = 0;
+	u32 pbf_reg_adress_crd_weight = 0;
+	/* Calculate and set BW for this COS*/
+	const u32 cos_bw_nig = (bw * min_w_val_nig) / total_bw;
+	const u32 cos_bw_pbf = (bw * min_w_val_pbf) / total_bw;
+
+	switch (cos_entry) {
+	case 0:
+		nig_reg_adress_crd_weight = (port) ?
+			NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 :
+			NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0;
+		pbf_reg_adress_crd_weight = (port) ?
+			PBF_REG_COS0_WEIGHT_P1 : PBF_REG_COS0_WEIGHT_P0;
+		break;
+	case 1:
+		nig_reg_adress_crd_weight = (port) ?
+			NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 :
+			NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1;
+		pbf_reg_adress_crd_weight = (port) ?
+			PBF_REG_COS1_WEIGHT_P1 : PBF_REG_COS1_WEIGHT_P0;
+		break;
+	case 2:
+		nig_reg_adress_crd_weight = (port) ?
+			NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 :
+			NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2;
+		pbf_reg_adress_crd_weight = (port) ?
+			PBF_REG_COS2_WEIGHT_P1 : PBF_REG_COS2_WEIGHT_P0;
+		break;
+	case 3:
+		if (port)
+			return -EINVAL;
+		nig_reg_adress_crd_weight = NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3;
+		pbf_reg_adress_crd_weight = PBF_REG_COS3_WEIGHT_P0;
+		break;
+	case 4:
+		if (port)
+			return -EINVAL;
+		nig_reg_adress_crd_weight = NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4;
+		pbf_reg_adress_crd_weight = PBF_REG_COS4_WEIGHT_P0;
+		break;
+	case 5:
+		if (port)
+			return -EINVAL;
+		nig_reg_adress_crd_weight = NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5;
+		pbf_reg_adress_crd_weight = PBF_REG_COS5_WEIGHT_P0;
+		break;
+	}
+
+	REG_WR(bp, nig_reg_adress_crd_weight, cos_bw_nig);
+	REG_WR(bp, pbf_reg_adress_crd_weight, cos_bw_pbf);
+
+	return 0;
+}
+
+/******************************************************************************
+* Description:
+*	Calculate the total BW. A value of 0 isn't legal.
+******************************************************************************/
+static int bnx2x_ets_e3b0_get_total_bw(
+	const struct link_params *params,
+	const struct bnx2x_ets_params *ets_params,
+	u16 *total_bw)
+{
+	struct bnx2x *bp = params->bp;
+	u8 cos_idx = 0;
+
+	*total_bw = 0;
+	/* Calculate total BW requested */
+	for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) {
+		if (bnx2x_cos_state_bw == ets_params->cos[cos_idx].state) {
+			if (0 == ets_params->cos[cos_idx].params.bw_params.bw) {
+				DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config BW "
+						   "was set to 0\n");
+				return -EINVAL;
+			}
+			*total_bw +=
+				ets_params->cos[cos_idx].params.bw_params.bw;
+		}
+	}
+
+	/* Check total BW is valid */
+	if ((100 != *total_bw) || (0 == *total_bw)) {
+		if (0 == *total_bw) {
+			DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config total BW "
+					   "shouldn't be 0\n");
+			return -EINVAL;
+		}
+		DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config total BW should be "
+				   "100\n");
+		/*
+		 * We can handle the case where the BW isn't 100; this can
+		 * happen if the TCs are joined.
+		 */
+	}
+	return 0;
+}
+
+/******************************************************************************
+* Description:
+*	Invalidate all the sp_pri_to_cos.
+******************************************************************************/
+static void bnx2x_ets_e3b0_sp_pri_to_cos_init(u8 *sp_pri_to_cos)
+{
+	u8 pri = 0;
+	for (pri = 0; pri < DCBX_MAX_NUM_COS; pri++)
+		sp_pri_to_cos[pri] = DCBX_INVALID_COS;
+}
+
+/******************************************************************************
+* Description:
+*	Calculate and set the SP (ARB_PRIORITY_CLIENT) NIG and PBF registers
+*	according to sp_pri_to_cos.
+******************************************************************************/
+static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params,
+					    u8 *sp_pri_to_cos, const u8 pri,
+					    const u8 cos_entry)
+{
+	struct bnx2x *bp = params->bp;
+	const u8 port = params->port;
+	const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 :
+		DCBX_E3B0_MAX_NUM_COS_PORT0;
+
+	if (DCBX_INVALID_COS != sp_pri_to_cos[pri]) {
+		DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
+				   "parameter: there can't be two COS's with "
+				   "the same strict pri\n");
+		return -EINVAL;
+	}
+
+	if (pri > max_num_of_cos) {
+		DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
+				   "parameter: illegal strict priority\n");
+		return -EINVAL;
+	}
+
+	sp_pri_to_cos[pri] = cos_entry;
+	return 0;
+}
+
+/******************************************************************************
+* Description:
+*	Returns the correct value according to COS and priority in
+*	the sp_pri_cli register.
+******************************************************************************/
+static u64 bnx2x_e3b0_sp_get_pri_cli_reg(const u8 cos, const u8 cos_offset,
+					 const u8 pri_set,
+					 const u8 pri_offset,
+					 const u8 entry_size)
+{
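+	/*
+	 * Each arbiter client entry is entry_size bits wide: place client
+	 * (cos + cos_offset) in strict-priority slot (pri_set + pri_offset).
+	 */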
+	u64 pri_cli_nig = ((u64)(cos + cos_offset)) << (entry_size *
+							(pri_set + pri_offset));
+
+	return pri_cli_nig;
+}
+
+/******************************************************************************
+* Description:
+*	Returns the correct value according to COS and priority in the
+*	sp_pri_cli register for NIG.
+******************************************************************************/
+static u64 bnx2x_e3b0_sp_get_pri_cli_reg_nig(const u8 cos, const u8 pri_set)
+{
+	/* MCP Dbg0 and dbg1 are always with higher strict pri*/
+	const u8 nig_cos_offset = 3;
+	const u8 nig_pri_offset = 3;
+
+	return bnx2x_e3b0_sp_get_pri_cli_reg(cos, nig_cos_offset, pri_set,
+		nig_pri_offset, 4);
+}
+
+/******************************************************************************
+* Description:
+*	Returns the correct value according to COS and priority in the
+*	sp_pri_cli register for PBF.
+******************************************************************************/
+static u64 bnx2x_e3b0_sp_get_pri_cli_reg_pbf(const u8 cos, const u8 pri_set)
+{
+	const u8 pbf_cos_offset = 0;
+	const u8 pbf_pri_offset = 0;
+
+	return bnx2x_e3b0_sp_get_pri_cli_reg(cos, pbf_cos_offset, pri_set,
+		pbf_pri_offset, 3);
+}
+
+/******************************************************************************
+* Description:
+*	Calculate and set the SP (ARB_PRIORITY_CLIENT) NIG and PBF registers
+*	according to sp_pri_to_cos (which COS has higher priority).
+******************************************************************************/
+static int bnx2x_ets_e3b0_sp_set_pri_cli_reg(const struct link_params *params,
+					     u8 *sp_pri_to_cos)
+{
+	struct bnx2x *bp = params->bp;
+	u8 i = 0;
+	const u8 port = params->port;
+	/* MCP Dbg0 and dbg1 are always with higher strict pri*/
+	u64 pri_cli_nig = 0x210;
+	u32 pri_cli_pbf = 0x0;
+	u8 pri_set = 0;
+	u8 pri_bitmask = 0;
+	const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 :
+		DCBX_E3B0_MAX_NUM_COS_PORT0;
+
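+	/* Bitmap of COS entries not yet assigned a strict-priority slot */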
+	u8 cos_bit_to_set = (1 << max_num_of_cos) - 1;
+
+	/* Set all the strict priority first */
+	for (i = 0; i < max_num_of_cos; i++) {
+		if (DCBX_INVALID_COS != sp_pri_to_cos[i]) {
+			if (DCBX_MAX_NUM_COS <= sp_pri_to_cos[i]) {
+				DP(NETIF_MSG_LINK,
+					   "bnx2x_ets_e3b0_sp_set_pri_cli_reg "
+					   "invalid cos entry\n");
+				return -EINVAL;
+			}
+
+			pri_cli_nig |= bnx2x_e3b0_sp_get_pri_cli_reg_nig(
+			    sp_pri_to_cos[i], pri_set);
+
+			pri_cli_pbf |= bnx2x_e3b0_sp_get_pri_cli_reg_pbf(
+			    sp_pri_to_cos[i], pri_set);
+			pri_bitmask = 1 << sp_pri_to_cos[i];
+			/* If the COS was already used, fail */
+			if (0 == (pri_bitmask & cos_bit_to_set)) {
+				DP(NETIF_MSG_LINK,
+					"bnx2x_ets_e3b0_sp_set_pri_cli_reg "
+					"invalid: there can't be two COS's "
+					"with the same strict pri\n");
+				return -EINVAL;
+			}
+			/* COS is used - remove it from the bitmap */
+			cos_bit_to_set &= ~pri_bitmask;
+			pri_set++;
+		}
+	}
+
+	/* Set all the non-strict priorities; entry i corresponds to COS i */
+	for (i = 0; i < max_num_of_cos; i++) {
+		pri_bitmask = 1 << i;
+		/* Check if COS was already used for SP */
+		if (pri_bitmask & cos_bit_to_set) {
+			/* COS wasn't used for SP */
+			pri_cli_nig |= bnx2x_e3b0_sp_get_pri_cli_reg_nig(
+			    i, pri_set);
+
+			pri_cli_pbf |= bnx2x_e3b0_sp_get_pri_cli_reg_pbf(
+			    i, pri_set);
+			/* COS is used - remove it from the bitmap */
+			cos_bit_to_set &= ~pri_bitmask;
+			pri_set++;
+		}
+	}
+
+	if (pri_set != max_num_of_cos) {
+		DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_set_pri_cli_reg not all "
+				   "entries were set\n");
+		return -EINVAL;
+	}
+
+	if (port) {
+		/* Only 6 usable clients*/
+		REG_WR(bp, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB,
+		       (u32)pri_cli_nig);
+
+		REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1, pri_cli_pbf);
+	} else {
+		/* Only 9 usable clients*/
+		const u32 pri_cli_nig_lsb = (u32) (pri_cli_nig);
+		const u32 pri_cli_nig_msb = (u32) ((pri_cli_nig >> 32) & 0xF);
+
+		REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB,
+		       pri_cli_nig_lsb);
+		REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB,
+		       pri_cli_nig_msb);
+
+		REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0, pri_cli_pbf);
+	}
+	return 0;
+}
+
+/******************************************************************************
+* Description:
+*	Configure the COS to ETS according to BW and SP settings.
+******************************************************************************/
+int bnx2x_ets_e3b0_config(const struct link_params *params,
+			 const struct link_vars *vars,
+			 const struct bnx2x_ets_params *ets_params)
+{
+	struct bnx2x *bp = params->bp;
+	int bnx2x_status = 0;
+	const u8 port = params->port;
+	u16 total_bw = 0;
+	const u32 min_w_val_nig = bnx2x_ets_get_min_w_val_nig(vars);
+	const u32 min_w_val_pbf = ETS_E3B0_PBF_MIN_W_VAL;
+	u8 cos_bw_bitmap = 0;
+	u8 cos_sp_bitmap = 0;
+	u8 sp_pri_to_cos[DCBX_MAX_NUM_COS] = {0};
+	const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 :
+		DCBX_E3B0_MAX_NUM_COS_PORT0;
+	u8 cos_entry = 0;
+
+	if (!CHIP_IS_E3B0(bp)) {
+		DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_disabled the chip isn't E3B0"
+				   "\n");
+		return -EINVAL;
+	}
+
+	if (ets_params->num_of_cos > max_num_of_cos) {
+		DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config the number of COS "
+				   "isn't supported\n");
+		return -EINVAL;
+	}
+
+	/* Prepare sp strict priority parameters*/
+	bnx2x_ets_e3b0_sp_pri_to_cos_init(sp_pri_to_cos);
+
+	/* Prepare BW parameters*/
+	bnx2x_status = bnx2x_ets_e3b0_get_total_bw(params, ets_params,
+						   &total_bw);
+	if (0 != bnx2x_status) {
+		DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config get_total_bw failed "
+				   "\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Upper bound is set according to current link speed (min_w_val
+	 * should be the same for upper bound and COS credit val).
+	 */
+	bnx2x_ets_e3b0_set_credit_upper_bound_nig(params, min_w_val_nig);
+	bnx2x_ets_e3b0_set_credit_upper_bound_pbf(params, min_w_val_pbf);
+
+	for (cos_entry = 0; cos_entry < ets_params->num_of_cos; cos_entry++) {
+		if (bnx2x_cos_state_bw == ets_params->cos[cos_entry].state) {
+			cos_bw_bitmap |= (1 << cos_entry);
+			/*
+			 * The function also sets the BW in HW (not the
+			 * mapping yet).
+			 */
+			bnx2x_status = bnx2x_ets_e3b0_set_cos_bw(
+				bp, cos_entry, min_w_val_nig, min_w_val_pbf,
+				total_bw,
+				ets_params->cos[cos_entry].params.bw_params.bw,
+				port);
+		} else if (bnx2x_cos_state_strict ==
+			ets_params->cos[cos_entry].state) {
+			cos_sp_bitmap |= (1 << cos_entry);
+
+			bnx2x_status = bnx2x_ets_e3b0_sp_pri_to_cos_set(
+				params,
+				sp_pri_to_cos,
+				ets_params->cos[cos_entry].params.sp_params.pri,
+				cos_entry);
+
+		} else {
+			DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_config cos state not"
+					   " valid\n");
+			return -EINVAL;
+		}
+		if (0 != bnx2x_status) {
+			DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_config set cos bw "
+					   "failed\n");
+			return bnx2x_status;
+		}
+	}
+
+	/* Set SP register (which COS has higher priority) */
+	bnx2x_status = bnx2x_ets_e3b0_sp_set_pri_cli_reg(params,
+							 sp_pri_to_cos);
+
+	if (0 != bnx2x_status) {
+		DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config set_pri_cli_reg "
+				   "failed\n");
+		return bnx2x_status;
+	}
+
+	/* Set client mapping of BW and strict */
+	bnx2x_status = bnx2x_ets_e3b0_cli_map(params, ets_params,
+					      cos_sp_bitmap,
+					      cos_bw_bitmap);
+
+	if (0 != bnx2x_status) {
+		DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config SP failed\n");
+		return bnx2x_status;
+	}
+	return 0;
+}
 static void bnx2x_ets_bw_limit_common(const struct link_params *params)
 {
 	/* ETS disabled configuration */
@@ -342,7 +1255,7 @@
 	REG_WR(bp, PBF_REG_COS1_WEIGHT, cos1_credit_weight);
 }
 
-u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
+int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
 {
 	/* ETS disabled configuration*/
 	struct bnx2x *bp = params->bp;
@@ -388,6 +1301,53 @@
 /*			PFC section				  */
 /******************************************************************/
 
+static void bnx2x_update_pfc_xmac(struct link_params *params,
+				  struct link_vars *vars,
+				  u8 is_lb)
+{
+	struct bnx2x *bp = params->bp;
+	u32 xmac_base;
+	u32 pause_val, pfc0_val, pfc1_val;
+
+	/* XMAC base address */
+	xmac_base = (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
+
+	/* Initialize pause and pfc registers */
+	pause_val = 0x18000;
+	pfc0_val = 0xFFFF8000;
+	pfc1_val = 0x2;
+
+	/* No PFC support */
+	if (!(params->feature_config_flags &
+	      FEATURE_CONFIG_PFC_ENABLED)) {
+
+		/*
+		 * RX flow control - Process pause frame in receive direction
+		 */
+		if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
+			pause_val |= XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN;
+
+		/*
+		 * TX flow control - Send pause packet when buffer is full
+		 */
+		if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
+			pause_val |= XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN;
+	} else { /* PFC support */
+		pfc1_val |= XMAC_PFC_CTRL_HI_REG_PFC_REFRESH_EN |
+			XMAC_PFC_CTRL_HI_REG_PFC_STATS_EN |
+			XMAC_PFC_CTRL_HI_REG_RX_PFC_EN |
+			XMAC_PFC_CTRL_HI_REG_TX_PFC_EN;
+	}
+
+	/* Write pause and PFC registers */
+	REG_WR(bp, xmac_base + XMAC_REG_PAUSE_CTRL, pause_val);
+	REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL, pfc0_val);
+	REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI, pfc1_val);
+
+	udelay(30);
+}
+
 static void bnx2x_bmac2_get_pfc_stat(struct link_params *params,
 				     u32 pfc_frames_sent[2],
 				     u32 pfc_frames_received[2])
@@ -464,6 +1424,32 @@
 /******************************************************************/
 /*			MAC/PBF section				  */
 /******************************************************************/
+static void bnx2x_set_mdio_clk(struct bnx2x *bp, u32 chip_id, u8 port)
+{
+	u32 mode, emac_base;
+	/*
+	 * Set clause 45 mode, slow down the MDIO clock to 2.5MHz
+	 * (a value of 49==0x31) and make sure that the AUTO poll is off.
+	 */
+
+	if (CHIP_IS_E2(bp))
+		emac_base = GRCBASE_EMAC0;
+	else
+		emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+	mode = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
+	mode &= ~(EMAC_MDIO_MODE_AUTO_POLL |
+		  EMAC_MDIO_MODE_CLOCK_CNT);
+	if (USES_WARPCORE(bp))
+		mode |= (74L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT);
+	else
+		mode |= (49L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT);
+
+	mode |= (EMAC_MDIO_MODE_CLAUSE_45);
+	REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE, mode);
+
+	udelay(40);
+}
+
 static void bnx2x_emac_init(struct link_params *params,
 			    struct link_vars *vars)
 {
@@ -495,7 +1481,7 @@
 		}
 		timeout--;
 	} while (val & EMAC_MODE_RESET);
-
+	bnx2x_set_mdio_clk(bp, params->chip_id, port);
 	/* Set mac address */
 	val = ((params->mac_addr[0] << 8) |
 		params->mac_addr[1]);
@@ -508,9 +1494,236 @@
 	EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + 4, val);
 }
 
-static u8 bnx2x_emac_enable(struct link_params *params,
+static void bnx2x_set_xumac_nig(struct link_params *params,
+				u16 tx_pause_en,
+				u8 enable)
+{
+	struct bnx2x *bp = params->bp;
+
+	REG_WR(bp, params->port ? NIG_REG_P1_MAC_IN_EN : NIG_REG_P0_MAC_IN_EN,
+	       enable);
+	REG_WR(bp, params->port ? NIG_REG_P1_MAC_OUT_EN : NIG_REG_P0_MAC_OUT_EN,
+	       enable);
+	REG_WR(bp, params->port ? NIG_REG_P1_MAC_PAUSE_OUT_EN :
+	       NIG_REG_P0_MAC_PAUSE_OUT_EN, tx_pause_en);
+}
+
+static void bnx2x_umac_enable(struct link_params *params,
 			    struct link_vars *vars, u8 lb)
 {
+	u32 val;
+	u32 umac_base = params->port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
+	struct bnx2x *bp = params->bp;
+	/* Reset UMAC */
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+	       (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port));
+	usleep_range(1000, 1000);
+
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
+	       (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port));
+
+	DP(NETIF_MSG_LINK, "enabling UMAC\n");
+
+	/*
+	 * This register determines on which events the MAC will assert
+	 * error on the i/f to the NIG along w/ EOP.
+	 */
+
+	/*
+	 * BD REG_WR(bp, NIG_REG_P0_MAC_RSV_ERR_MASK +
+	 * params->port*0x14,      0xfffff.
+	 */
+	/* This register opens the gate for the UMAC despite its name */
+	REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 1);
+
+	val = UMAC_COMMAND_CONFIG_REG_PROMIS_EN |
+		UMAC_COMMAND_CONFIG_REG_PAD_EN |
+		UMAC_COMMAND_CONFIG_REG_SW_RESET |
+		UMAC_COMMAND_CONFIG_REG_NO_LGTH_CHECK;
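+	/* Bits [3:2] of COMMAND_CONFIG encode the speed:
+	 * 0 - 10M, 1 - 100M, 2 - 1G, 3 - 2.5G.
+	 */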
+	switch (vars->line_speed) {
+	case SPEED_10:
+		val |= (0<<2);
+		break;
+	case SPEED_100:
+		val |= (1<<2);
+		break;
+	case SPEED_1000:
+		val |= (2<<2);
+		break;
+	case SPEED_2500:
+		val |= (3<<2);
+		break;
+	default:
+		DP(NETIF_MSG_LINK, "Invalid speed for UMAC %d\n",
+			       vars->line_speed);
+		break;
+	}
+	REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
+	udelay(50);
+
+	/* Enable RX and TX */
+	val &= ~UMAC_COMMAND_CONFIG_REG_PAD_EN;
+	val |= UMAC_COMMAND_CONFIG_REG_TX_ENA |
+		UMAC_COMMAND_CONFIG_REG_RX_ENA;
+	REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
+	udelay(50);
+
+	/* Remove SW Reset */
+	val &= ~UMAC_COMMAND_CONFIG_REG_SW_RESET;
+
+	/* Check loopback mode */
+	if (lb)
+		val |= UMAC_COMMAND_CONFIG_REG_LOOP_ENA;
+	REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
+
+	/*
+	 * Maximum Frame Length (RW). Defines a 14-Bit maximum frame
+	 * length used by the MAC receive logic to check frames.
+	 */
+	REG_WR(bp, umac_base + UMAC_REG_MAXFR, 0x2710);
+	bnx2x_set_xumac_nig(params,
+			    ((vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) != 0), 1);
+	vars->mac_type = MAC_TYPE_UMAC;
+}
+
+static u8 bnx2x_is_4_port_mode(struct bnx2x *bp)
+{
+	u32 port4mode_ovwr_val;
+	/* Check 4-port override enabled */
+	port4mode_ovwr_val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
+	if (port4mode_ovwr_val & (1<<0)) {
+		/* Return 4-port mode override value */
+		return ((port4mode_ovwr_val & (1<<1)) == (1<<1));
+	}
+	/* Return 4-port mode from input pin */
+	return (u8)REG_RD(bp, MISC_REG_PORT4MODE_EN);
+}
+
+/* Define the XMAC mode */
+static void bnx2x_xmac_init(struct bnx2x *bp, u32 max_speed)
+{
+	u32 is_port4mode = bnx2x_is_4_port_mode(bp);
+
+	/*
+	 * In 4-port mode, the mode needs to be set only once, so if XMAC is
+	 * already out of reset, it means the mode has already been set, and
+	 * it must not be reset again, since it controls both ports of the
+	 * path.
+	 */
+
+	if (is_port4mode && (REG_RD(bp, MISC_REG_RESET_REG_2) &
+	     MISC_REGISTERS_RESET_REG_2_XMAC)) {
+		DP(NETIF_MSG_LINK, "XMAC already out of reset"
+				   " in 4-port mode\n");
+		return;
+	}
+
+	/* Hard reset */
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+	       MISC_REGISTERS_RESET_REG_2_XMAC);
+	usleep_range(1000, 1000);
+
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
+	       MISC_REGISTERS_RESET_REG_2_XMAC);
+	if (is_port4mode) {
+		DP(NETIF_MSG_LINK, "Init XMAC to 2 ports x 10G per path\n");
+
+		/*  Set the number of ports on the system side to up to 2 */
+		REG_WR(bp, MISC_REG_XMAC_CORE_PORT_MODE, 1);
+
+		/* Set the number of ports on the Warp Core to 10G */
+		REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 3);
+	} else {
+		/*  Set the number of ports on the system side to 1 */
+		REG_WR(bp, MISC_REG_XMAC_CORE_PORT_MODE, 0);
+		if (max_speed == SPEED_10000) {
+			DP(NETIF_MSG_LINK, "Init XMAC to 10G x 1"
+					   " port per path\n");
+			/* Set the number of ports on the Warp Core to 10G */
+			REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 3);
+		} else {
+			DP(NETIF_MSG_LINK, "Init XMAC to 20G x 2 ports"
+					   " per path\n");
+			/* Set the number of ports on the Warp Core to 20G */
+			REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 1);
+		}
+	}
+	/* Soft reset */
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+	       MISC_REGISTERS_RESET_REG_2_XMAC_SOFT);
+	usleep_range(1000, 1000);
+
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
+	       MISC_REGISTERS_RESET_REG_2_XMAC_SOFT);
+}
+
+static void bnx2x_xmac_disable(struct link_params *params)
+{
+	u8 port = params->port;
+	struct bnx2x *bp = params->bp;
+	u32 xmac_base = (port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
+
+	if (REG_RD(bp, MISC_REG_RESET_REG_2) &
+	    MISC_REGISTERS_RESET_REG_2_XMAC) {
+		DP(NETIF_MSG_LINK, "Disable XMAC on port %x\n", port);
+		REG_WR(bp, xmac_base + XMAC_REG_CTRL, 0);
+		usleep_range(1000, 1000);
+		bnx2x_set_xumac_nig(params, 0, 0);
+		REG_WR(bp, xmac_base + XMAC_REG_CTRL,
+		       XMAC_CTRL_REG_SOFT_RESET);
+	}
+}
+
+static int bnx2x_xmac_enable(struct link_params *params,
+			     struct link_vars *vars, u8 lb)
+{
+	u32 val, xmac_base;
+	struct bnx2x *bp = params->bp;
+	DP(NETIF_MSG_LINK, "enabling XMAC\n");
+
+	xmac_base = (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
+
+	bnx2x_xmac_init(bp, vars->line_speed);
+
+	/*
+	 * This register determines on which events the MAC will assert
+	 * error on the i/f to the NIG along w/ EOP.
+	 */
+
+	/*
+	 * This register tells the NIG whether to send traffic to UMAC
+	 * or XMAC
+	 */
+	REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 0);
+
+	/* Set Max packet size */
+	REG_WR(bp, xmac_base + XMAC_REG_RX_MAX_SIZE, 0x2710);
+
+	/* CRC append for Tx packets */
+	REG_WR(bp, xmac_base + XMAC_REG_TX_CTRL, 0xC800);
+
+	/* update PFC */
+	bnx2x_update_pfc_xmac(params, vars, 0);
+
+	/* Enable TX and RX */
+	val = XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN;
+
+	/* Check loopback mode */
+	if (lb)
+		val |= XMAC_CTRL_REG_CORE_LOCAL_LPBK;
+	REG_WR(bp, xmac_base + XMAC_REG_CTRL, val);
+	bnx2x_set_xumac_nig(params,
+			    ((vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) != 0), 1);
+
+	vars->mac_type = MAC_TYPE_XMAC;
+
+	return 0;
+}
+
+static int bnx2x_emac_enable(struct link_params *params,
+			     struct link_vars *vars, u8 lb)
+{
 	struct bnx2x *bp = params->bp;
 	u8 port = params->port;
 	u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
@@ -760,97 +1973,391 @@
 	REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2);
 }
 
-static void bnx2x_update_pfc_brb(struct link_params *params,
-		struct link_vars *vars,
-		struct bnx2x_nig_brb_pfc_port_params *pfc_params)
+
+/* PFC BRB internal port configuration params */
+struct bnx2x_pfc_brb_threshold_val {
+	u32 pause_xoff;
+	u32 pause_xon;
+	u32 full_xoff;
+	u32 full_xon;
+};
+
+struct bnx2x_pfc_brb_e3b0_val {
+	u32 full_lb_xoff_th;
+	u32 full_lb_xon_threshold;
+	u32 lb_guarantied;
+	u32 mac_0_class_t_guarantied;
+	u32 mac_0_class_t_guarantied_hyst;
+	u32 mac_1_class_t_guarantied;
+	u32 mac_1_class_t_guarantied_hyst;
+};
+
+struct bnx2x_pfc_brb_th_val {
+	struct bnx2x_pfc_brb_threshold_val pauseable_th;
+	struct bnx2x_pfc_brb_threshold_val non_pauseable_th;
+};
+
+static int bnx2x_pfc_brb_get_config_params(
+				struct link_params *params,
+				struct bnx2x_pfc_brb_th_val *config_val)
 {
 	struct bnx2x *bp = params->bp;
+	DP(NETIF_MSG_LINK, "Setting PFC BRB configuration\n");
+	if (CHIP_IS_E2(bp)) {
+		config_val->pauseable_th.pause_xoff =
+		    PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
+		config_val->pauseable_th.pause_xon =
+		    PFC_E2_BRB_MAC_PAUSE_XON_THR_PAUSE;
+		config_val->pauseable_th.full_xoff =
+		    PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE;
+		config_val->pauseable_th.full_xon =
+		    PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE;
+		/* non-pauseable */
+		config_val->non_pauseable_th.pause_xoff =
+		    PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
+		config_val->non_pauseable_th.pause_xon =
+		    PFC_E2_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
+		config_val->non_pauseable_th.full_xoff =
+		    PFC_E2_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
+		config_val->non_pauseable_th.full_xon =
+		    PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE;
+	} else if (CHIP_IS_E3A0(bp)) {
+		config_val->pauseable_th.pause_xoff =
+		    PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
+		config_val->pauseable_th.pause_xon =
+		    PFC_E3A0_BRB_MAC_PAUSE_XON_THR_PAUSE;
+		config_val->pauseable_th.full_xoff =
+		    PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE;
+		config_val->pauseable_th.full_xon =
+		    PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE;
+		/* non-pauseable */
+		config_val->non_pauseable_th.pause_xoff =
+		    PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
+		config_val->non_pauseable_th.pause_xon =
+		    PFC_E3A0_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
+		config_val->non_pauseable_th.full_xoff =
+		    PFC_E3A0_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
+		config_val->non_pauseable_th.full_xon =
+		    PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE;
+	} else if (CHIP_IS_E3B0(bp)) {
+		if (params->phy[INT_PHY].flags &
+		    FLAGS_4_PORT_MODE) {
+			config_val->pauseable_th.pause_xoff =
+			    PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
+			config_val->pauseable_th.pause_xon =
+			    PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_PAUSE;
+			config_val->pauseable_th.full_xoff =
+			    PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE;
+			config_val->pauseable_th.full_xon =
+			    PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE;
+			/* non-pauseable */
+			config_val->non_pauseable_th.pause_xoff =
+			    PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
+			config_val->non_pauseable_th.pause_xon =
+			    PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
+			config_val->non_pauseable_th.full_xoff =
+			    PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
+			config_val->non_pauseable_th.full_xon =
+			    PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_NON_PAUSE;
+		} else {
+			config_val->pauseable_th.pause_xoff =
+			    PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
+			config_val->pauseable_th.pause_xon =
+			    PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_PAUSE;
+			config_val->pauseable_th.full_xoff =
+			    PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE;
+			config_val->pauseable_th.full_xon =
+			    PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE;
+			/* non-pauseable */
+			config_val->non_pauseable_th.pause_xoff =
+			    PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
+			config_val->non_pauseable_th.pause_xon =
+			    PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
+			config_val->non_pauseable_th.full_xoff =
+			    PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
+			config_val->non_pauseable_th.full_xon =
+			    PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_NON_PAUSE;
+		}
+	} else
+		return -EINVAL;
+
+	return 0;
+}
+
+static void bnx2x_pfc_brb_get_e3b0_config_params(struct link_params *params,
+						 struct bnx2x_pfc_brb_e3b0_val
+						 *e3b0_val,
+						 u32 cos0_pauseable,
+						 u32 cos1_pauseable)
+{
+	if (params->phy[INT_PHY].flags & FLAGS_4_PORT_MODE) {
+		e3b0_val->full_lb_xoff_th =
+		    PFC_E3B0_4P_BRB_FULL_LB_XOFF_THR;
+		e3b0_val->full_lb_xon_threshold =
+		    PFC_E3B0_4P_BRB_FULL_LB_XON_THR;
+		e3b0_val->lb_guarantied =
+		    PFC_E3B0_4P_LB_GUART;
+		e3b0_val->mac_0_class_t_guarantied =
+		    PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART;
+		e3b0_val->mac_0_class_t_guarantied_hyst =
+		    PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST;
+		e3b0_val->mac_1_class_t_guarantied =
+		    PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART;
+		e3b0_val->mac_1_class_t_guarantied_hyst =
+		    PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST;
+	} else {
+		e3b0_val->full_lb_xoff_th =
+		    PFC_E3B0_2P_BRB_FULL_LB_XOFF_THR;
+		e3b0_val->full_lb_xon_threshold =
+		    PFC_E3B0_2P_BRB_FULL_LB_XON_THR;
+		e3b0_val->mac_0_class_t_guarantied_hyst =
+		    PFC_E3B0_2P_BRB_MAC_0_CLASS_T_GUART_HYST;
+		e3b0_val->mac_1_class_t_guarantied =
+		    PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART;
+		e3b0_val->mac_1_class_t_guarantied_hyst =
+		    PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART_HYST;
+
+		if (cos0_pauseable != cos1_pauseable) {
+			/* Non-pauseable = lossy, pauseable = lossless */
+			e3b0_val->lb_guarantied =
+			    PFC_E3B0_2P_MIX_PAUSE_LB_GUART;
+			e3b0_val->mac_0_class_t_guarantied =
+			    PFC_E3B0_2P_MIX_PAUSE_MAC_0_CLASS_T_GUART;
+		} else if (cos0_pauseable) {
+			/* Lossless + lossless */
+			e3b0_val->lb_guarantied =
+			    PFC_E3B0_2P_PAUSE_LB_GUART;
+			e3b0_val->mac_0_class_t_guarantied =
+			    PFC_E3B0_2P_PAUSE_MAC_0_CLASS_T_GUART;
+		} else {
+			/* Lossy + lossy */
+			e3b0_val->lb_guarantied =
+			    PFC_E3B0_2P_NON_PAUSE_LB_GUART;
+			e3b0_val->mac_0_class_t_guarantied =
+			    PFC_E3B0_2P_NON_PAUSE_MAC_0_CLASS_T_GUART;
+		}
+	}
+}
+
+static int bnx2x_update_pfc_brb(struct link_params *params,
+				struct link_vars *vars,
+				struct bnx2x_nig_brb_pfc_port_params
+				*pfc_params)
+{
+	struct bnx2x *bp = params->bp;
+	struct bnx2x_pfc_brb_th_val config_val = { {0} };
+	struct bnx2x_pfc_brb_threshold_val *reg_th_config =
+	    &config_val.pauseable_th;
+	struct bnx2x_pfc_brb_e3b0_val e3b0_val = {0};
 	int set_pfc = params->feature_config_flags &
 		FEATURE_CONFIG_PFC_ENABLED;
+	int bnx2x_status = 0;
+	u8 port = params->port;
 
 	/* default - pause configuration */
-	u32 pause_xoff_th = PFC_BRB_MAC_PAUSE_XOFF_THRESHOLD_PAUSEABLE;
-	u32 pause_xon_th = PFC_BRB_MAC_PAUSE_XON_THRESHOLD_PAUSEABLE;
-	u32 full_xoff_th = PFC_BRB_MAC_FULL_XOFF_THRESHOLD_PAUSEABLE;
-	u32 full_xon_th = PFC_BRB_MAC_FULL_XON_THRESHOLD_PAUSEABLE;
+	reg_th_config = &config_val.pauseable_th;
+	bnx2x_status = bnx2x_pfc_brb_get_config_params(params, &config_val);
+	if (bnx2x_status)
+		return bnx2x_status;
 
 	if (set_pfc && pfc_params)
 		/* First COS */
-		if (!pfc_params->cos0_pauseable) {
-			pause_xoff_th =
-			  PFC_BRB_MAC_PAUSE_XOFF_THRESHOLD_NON_PAUSEABLE;
-			pause_xon_th =
-			  PFC_BRB_MAC_PAUSE_XON_THRESHOLD_NON_PAUSEABLE;
-			full_xoff_th =
-			  PFC_BRB_MAC_FULL_XOFF_THRESHOLD_NON_PAUSEABLE;
-			full_xon_th =
-			  PFC_BRB_MAC_FULL_XON_THRESHOLD_NON_PAUSEABLE;
-		}
+		if (!pfc_params->cos0_pauseable)
+			reg_th_config = &config_val.non_pauseable_th;
 	/*
 	 * The number of free blocks below which the pause signal to class 0
 	 * of MAC #n is asserted. n=0,1
 	 */
-	REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 , pause_xoff_th);
+	REG_WR(bp, (port) ? BRB1_REG_PAUSE_0_XOFF_THRESHOLD_1 :
+	       BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 ,
+	       reg_th_config->pause_xoff);
 	/*
 	 * The number of free blocks above which the pause signal to class 0
 	 * of MAC #n is de-asserted. n=0,1
 	 */
-	REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 , pause_xon_th);
+	REG_WR(bp, (port) ? BRB1_REG_PAUSE_0_XON_THRESHOLD_1 :
+	       BRB1_REG_PAUSE_0_XON_THRESHOLD_0 , reg_th_config->pause_xon);
 	/*
 	 * The number of free blocks below which the full signal to class 0
 	 * of MAC #n is asserted. n=0,1
 	 */
-	REG_WR(bp, BRB1_REG_FULL_0_XOFF_THRESHOLD_0 , full_xoff_th);
+	REG_WR(bp, (port) ? BRB1_REG_FULL_0_XOFF_THRESHOLD_1 :
+	       BRB1_REG_FULL_0_XOFF_THRESHOLD_0 , reg_th_config->full_xoff);
 	/*
 	 * The number of free blocks above which the full signal to class 0
 	 * of MAC #n is de-asserted. n=0,1
 	 */
-	REG_WR(bp, BRB1_REG_FULL_0_XON_THRESHOLD_0 , full_xon_th);
+	REG_WR(bp, (port) ? BRB1_REG_FULL_0_XON_THRESHOLD_1 :
+	       BRB1_REG_FULL_0_XON_THRESHOLD_0 , reg_th_config->full_xon);
 
 	if (set_pfc && pfc_params) {
 		/* Second COS */
-		if (pfc_params->cos1_pauseable) {
-			pause_xoff_th =
-			  PFC_BRB_MAC_PAUSE_XOFF_THRESHOLD_PAUSEABLE;
-			pause_xon_th =
-			  PFC_BRB_MAC_PAUSE_XON_THRESHOLD_PAUSEABLE;
-			full_xoff_th =
-			  PFC_BRB_MAC_FULL_XOFF_THRESHOLD_PAUSEABLE;
-			full_xon_th =
-			  PFC_BRB_MAC_FULL_XON_THRESHOLD_PAUSEABLE;
-		} else {
-			pause_xoff_th =
-			  PFC_BRB_MAC_PAUSE_XOFF_THRESHOLD_NON_PAUSEABLE;
-			pause_xon_th =
-			  PFC_BRB_MAC_PAUSE_XON_THRESHOLD_NON_PAUSEABLE;
-			full_xoff_th =
-			  PFC_BRB_MAC_FULL_XOFF_THRESHOLD_NON_PAUSEABLE;
-			full_xon_th =
-			  PFC_BRB_MAC_FULL_XON_THRESHOLD_NON_PAUSEABLE;
-		}
+		if (pfc_params->cos1_pauseable)
+			reg_th_config = &config_val.pauseable_th;
+		else
+			reg_th_config = &config_val.non_pauseable_th;
 		/*
 		 * The number of free blocks below which the pause signal to
 		 * class 1 of MAC #n is asserted. n=0,1
 		 */
-		REG_WR(bp, BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0, pause_xoff_th);
+		REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XOFF_THRESHOLD_1 :
+		       BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0,
+		       reg_th_config->pause_xoff);
 		/*
 		 * The number of free blocks above which the pause signal to
 		 * class 1 of MAC #n is de-asserted. n=0,1
 		 */
-		REG_WR(bp, BRB1_REG_PAUSE_1_XON_THRESHOLD_0, pause_xon_th);
+		REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XON_THRESHOLD_1 :
+		       BRB1_REG_PAUSE_1_XON_THRESHOLD_0,
+		       reg_th_config->pause_xon);
 		/*
 		 * The number of free blocks below which the full signal to
 		 * class 1 of MAC #n is asserted. n=0,1
 		 */
-		REG_WR(bp, BRB1_REG_FULL_1_XOFF_THRESHOLD_0, full_xoff_th);
+		REG_WR(bp, (port) ? BRB1_REG_FULL_1_XOFF_THRESHOLD_1 :
+		       BRB1_REG_FULL_1_XOFF_THRESHOLD_0,
+		       reg_th_config->full_xoff);
 		/*
 		 * The number of free blocks above which the full signal to
 		 * class 1 of MAC #n is de-asserted. n=0,1
 		 */
-		REG_WR(bp, BRB1_REG_FULL_1_XON_THRESHOLD_0, full_xon_th);
+		REG_WR(bp, (port) ? BRB1_REG_FULL_1_XON_THRESHOLD_1 :
+		       BRB1_REG_FULL_1_XON_THRESHOLD_0,
+		       reg_th_config->full_xon);
+
+		if (CHIP_IS_E3B0(bp)) {
+			/*
+			 * Should be done by the init tool:
+			 * BRB_empty_for_dup = BRB1_REG_BRB_EMPTY_THRESHOLD
+			 * reset value of 944.
+			 */
+
+			/*
+			 * The hysteresis on the guaranteed buffer space for
+			 * the LB port before signaling XON.
+			 */
+			REG_WR(bp, BRB1_REG_LB_GUARANTIED_HYST, 80);
+
+			bnx2x_pfc_brb_get_e3b0_config_params(
+			    params,
+			    &e3b0_val,
+			    pfc_params->cos0_pauseable,
+			    pfc_params->cos1_pauseable);
+			/*
+			 * The number of free blocks below which the full
+			 * signal to the LB port is asserted.
+			 */
+			REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD,
+				   e3b0_val.full_lb_xoff_th);
+			/*
+			 * The number of free blocks above which the full
+			 * signal to the LB port is de-asserted.
+			 */
+			REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD,
+				   e3b0_val.full_lb_xon_threshold);
+			/* The number of blocks guaranteed for the LB port */
+			REG_WR(bp, BRB1_REG_LB_GUARANTIED,
+			       e3b0_val.lb_guarantied);
+
+			/*
+			 * The number of blocks guaranteed for the MAC #n
+			 * port, n=0,1.
+			 */
+			REG_WR(bp, BRB1_REG_MAC_GUARANTIED_0,
+				   2 * e3b0_val.mac_0_class_t_guarantied);
+			REG_WR(bp, BRB1_REG_MAC_GUARANTIED_1,
+				   2 * e3b0_val.mac_1_class_t_guarantied);
+			/*
+			 * The number of blocks guaranteed for class #t in
+			 * MAC0, t=0,1.
+			 */
+			REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED,
+			       e3b0_val.mac_0_class_t_guarantied);
+			REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED,
+			       e3b0_val.mac_0_class_t_guarantied);
+			/*
+			 * The hysteresis on the guaranteed buffer space for
+			 * class #t in MAC0, t=0,1.
+			 */
+			REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED_HYST,
+			       e3b0_val.mac_0_class_t_guarantied_hyst);
+			REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED_HYST,
+			       e3b0_val.mac_0_class_t_guarantied_hyst);
+
+			/*
+			 * The number of blocks guaranteed for class #t in
+			 * MAC1, t=0,1.
+			 */
+			REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED,
+			       e3b0_val.mac_1_class_t_guarantied);
+			REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED,
+			       e3b0_val.mac_1_class_t_guarantied);
+			/*
+			 * The hysteresis on the guaranteed buffer space for
+			 * class #t in MAC1, t=0,1.
+			 */
+			REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED_HYST,
+			       e3b0_val.mac_1_class_t_guarantied_hyst);
+			REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED_HYST,
+			       e3b0_val.mac_1_class_t_guarantied_hyst);
+		}
 	}
+
+	return bnx2x_status;
 }
 
+/******************************************************************************
+* Description:
+*  This function is needed because the NIG RX COS priority-mask registers
+*  are not contiguous, so "base register + offset" addressing is not
+*  suitable and each entry has to be mapped explicitly.
+******************************************************************************/
+int bnx2x_pfc_nig_rx_priority_mask(struct bnx2x *bp,
+				   u8 cos_entry,
+				   u32 priority_mask, u8 port)
+{
+	u32 nig_reg_rx_priority_mask_add = 0;
+
+	switch (cos_entry) {
+	case 0:
+		nig_reg_rx_priority_mask_add = (port) ?
+			NIG_REG_P1_RX_COS0_PRIORITY_MASK :
+			NIG_REG_P0_RX_COS0_PRIORITY_MASK;
+		break;
+	case 1:
+		nig_reg_rx_priority_mask_add = (port) ?
+			NIG_REG_P1_RX_COS1_PRIORITY_MASK :
+			NIG_REG_P0_RX_COS1_PRIORITY_MASK;
+		break;
+	case 2:
+		nig_reg_rx_priority_mask_add = (port) ?
+			NIG_REG_P1_RX_COS2_PRIORITY_MASK :
+			NIG_REG_P0_RX_COS2_PRIORITY_MASK;
+		break;
+	case 3:
+		if (port)
+			return -EINVAL;
+		nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS3_PRIORITY_MASK;
+		break;
+	case 4:
+		if (port)
+			return -EINVAL;
+		nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS4_PRIORITY_MASK;
+		break;
+	case 5:
+		if (port)
+			return -EINVAL;
+		nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS5_PRIORITY_MASK;
+		break;
+	default:
+		/* Avoid writing to address 0 on an unknown COS entry */
+		return -EINVAL;
+	}
+
+	REG_WR(bp, nig_reg_rx_priority_mask_add, priority_mask);
+
+	return 0;
+}
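+
+/*
+ * Worked example of the mapping above: cos_entry 2 on port 1 programs
+ * NIG_REG_P1_RX_COS2_PRIORITY_MASK, while cos_entry 3..5 exist only on
+ * port 0, so they return -EINVAL when port is 1.
+ */
+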
 static void bnx2x_update_pfc_nig(struct link_params *params,
 		struct link_vars *vars,
 		struct bnx2x_nig_brb_pfc_port_params *nig_params)
@@ -858,9 +2365,9 @@
 	u32 xcm_mask = 0, ppp_enable = 0, pause_enable = 0, llfc_out_en = 0;
 	u32 llfc_enable = 0, xcm0_out_en = 0, p0_hwpfc_enable = 0;
 	u32 pkt_priority_to_cos = 0;
-	u32 val;
 	struct bnx2x *bp = params->bp;
-	int port = params->port;
+	u8 port = params->port;
+
 	int set_pfc = params->feature_config_flags &
 		FEATURE_CONFIG_PFC_ENABLED;
 	DP(NETIF_MSG_LINK, "updating pfc nig parameters\n");
@@ -881,6 +2388,9 @@
 		pause_enable = 0;
 		llfc_out_en = 0;
 		llfc_enable = 0;
-		ppp_enable = 1;
+		if (CHIP_IS_E3(bp))
+			ppp_enable = 0;
+		else
+			ppp_enable = 1;
 		xcm_mask &= ~(port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
 				     NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN);
@@ -899,6 +2409,9 @@
 		xcm0_out_en = 1;
 	}
 
+	if (CHIP_IS_E3(bp))
+		REG_WR(bp, port ? NIG_REG_BRB1_PAUSE_IN_EN :
+		       NIG_REG_BRB0_PAUSE_IN_EN, pause_enable);
 	REG_WR(bp, port ? NIG_REG_LLFC_OUT_EN_1 :
 	       NIG_REG_LLFC_OUT_EN_0, llfc_out_en);
 	REG_WR(bp, port ? NIG_REG_LLFC_ENABLE_1 :
@@ -920,30 +2433,13 @@
 	/* HW PFC TX enable */
 	REG_WR(bp, NIG_REG_P0_HWPFC_ENABLE, p0_hwpfc_enable);
 
-	/* 0x2 = BMAC, 0x1= EMAC */
-	switch (vars->mac_type) {
-	case MAC_TYPE_EMAC:
-		val = 1;
-		break;
-	case MAC_TYPE_BMAC:
-		val = 0;
-		break;
-	default:
-		val = 0;
-		break;
-	}
-	REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT, val);
-
 	if (nig_params) {
+		u8 i = 0;
 		pkt_priority_to_cos = nig_params->pkt_priority_to_cos;
 
-		REG_WR(bp, port ? NIG_REG_P1_RX_COS0_PRIORITY_MASK :
-		       NIG_REG_P0_RX_COS0_PRIORITY_MASK,
-		       nig_params->rx_cos0_priority_mask);
-
-		REG_WR(bp, port ? NIG_REG_P1_RX_COS1_PRIORITY_MASK :
-		       NIG_REG_P0_RX_COS1_PRIORITY_MASK,
-		       nig_params->rx_cos1_priority_mask);
+		for (i = 0; i < nig_params->num_of_rx_cos_priority_mask; i++)
+			bnx2x_pfc_nig_rx_priority_mask(bp, i,
+				nig_params->rx_cos_priority_mask[i], port);
 
 		REG_WR(bp, port ? NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_1 :
 		       NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_0,
@@ -958,8 +2454,7 @@
 	       pkt_priority_to_cos);
 }
 
-
-void bnx2x_update_pfc(struct link_params *params,
+int bnx2x_update_pfc(struct link_params *params,
 		      struct link_vars *vars,
 		      struct bnx2x_nig_brb_pfc_port_params *pfc_params)
 {
@@ -970,41 +2465,51 @@
 	 */
 	u32 val;
 	struct bnx2x *bp = params->bp;
-
+	int bnx2x_status = 0;
+	u8 bmac_loopback = (params->loopback_mode == LOOPBACK_BMAC);
 	/* update NIG params */
 	bnx2x_update_pfc_nig(params, vars, pfc_params);
 
 	/* update BRB params */
-	bnx2x_update_pfc_brb(params, vars, pfc_params);
+	bnx2x_status = bnx2x_update_pfc_brb(params, vars, pfc_params);
+	if (bnx2x_status)
+		return bnx2x_status;
 
 	if (!vars->link_up)
-		return;
-
-	val = REG_RD(bp, MISC_REG_RESET_REG_2);
-	if ((val & (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port))
-	    == 0) {
-		DP(NETIF_MSG_LINK, "About to update PFC in EMAC\n");
-		bnx2x_emac_enable(params, vars, 0);
-		return;
-	}
+		return bnx2x_status;
 
 	DP(NETIF_MSG_LINK, "About to update PFC in BMAC\n");
-	if (CHIP_IS_E2(bp))
-		bnx2x_update_pfc_bmac2(params, vars, 0);
-	else
-		bnx2x_update_pfc_bmac1(params, vars);
+	if (CHIP_IS_E3(bp))
+		bnx2x_update_pfc_xmac(params, vars, 0);
+	else {
+		val = REG_RD(bp, MISC_REG_RESET_REG_2);
+		if ((val &
+		     (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port))
+		    == 0) {
+			DP(NETIF_MSG_LINK, "About to update PFC in EMAC\n");
+			bnx2x_emac_enable(params, vars, 0);
+			return bnx2x_status;
+		}
 
-	val = 0;
-	if ((params->feature_config_flags &
-	      FEATURE_CONFIG_PFC_ENABLED) ||
-	    (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
-		val = 1;
-	REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + params->port*4, val);
+		if (CHIP_IS_E2(bp))
+			bnx2x_update_pfc_bmac2(params, vars, bmac_loopback);
+		else
+			bnx2x_update_pfc_bmac1(params, vars);
+
+		val = 0;
+		if ((params->feature_config_flags &
+		     FEATURE_CONFIG_PFC_ENABLED) ||
+		    (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
+			val = 1;
+		REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + params->port*4, val);
+	}
+	return bnx2x_status;
 }
 
-static u8 bnx2x_bmac1_enable(struct link_params *params,
-			     struct link_vars *vars,
-			     u8 is_lb)
+static int bnx2x_bmac1_enable(struct link_params *params,
+			      struct link_vars *vars,
+			      u8 is_lb)
 {
 	struct bnx2x *bp = params->bp;
 	u8 port = params->port;
@@ -1063,12 +2568,18 @@
 	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS,
 		    wb_data, 2);
 
+	if (vars->phy_flags & PHY_TX_ERROR_CHECK_FLAG) {
+		REG_RD_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LSS_STATUS,
+			    wb_data, 2);
+		if (wb_data[0] > 0)
+			return -ESRCH;
+	}
 	return 0;
 }
 
-static u8 bnx2x_bmac2_enable(struct link_params *params,
-			     struct link_vars *vars,
-			     u8 is_lb)
+static int bnx2x_bmac2_enable(struct link_params *params,
+			      struct link_vars *vars,
+			      u8 is_lb)
 {
 	struct bnx2x *bp = params->bp;
 	u8 port = params->port;
@@ -1128,14 +2639,25 @@
 	udelay(30);
 	bnx2x_update_pfc_bmac2(params, vars, is_lb);
 
+	if (vars->phy_flags & PHY_TX_ERROR_CHECK_FLAG) {
+		REG_RD_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_LSS_STAT,
+			    wb_data, 2);
+		if (wb_data[0] > 0) {
+			DP(NETIF_MSG_LINK, "Got bad LSS status 0x%x\n",
+				       wb_data[0]);
+			return -ESRCH;
+		}
+	}
+
 	return 0;
 }
 
-static u8 bnx2x_bmac_enable(struct link_params *params,
-			    struct link_vars *vars,
-			    u8 is_lb)
+static int bnx2x_bmac_enable(struct link_params *params,
+			     struct link_vars *vars,
+			     u8 is_lb)
 {
-	u8 rc, port = params->port;
+	int rc = 0;
+	u8 port = params->port;
 	struct bnx2x *bp = params->bp;
 	u32 val;
 	/* reset and unreset the BigMac */
@@ -1218,8 +2740,8 @@
 	}
 }
 
-static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
-			   u32 line_speed)
+static int bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
+			    u32 line_speed)
 {
 	struct bnx2x *bp = params->bp;
 	u8 port = params->port;
@@ -1269,18 +2791,6 @@
 		case SPEED_10000:
 			init_crd = thresh + 553 - 22;
 			break;
-
-		case SPEED_12000:
-			init_crd = thresh + 664 - 22;
-			break;
-
-		case SPEED_13000:
-			init_crd = thresh + 742 - 22;
-			break;
-
-		case SPEED_16000:
-			init_crd = thresh + 778 - 22;
-			break;
 		default:
 			DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n",
 				  line_speed);
@@ -1349,31 +2859,23 @@
 }
 
 /******************************************************************/
-/*			CL45 access functions			  */
+/*			CL22 access functions			  */
 /******************************************************************/
-static u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
-			   u8 devad, u16 reg, u16 val)
+static int bnx2x_cl22_write(struct bnx2x *bp,
+			    struct bnx2x_phy *phy,
+			    u16 reg, u16 val)
 {
-	u32 tmp, saved_mode;
-	u8 i, rc = 0;
-	/*
-	 * Set clause 45 mode, slow down the MDIO clock to 2.5MHz
-	 * (a value of 49==0x31) and make sure that the AUTO poll is off
-	 */
-
-	saved_mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
-	tmp = saved_mode & ~(EMAC_MDIO_MODE_AUTO_POLL |
-			     EMAC_MDIO_MODE_CLOCK_CNT);
-	tmp |= (EMAC_MDIO_MODE_CLAUSE_45 |
-		(49 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT));
-	REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, tmp);
-	REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
-	udelay(40);
+	u32 tmp, mode;
+	u8 i;
+	int rc = 0;
+	/* Switch to CL22 */
+	mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
+	REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE,
+	       mode & ~EMAC_MDIO_MODE_CLAUSE_45);
 
 	/* address */
-
-	tmp = ((phy->addr << 21) | (devad << 16) | reg |
-	       EMAC_MDIO_COMM_COMMAND_ADDRESS |
+	tmp = ((phy->addr << 21) | (reg << 16) | val |
+	       EMAC_MDIO_COMM_COMMAND_WRITE_22 |
 	       EMAC_MDIO_COMM_START_BUSY);
 	REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
 
@@ -1388,57 +2890,60 @@
 	}
 	if (tmp & EMAC_MDIO_COMM_START_BUSY) {
 		DP(NETIF_MSG_LINK, "write phy register failed\n");
-		netdev_err(bp->dev,  "MDC/MDIO access timeout\n");
 		rc = -EFAULT;
-	} else {
-		/* data */
-		tmp = ((phy->addr << 21) | (devad << 16) | val |
-		       EMAC_MDIO_COMM_COMMAND_WRITE_45 |
-		       EMAC_MDIO_COMM_START_BUSY);
-		REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
-
-		for (i = 0; i < 50; i++) {
-			udelay(10);
-
-			tmp = REG_RD(bp, phy->mdio_ctrl +
-				     EMAC_REG_EMAC_MDIO_COMM);
-			if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
-				udelay(5);
-				break;
-			}
-		}
-		if (tmp & EMAC_MDIO_COMM_START_BUSY) {
-			DP(NETIF_MSG_LINK, "write phy register failed\n");
-			netdev_err(bp->dev,  "MDC/MDIO access timeout\n");
-			rc = -EFAULT;
-		}
 	}
-
-	/* Restore the saved mode */
-	REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, saved_mode);
-
+	REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, mode);
 	return rc;
 }
 
-static u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
-			  u8 devad, u16 reg, u16 *ret_val)
+static int bnx2x_cl22_read(struct bnx2x *bp,
+			   struct bnx2x_phy *phy,
+			   u16 reg, u16 *ret_val)
 {
-	u32 val, saved_mode;
+	u32 val, mode;
 	u16 i;
-	u8 rc = 0;
-	/*
-	 * Set clause 45 mode, slow down the MDIO clock to 2.5MHz
-	 * (a value of 49==0x31) and make sure that the AUTO poll is off
-	 */
+	int rc = 0;
 
-	saved_mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
-	val = saved_mode & ~((EMAC_MDIO_MODE_AUTO_POLL |
-			      EMAC_MDIO_MODE_CLOCK_CNT));
-	val |= (EMAC_MDIO_MODE_CLAUSE_45 |
-		(49L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT));
-	REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val);
-	REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
-	udelay(40);
+	/* Switch to CL22 */
+	mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
+	REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE,
+	       mode & ~EMAC_MDIO_MODE_CLAUSE_45);
+
+	/* address */
+	val = ((phy->addr << 21) | (reg << 16) |
+	       EMAC_MDIO_COMM_COMMAND_READ_22 |
+	       EMAC_MDIO_COMM_START_BUSY);
+	REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
+
+	for (i = 0; i < 50; i++) {
+		udelay(10);
+
+		val = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
+		if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
+			*ret_val = (u16)(val & EMAC_MDIO_COMM_DATA);
+			udelay(5);
+			break;
+		}
+	}
+	if (val & EMAC_MDIO_COMM_START_BUSY) {
+		DP(NETIF_MSG_LINK, "read phy register failed\n");
+
+		*ret_val = 0;
+		rc = -EFAULT;
+	}
+	REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, mode);
+	return rc;
+}
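+
+/*
+ * Note the framing difference visible above: Clause 22 is a single MDIO
+ * transaction with the 5-bit register number in bits 20:16 of the COMM
+ * word, while Clause 45 (below) takes two transactions - an ADDRESS
+ * cycle carrying the 16-bit register number, then a READ/WRITE cycle
+ * carrying the data - with the devad in bits 20:16 instead.
+ */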
+
+/******************************************************************/
+/*			CL45 access functions			  */
+/******************************************************************/
+static int bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
+			   u8 devad, u16 reg, u16 *ret_val)
+{
+	u32 val;
+	u16 i;
+	int rc = 0;
 
 	/* address */
 	val = ((phy->addr << 21) | (devad << 16) | reg |
@@ -1460,7 +2965,6 @@
 		netdev_err(bp->dev,  "MDC/MDIO access timeout\n");
 		*ret_val = 0;
 		rc = -EFAULT;
-
 	} else {
 		/* data */
 		val = ((phy->addr << 21) | (devad << 16) |
@@ -1485,15 +2989,214 @@
 			rc = -EFAULT;
 		}
 	}
-
-	/* Restore the saved mode */
-	REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, saved_mode);
+	/* Workaround for E3 A0 */
+	if (phy->flags & FLAGS_MDC_MDIO_WA) {
+		phy->flags ^= FLAGS_DUMMY_READ;
+		if (phy->flags & FLAGS_DUMMY_READ) {
+			u16 temp_val;
+			bnx2x_cl45_read(bp, phy, devad, 0xf, &temp_val);
+		}
+	}
 
 	return rc;
 }
 
-u8 bnx2x_phy_read(struct link_params *params, u8 phy_addr,
-		  u8 devad, u16 reg, u16 *ret_val)
+static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
+			    u8 devad, u16 reg, u16 val)
+{
+	u32 tmp;
+	u8 i;
+	int rc = 0;
+
+	/* address */
+
+	tmp = ((phy->addr << 21) | (devad << 16) | reg |
+	       EMAC_MDIO_COMM_COMMAND_ADDRESS |
+	       EMAC_MDIO_COMM_START_BUSY);
+	REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
+
+	for (i = 0; i < 50; i++) {
+		udelay(10);
+
+		tmp = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
+		if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
+			udelay(5);
+			break;
+		}
+	}
+	if (tmp & EMAC_MDIO_COMM_START_BUSY) {
+		DP(NETIF_MSG_LINK, "write phy register failed\n");
+		netdev_err(bp->dev,  "MDC/MDIO access timeout\n");
+		rc = -EFAULT;
+
+	} else {
+		/* data */
+		tmp = ((phy->addr << 21) | (devad << 16) | val |
+		       EMAC_MDIO_COMM_COMMAND_WRITE_45 |
+		       EMAC_MDIO_COMM_START_BUSY);
+		REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
+
+		for (i = 0; i < 50; i++) {
+			udelay(10);
+
+			tmp = REG_RD(bp, phy->mdio_ctrl +
+				     EMAC_REG_EMAC_MDIO_COMM);
+			if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
+				udelay(5);
+				break;
+			}
+		}
+		if (tmp & EMAC_MDIO_COMM_START_BUSY) {
+			DP(NETIF_MSG_LINK, "write phy register failed\n");
+			netdev_err(bp->dev,  "MDC/MDIO access timeout\n");
+			rc = -EFAULT;
+		}
+	}
+	/* Workaround for E3 A0 */
+	if (phy->flags & FLAGS_MDC_MDIO_WA) {
+		phy->flags ^= FLAGS_DUMMY_READ;
+		if (phy->flags & FLAGS_DUMMY_READ) {
+			u16 temp_val;
+			bnx2x_cl45_read(bp, phy, devad, 0xf, &temp_val);
+		}
+	}
+
+	return rc;
+}
+
+
+/******************************************************************/
+/*			BSC access functions from E3	          */
+/******************************************************************/
+static void bnx2x_bsc_module_sel(struct link_params *params)
+{
+	int idx;
+	u32 board_cfg, sfp_ctrl;
+	u32 i2c_pins[I2C_SWITCH_WIDTH], i2c_val[I2C_SWITCH_WIDTH];
+	struct bnx2x *bp = params->bp;
+	u8 port = params->port;
+	/* Read I2C output PINs */
+	board_cfg = REG_RD(bp, params->shmem_base +
+			   offsetof(struct shmem_region,
+				    dev_info.shared_hw_config.board));
+	i2c_pins[I2C_BSC0] = board_cfg & SHARED_HW_CFG_E3_I2C_MUX0_MASK;
+	i2c_pins[I2C_BSC1] = (board_cfg & SHARED_HW_CFG_E3_I2C_MUX1_MASK) >>
+			SHARED_HW_CFG_E3_I2C_MUX1_SHIFT;
+
+	/* Read I2C output value */
+	sfp_ctrl = REG_RD(bp, params->shmem_base +
+			  offsetof(struct shmem_region,
+				 dev_info.port_hw_config[port].e3_cmn_pin_cfg));
+	i2c_val[I2C_BSC0] = (sfp_ctrl & PORT_HW_CFG_E3_I2C_MUX0_MASK) > 0;
+	i2c_val[I2C_BSC1] = (sfp_ctrl & PORT_HW_CFG_E3_I2C_MUX1_MASK) > 0;
+	DP(NETIF_MSG_LINK, "Setting BSC switch\n");
+	for (idx = 0; idx < I2C_SWITCH_WIDTH; idx++)
+		bnx2x_set_cfg_pin(bp, i2c_pins[idx], i2c_val[idx]);
+}
+
+static int bnx2x_bsc_read(struct link_params *params,
+			  struct bnx2x_phy *phy,
+			  u8 sl_devid,
+			  u16 sl_addr,
+			  u8 lc_addr,
+			  u8 xfer_cnt,
+			  u32 *data_array)
+{
+	u32 val, i;
+	int rc = 0;
+	struct bnx2x *bp = params->bp;
+
+	if ((sl_devid != 0xa0) && (sl_devid != 0xa2)) {
+		DP(NETIF_MSG_LINK, "invalid sl_devid 0x%x\n", sl_devid);
+		return -EINVAL;
+	}
+
+	if (xfer_cnt > 16) {
+		DP(NETIF_MSG_LINK, "invalid xfer_cnt %d. Max is 16 bytes\n",
+					xfer_cnt);
+		return -EINVAL;
+	}
+	bnx2x_bsc_module_sel(params);
+
+	xfer_cnt = 16 - lc_addr;
+
+	/* enable the engine */
+	val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
+	val |= MCPR_IMC_COMMAND_ENABLE;
+	REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val);
+
+	/* program slave device ID */
+	val = (sl_devid << 16) | sl_addr;
+	REG_WR(bp, MCP_REG_MCPR_IMC_SLAVE_CONTROL, val);
+
+	/* Start a zero-byte transfer to update the address pointer */
+	val = (MCPR_IMC_COMMAND_ENABLE) |
+	      (MCPR_IMC_COMMAND_WRITE_OP <<
+		MCPR_IMC_COMMAND_OPERATION_BITSHIFT) |
+		(lc_addr << MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT) | (0);
+	REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val);
+
+	/* poll for completion */
+	i = 0;
+	val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
+	while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) {
+		udelay(10);
+		val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
+		if (i++ > 1000) {
+			DP(NETIF_MSG_LINK,
+			   "wr 0 byte timed out after %d tries\n", i);
+			rc = -EFAULT;
+			break;
+		}
+	}
+	if (rc == -EFAULT)
+		return rc;
+
+	/* start xfer with read op */
+	val = (MCPR_IMC_COMMAND_ENABLE) |
+		(MCPR_IMC_COMMAND_READ_OP <<
+		MCPR_IMC_COMMAND_OPERATION_BITSHIFT) |
+		(lc_addr << MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT) |
+		  (xfer_cnt);
+	REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val);
+
+	/* poll for completion */
+	i = 0;
+	val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
+	while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) {
+		udelay(10);
+		val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
+		if (i++ > 1000) {
+			DP(NETIF_MSG_LINK, "rd op timed out after %d tries\n", i);
+			rc = -EFAULT;
+			break;
+		}
+	}
+	if (rc == -EFAULT)
+		return rc;
+
+	for (i = (lc_addr >> 2); i < 4; i++) {
+		data_array[i] = REG_RD(bp, (MCP_REG_MCPR_IMC_DATAREG0 + i*4));
+#ifdef __BIG_ENDIAN
+		data_array[i] = ((data_array[i] & 0x000000ff) << 24) |
+				((data_array[i] & 0x0000ff00) << 8) |
+				((data_array[i] & 0x00ff0000) >> 8) |
+				((data_array[i] & 0xff000000) >> 24);
+#endif
+	}
+	return rc;
+}
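+
+/*
+ * Illustrative use of bnx2x_bsc_read(): fetching the first bytes of an
+ * SFP module EEPROM at the standard two-wire address 0xa0:
+ *
+ *	u32 buf[4];
+ *	rc = bnx2x_bsc_read(params, phy, 0xa0, 0, 0, 2, buf);
+ *
+ * Note that the engine actually transfers 16 - lc_addr bytes into the
+ * four IMC data registers regardless of xfer_cnt, and the #ifdef
+ * __BIG_ENDIAN block above is effectively an open-coded le32_to_cpu().
+ */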
+
+static void bnx2x_cl45_read_or_write(struct bnx2x *bp, struct bnx2x_phy *phy,
+				     u8 devad, u16 reg, u16 or_val)
+{
+	u16 val;
+	bnx2x_cl45_read(bp, phy, devad, reg, &val);
+	bnx2x_cl45_write(bp, phy, devad, reg, val | or_val);
+}
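+
+/*
+ * Hypothetical example of the read-modify-write helper above: setting
+ * bit 12 of the Warpcore MII control register without disturbing its
+ * other bits:
+ *
+ *	bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ *				 MDIO_WC_REG_COMBO_IEEE0_MIICTRL, 0x1000);
+ */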
+
+int bnx2x_phy_read(struct link_params *params, u8 phy_addr,
+		   u8 devad, u16 reg, u16 *ret_val)
 {
 	u8 phy_index;
 	/*
@@ -1510,8 +3213,8 @@
 	return -EINVAL;
 }
 
-u8 bnx2x_phy_write(struct link_params *params, u8 phy_addr,
-		   u8 devad, u16 reg, u16 val)
+int bnx2x_phy_write(struct link_params *params, u8 phy_addr,
+		    u8 devad, u16 reg, u16 val)
 {
 	u8 phy_index;
 	/*
@@ -1527,9 +3230,62 @@
 	}
 	return -EINVAL;
 }
+
+static u8 bnx2x_get_warpcore_lane(struct bnx2x_phy *phy,
+				  struct link_params *params)
+{
+	u8 lane = 0;
+	struct bnx2x *bp = params->bp;
+	u32 path_swap, path_swap_ovr;
+	u8 path, port;
 
-static void bnx2x_set_aer_mmd_xgxs(struct link_params *params,
-				   struct bnx2x_phy *phy)
+	path = BP_PATH(bp);
+	port = params->port;
+
+	if (bnx2x_is_4_port_mode(bp)) {
+		u32 port_swap, port_swap_ovr;
+
+		/* Figure out path swap value */
+		path_swap_ovr = REG_RD(bp, MISC_REG_FOUR_PORT_PATH_SWAP_OVWR);
+		if (path_swap_ovr & 0x1)
+			path_swap = (path_swap_ovr & 0x2);
+		else
+			path_swap = REG_RD(bp, MISC_REG_FOUR_PORT_PATH_SWAP);
+
+		if (path_swap)
+			path = path ^ 1;
+
+		/* Figure out port swap value */
+		port_swap_ovr = REG_RD(bp, MISC_REG_FOUR_PORT_PORT_SWAP_OVWR);
+		if (port_swap_ovr & 0x1)
+			port_swap = (port_swap_ovr & 0x2);
+		else
+			port_swap = REG_RD(bp, MISC_REG_FOUR_PORT_PORT_SWAP);
+
+		if (port_swap)
+			port = port ^ 1;
+
+		lane = (port<<1) + path;
+	} else { /* two port mode - no port swap */
+
+		/* Figure out path swap value */
+		path_swap_ovr = REG_RD(bp, MISC_REG_TWO_PORT_PATH_SWAP_OVWR);
+		if (path_swap_ovr & 0x1)
+			path_swap = (path_swap_ovr & 0x2);
+		else
+			path_swap = REG_RD(bp, MISC_REG_TWO_PORT_PATH_SWAP);
+
+		if (path_swap)
+			path = path ^ 1;
+
+		lane = path << 1;
+	}
+	return lane;
+}
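+
+/*
+ * Worked example of the lane mapping above: in 4-port mode with no
+ * path/port swap, path 0 on port 1 yields lane (1 << 1) + 0 = 2; in
+ * 2-port mode the port plays no role and path 1 always maps to lane 2.
+ */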
+
+static void bnx2x_set_aer_mmd(struct link_params *params,
+			      struct bnx2x_phy *phy)
 {
 	u32 ser_lane;
 	u16 offset, aer_val;
@@ -1538,20 +3294,28 @@
 		     PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
 		     PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
 
-	offset = phy->addr + ser_lane;
-	if (CHIP_IS_E2(bp))
+	offset = (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) ?
+		(phy->addr + ser_lane) : 0;
+
+	if (USES_WARPCORE(bp)) {
+		aer_val = bnx2x_get_warpcore_lane(phy, params);
+		/*
+		 * In Dual-lane mode, two lanes are joined together,
+		 * so in order to configure them, the AER broadcast method is
+		 * used here.
+		 * 0x200 is the broadcast address for lanes 0,1
+		 * 0x201 is the broadcast address for lanes 2,3
+		 */
+		if (phy->flags & FLAGS_WC_DUAL_MODE)
+			aer_val = (aer_val >> 1) | 0x200;
+	} else if (CHIP_IS_E2(bp))
 		aer_val = 0x3800 + offset - 1;
 	else
 		aer_val = 0x3800 + offset;
+	DP(NETIF_MSG_LINK, "Set AER to 0x%x\n", aer_val);
 	CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
 			  MDIO_AER_BLOCK_AER_REG, aer_val);
-}
-static void bnx2x_set_aer_mmd_serdes(struct bnx2x *bp,
-				     struct bnx2x_phy *phy)
-{
-	CL22_WR_OVER_CL45(bp, phy,
-			  MDIO_REG_BANK_AER_BLOCK,
-			  MDIO_AER_BLOCK_AER_REG, 0x3800);
 }
 
 /******************************************************************/
@@ -1611,20 +3375,967 @@
 	       params->phy[INT_PHY].def_md_devad);
 }
 
+static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
+				     struct link_params *params, u16 *ieee_fc)
+{
+	struct bnx2x *bp = params->bp;
+	*ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
+	/*
+	 * Resolve pause mode and advertisement. Please refer to Table
+	 * 28B-3 of the 802.3ab-1999 spec.
+	 */
+
+	switch (phy->req_flow_ctrl) {
+	case BNX2X_FLOW_CTRL_AUTO:
+		if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH)
+			*ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
+		else
+			*ieee_fc |=
+			MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
+		break;
+
+	case BNX2X_FLOW_CTRL_TX:
+		*ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
+		break;
+
+	case BNX2X_FLOW_CTRL_RX:
+	case BNX2X_FLOW_CTRL_BOTH:
+		*ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
+		break;
+
+	case BNX2X_FLOW_CTRL_NONE:
+	default:
+		*ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
+		break;
+	}
+	DP(NETIF_MSG_LINK, "ieee_fc = 0x%x\n", *ieee_fc);
+}
+
+static void set_phy_vars(struct link_params *params,
+			 struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u8 actual_phy_idx, phy_index, link_cfg_idx;
+	u8 phy_config_swapped = params->multi_phy_config &
+			PORT_HW_CFG_PHY_SWAPPED_ENABLED;
+	for (phy_index = INT_PHY; phy_index < params->num_phys;
+	      phy_index++) {
+		link_cfg_idx = LINK_CONFIG_IDX(phy_index);
+		actual_phy_idx = phy_index;
+		if (phy_config_swapped) {
+			if (phy_index == EXT_PHY1)
+				actual_phy_idx = EXT_PHY2;
+			else if (phy_index == EXT_PHY2)
+				actual_phy_idx = EXT_PHY1;
+		}
+		params->phy[actual_phy_idx].req_flow_ctrl =
+			params->req_flow_ctrl[link_cfg_idx];
+
+		params->phy[actual_phy_idx].req_line_speed =
+			params->req_line_speed[link_cfg_idx];
+
+		params->phy[actual_phy_idx].speed_cap_mask =
+			params->speed_cap_mask[link_cfg_idx];
+
+		params->phy[actual_phy_idx].req_duplex =
+			params->req_duplex[link_cfg_idx];
+
+		if (params->req_line_speed[link_cfg_idx] ==
+		    SPEED_AUTO_NEG)
+			vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED;
+
+		DP(NETIF_MSG_LINK, "req_flow_ctrl %x, req_line_speed %x,"
+			   " speed_cap_mask %x\n",
+			   params->phy[actual_phy_idx].req_flow_ctrl,
+			   params->phy[actual_phy_idx].req_line_speed,
+			   params->phy[actual_phy_idx].speed_cap_mask);
+	}
+}
+
+static void bnx2x_ext_phy_set_pause(struct link_params *params,
+				    struct bnx2x_phy *phy,
+				    struct link_vars *vars)
+{
+	u16 val;
+	struct bnx2x *bp = params->bp;
+	/* Read-modify-write the pause advertisement */
+	bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, &val);
+
+	val &= ~MDIO_AN_REG_ADV_PAUSE_BOTH;
+
+	/* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
+	bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
+	if ((vars->ieee_fc &
+	    MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
+	    MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
+		val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC;
+	}
+	if ((vars->ieee_fc &
+	    MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
+	    MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) {
+		val |= MDIO_AN_REG_ADV_PAUSE_PAUSE;
+	}
+	DP(NETIF_MSG_LINK, "Ext phy AN advertise 0x%x\n", val);
+	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, val);
+}
+
+static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result)
+{						/*  LD	    LP	 */
+	switch (pause_result) {			/* ASYM P ASYM P */
+	case 0xb:				/*   1  0   1  1 */
+		vars->flow_ctrl = BNX2X_FLOW_CTRL_TX;
+		break;
+
+	case 0xe:				/*   1  1   1  0 */
+		vars->flow_ctrl = BNX2X_FLOW_CTRL_RX;
+		break;
+
+	case 0x5:				/*   0  1   0  1 */
+	case 0x7:				/*   0  1   1  1 */
+	case 0xd:				/*   1  1   0  1 */
+	case 0xf:				/*   1  1   1  1 */
+		vars->flow_ctrl = BNX2X_FLOW_CTRL_BOTH;
+		break;
+
+	default:
+		break;
+	}
+	if (pause_result & (1<<0))
+		vars->link_status |= LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE;
+	if (pause_result & (1<<1))
+		vars->link_status |= LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE;
+}
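+
+/*
+ * Worked example of the table above: pause_result 0xb (binary 1011) is
+ * LD {ASYM=1, PAUSE=0} with LP {ASYM=1, PAUSE=1}; per Table 28B-3 the
+ * local device may send PAUSE frames but not honour them, hence
+ * BNX2X_FLOW_CTRL_TX.
+ */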
+
+static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
+				   struct link_params *params,
+				   struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u16 ld_pause;		/* local */
+	u16 lp_pause;		/* link partner */
+	u16 pause_result;
+	u8 ret = 0;
+
+	vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+
+	if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO)
+		vars->flow_ctrl = phy->req_flow_ctrl;
+	else if (phy->req_line_speed != SPEED_AUTO_NEG)
+		vars->flow_ctrl = params->req_fc_auto_adv;
+	else if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
+		ret = 1;
+		if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54616) {
+			bnx2x_cl22_read(bp, phy,
+					0x4, &ld_pause);
+			bnx2x_cl22_read(bp, phy,
+					0x5, &lp_pause);
+		} else {
+			bnx2x_cl45_read(bp, phy,
+					MDIO_AN_DEVAD,
+					MDIO_AN_REG_ADV_PAUSE, &ld_pause);
+			bnx2x_cl45_read(bp, phy,
+					MDIO_AN_DEVAD,
+					MDIO_AN_REG_LP_AUTO_NEG, &lp_pause);
+		}
+		pause_result = (ld_pause &
+				MDIO_AN_REG_ADV_PAUSE_MASK) >> 8;
+		pause_result |= (lp_pause &
+				 MDIO_AN_REG_ADV_PAUSE_MASK) >> 10;
+		DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x\n",
+		   pause_result);
+		bnx2x_pause_resolve(vars, pause_result);
+	}
+	return ret;
+}
+/******************************************************************/
+/*			Warpcore section			  */
+/******************************************************************/
+/* The init_internal_warpcore should mirror the xgxs,
+ * i.e. reset the lane (if needed), set aer for the
+ * init configuration, and set/clear SGMII flag. Internal
+ * phy init is done purely in phy_init stage.
+ */
+static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
+					struct link_params *params,
+					struct link_vars *vars)
+{
+	u16 val16 = 0, lane;
+	struct bnx2x *bp = params->bp;
+	DP(NETIF_MSG_LINK, "Enable Auto Negotiation for KR\n");
+	/* Check adding advertisement for 1G KX */
+	if (((vars->line_speed == SPEED_AUTO_NEG) &&
+	     (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
+	    (vars->line_speed == SPEED_1000)) {
+		u16 sd_digital;
+		val16 |= (1<<5);
+
+		/* Enable CL37 1G Parallel Detect */
+		bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, &sd_digital);
+		bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2,
+				 (sd_digital | 0x1));
+
+		DP(NETIF_MSG_LINK, "Advertise 1G\n");
+	}
+	if (((vars->line_speed == SPEED_AUTO_NEG) &&
+	     (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) ||
+	    (vars->line_speed ==  SPEED_10000)) {
+		/* Check adding advertisement for 10G KR */
+		val16 |= (1<<7);
+		/* Enable 10G Parallel Detect */
+		bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+				MDIO_WC_REG_PAR_DET_10G_CTRL, 1);
+
+		DP(NETIF_MSG_LINK, "Advertise 10G\n");
+	}
+
+	/* Set Transmit PMD settings */
+	lane = bnx2x_get_warpcore_lane(phy, params);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+		      MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
+		     ((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
+		      (0x06 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
+		      (0x09 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)));
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL,
+			 0x03f0);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_CL72_USERB0_CL72_2P5_DEF_CTRL,
+			 0x03f0);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL,
+			 0x383f);
+
+	/* Advertised speeds */
+	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+			 MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, val16);
+
+	/* Advertise pause */
+	bnx2x_ext_phy_set_pause(params, phy, vars);
+
+	/* Enable Autoneg */
+	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+			 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1000);
+
+	/* Over 1G - AN local device user page 1 */
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_DIGITAL3_UP1, 0x1f);
+
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_DIGITAL5_MISC7, &val16);
+
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_DIGITAL5_MISC7, val16 | 0x100);
+}
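+
+/*
+ * In the CL73 advertisement word written above, bit 5 advertises
+ * 1000BASE-KX and bit 7 advertises 10GBASE-KR (the 802.3ap technology
+ * ability field), which is why val16 accumulates (1<<5) and (1<<7).
+ */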
+
+static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy,
+				      struct link_params *params,
+				      struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u16 val;
+
+	/* Disable Autoneg */
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7);
+
+	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+			 MDIO_WC_REG_PAR_DET_10G_CTRL, 0);
+
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, 0x3f00);
+
+	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+			 MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, 0);
+
+	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+			 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0);
+
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_DIGITAL3_UP1, 0x1);
+
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_DIGITAL5_MISC7, 0xa);
+
+	/* Disable CL36 PCS Tx */
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0x0);
+
+	/* Double Wide Single Data Rate @ pll rate */
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0xFFFF);
+
+	/* Leave cl72 training enable, needed for KR */
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
+		MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150,
+		0x2);
+
+	/* Leave CL72 enabled */
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL,
+			 &val);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL,
+			 val | 0x3800);
+
+	/* Set speed via PMA/PMD register */
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
+			 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040);
+
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
+			 MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0xB);
+
+	/* Enable encoded forced speed */
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x30);
+
+	/* TX: scramble payload only in the 64/66 scrambler */
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_TX66_CONTROL, 0x9);
+
+	/* RX: scramble payload only in the 64/66 scrambler */
+	bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+				 MDIO_WC_REG_RX66_CONTROL, 0xF9);
+
+	/* Set and clear loopback to force a reset of the 64/66 decoder */
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x4000);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0);
+
+}
+
+static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
+				       struct link_params *params,
+				       u8 is_xfi)
+{
+	struct bnx2x *bp = params->bp;
+	u16 misc1_val, tap_val, tx_driver_val, lane, val;
+	/* Hold rxSeqStart */
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, &val);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, (val | 0x8000));
+
+	/* Hold tx_fifo_reset */
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, &val);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, (val | 0x1));
+
+	/* Disable CL73 AN */
+	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0);
+
+	/* Disable 100FX Enable and Auto-Detect */
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_FX100_CTRL1, &val);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_FX100_CTRL1, (val & 0xFFFA));
+
+	/* Disable 100FX Idle detect */
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_FX100_CTRL3, &val);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_FX100_CTRL3, (val | 0x0080));
+
+	/* Set Block address to Remote PHY & Clear forced_speed[5] */
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_DIGITAL4_MISC3, &val);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_DIGITAL4_MISC3, (val & 0xFF7F));
+
+	/* Turn off auto-detect & fiber mode */
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, &val);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1,
+			 (val & 0xFFEE));
+
+	/* Set filter_force_link, disable_false_link and parallel_detect */
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, &val);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2,
+			 ((val | 0x0006) & 0xFFFE));
+
+	/* Set XFI / SFI */
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_SERDESDIGITAL_MISC1, &misc1_val);
+
+	misc1_val &= ~(0x1f);
+
+	if (is_xfi) {
+		misc1_val |= 0x5;
+		tap_val = ((0x08 << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) |
+			   (0x37 << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) |
+			   (0x00 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET));
+		tx_driver_val =
+		      ((0x00 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
+		       (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
+		       (0x03 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET));
+
+	} else {
+		misc1_val |= 0x9;
+		tap_val = ((0x12 << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) |
+			   (0x2d << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) |
+			   (0x00 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET));
+		tx_driver_val =
+		      ((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
+		       (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
+		       (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET));
+	}
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_SERDESDIGITAL_MISC1, misc1_val);
+
+	/* Set Transmit PMD settings */
+	lane = bnx2x_get_warpcore_lane(phy, params);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_TX_FIR_TAP,
+			 tap_val | MDIO_WC_REG_TX_FIR_TAP_ENABLE);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
+			 tx_driver_val);
+
+	/* Enable fiber mode, enable and invert sig_det */
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, &val);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, val | 0xd);
+
+	/* Set Block address to Remote PHY & Set forced_speed[5], 40bit mode */
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_DIGITAL4_MISC3, &val);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_DIGITAL4_MISC3, val | 0x8080);
+
+	/* 10G XFI Full Duplex */
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x100);
+
+	/* Release tx_fifo_reset */
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, &val);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, val & 0xFFFE);
+
+	/* Release rxSeqStart */
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, &val);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, (val & 0x7FFF));
+}
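+
+/*
+ * XFI and SFI differ only in analog tuning here: is_xfi selects speed
+ * code 0x5 in SERDESDIGITAL_MISC1 with one set of TX FIR tap / driver
+ * values, while SFI uses code 0x9 with de-emphasis values suited to an
+ * SFP+ interface.
+ */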
+
+static void bnx2x_warpcore_set_20G_KR2(struct bnx2x *bp,
+				       struct bnx2x_phy *phy)
+{
+	DP(NETIF_MSG_LINK, "KR2 is still not supported!\n");
+}
+
+static void bnx2x_warpcore_set_20G_DXGXS(struct bnx2x *bp,
+					 struct bnx2x_phy *phy,
+					 u16 lane)
+{
+	/* Rx0 anaRxControl1G */
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_RX0_ANARXCONTROL1G, 0x90);
+
+	/* Rx2 anaRxControl1G */
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_RX2_ANARXCONTROL1G, 0x90);
+
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_RX66_SCW0, 0xE070);
+
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_RX66_SCW1, 0xC0D0);
+
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_RX66_SCW2, 0xA0B0);
+
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_RX66_SCW3, 0x8090);
+
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_RX66_SCW0_MASK, 0xF0F0);
+
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_RX66_SCW1_MASK, 0xF0F0);
+
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_RX66_SCW2_MASK, 0xF0F0);
+
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_RX66_SCW3_MASK, 0xF0F0);
+
+	/* Serdes Digital Misc1 */
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x6008);
+
+	/* Serdes Digital4 Misc3 */
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_DIGITAL4_MISC3, 0x8088);
+
+	/* Set Transmit PMD settings */
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_TX_FIR_TAP,
+			((0x12 << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) |
+			 (0x2d << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) |
+			 (0x00 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET) |
+			 MDIO_WC_REG_TX_FIR_TAP_ENABLE));
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+		      MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
+		     ((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
+		      (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
+		      (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)));
+}
+
+static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy,
+					   struct link_params *params,
+					   u8 fiber_mode)
+{
+	struct bnx2x *bp = params->bp;
+	u16 val16, digctrl_kx1, digctrl_kx2;
+	u8 lane;
+
+	lane = bnx2x_get_warpcore_lane(phy, params);
+
+	/* Clear XFI clock comp in non-10G single lane mode. */
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_RX66_CONTROL, &val16);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_RX66_CONTROL, val16 & ~(3<<13));
+
+	if (phy->req_line_speed == SPEED_AUTO_NEG) {
+		/* SGMII Autoneg */
+		bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+				MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
+		bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+				 MDIO_WC_REG_COMBO_IEEE0_MIICTRL,
+				 val16 | 0x1000);
+		DP(NETIF_MSG_LINK, "set SGMII AUTONEG\n");
+	} else {
+		bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+				MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
+		val16 &= 0xcfbf;
+		switch (phy->req_line_speed) {
+		case SPEED_10:
+			break;
+		case SPEED_100:
+			val16 |= 0x2000;
+			break;
+		case SPEED_1000:
+			val16 |= 0x0040;
+			break;
+		default:
+			DP(NETIF_MSG_LINK, "Speed not supported: 0x%x\n",
+			   phy->req_line_speed);
+			return;
+		}
+
+		if (phy->req_duplex == DUPLEX_FULL)
+			val16 |= 0x0100;
+
+		bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+				MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16);
+
+		DP(NETIF_MSG_LINK, "set SGMII force speed %d\n",
+			       phy->req_line_speed);
+		bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+				MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
+		DP(NETIF_MSG_LINK, "  (readback) %x\n", val16);
+	}
+
+	/* SGMII Slave mode and disable signal detect */
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, &digctrl_kx1);
+	if (fiber_mode)
+		digctrl_kx1 = 1;
+	else
+		digctrl_kx1 &= 0xff4a;
+
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1,
+			digctrl_kx1);
+
+	/* Turn off parallel detect */
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, &digctrl_kx2);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2,
+			(digctrl_kx2 & ~(1<<2)));
+
+	/* Re-enable parallel detect */
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2,
+			(digctrl_kx2 | (1<<2)));
+
+	/* Enable autodet */
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1,
+			(digctrl_kx1 | 0x10));
+}
+
+static void bnx2x_warpcore_reset_lane(struct bnx2x *bp,
+				      struct bnx2x_phy *phy,
+				      u8 reset)
+{
+	u16 val;
+	/* Take lane out of reset after configuration is finished */
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_DIGITAL5_MISC6, &val);
+	if (reset)
+		val |= 0xC000;
+	else
+		val &= 0x3FFF;
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_DIGITAL5_MISC6, val);
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_DIGITAL5_MISC6, &val);
+}
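+
+/*
+ * Bits 15:14 of DIGITAL5_MISC6 hold the lane in reset (0xC000 sets
+ * them, 0x3FFF clears them); the trailing read-back is presumably
+ * there to flush the posted MDIO write.
+ */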
+
+/* Clear SFI/XFI link settings registers */
+static void bnx2x_warpcore_clear_regs(struct bnx2x_phy *phy,
+				      struct link_params *params,
+				      u16 lane)
+{
+	struct bnx2x *bp = params->bp;
+	u16 val16;
+
+	/* Set XFI clock comp as default. */
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_RX66_CONTROL, &val16);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_RX66_CONTROL, val16 | (3<<13));
+
+	bnx2x_warpcore_reset_lane(bp, phy, 1);
+	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_FX100_CTRL1, 0x014a);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_FX100_CTRL3, 0x0800);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_DIGITAL4_MISC3, 0x8008);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0x0195);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x0007);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, 0x0002);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x6000);
+	lane = bnx2x_get_warpcore_lane(phy, params);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_TX_FIR_TAP, 0x0000);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane, 0x0990);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_COMBO_IEEE0_MIICTRL, 0x0140);
+	bnx2x_warpcore_reset_lane(bp, phy, 0);
+}
+
+static int bnx2x_get_mod_abs_int_cfg(struct bnx2x *bp,
+						u32 chip_id,
+						u32 shmem_base, u8 port,
+						u8 *gpio_num, u8 *gpio_port)
+{
+	u32 cfg_pin;
+	*gpio_num = 0;
+	*gpio_port = 0;
+	if (CHIP_IS_E3(bp)) {
+		cfg_pin = (REG_RD(bp, shmem_base +
+				offsetof(struct shmem_region,
+				dev_info.port_hw_config[port].e3_sfp_ctrl)) &
+				PORT_HW_CFG_E3_MOD_ABS_MASK) >>
+				PORT_HW_CFG_E3_MOD_ABS_SHIFT;
+
+		/*
+		 * Should not happen. This function is called upon an
+		 * interrupt triggered by a GPIO (since EPIO can only
+		 * generate interrupts to the MCP). So if this function was
+		 * called and none of the GPIOs is set, something has gone
+		 * badly wrong.
+		 */
+		if ((cfg_pin < PIN_CFG_GPIO0_P0) ||
+		    (cfg_pin > PIN_CFG_GPIO3_P1)) {
+			DP(NETIF_MSG_LINK,
+			   "ERROR: Invalid cfg pin %x for module detect indication\n",
+			   cfg_pin);
+			return -EINVAL;
+		}
+
+		*gpio_num = (cfg_pin - PIN_CFG_GPIO0_P0) & 0x3;
+		*gpio_port = (cfg_pin - PIN_CFG_GPIO0_P0) >> 2;
+	} else {
+		*gpio_num = MISC_REGISTERS_GPIO_3;
+		*gpio_port = port;
+	}
+	DP(NETIF_MSG_LINK, "MOD_ABS int GPIO%d_P%d\n", *gpio_num, *gpio_port);
+	return 0;
+}
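+
+/*
+ * Worked example of the pin decoding above (assuming the PIN_CFG_GPIOx_Py
+ * values are consecutive from PIN_CFG_GPIO0_P0 to PIN_CFG_GPIO3_P1, which
+ * the arithmetic relies on): cfg_pin == PIN_CFG_GPIO1_P1 gives a delta of
+ * 5, i.e. gpio_num = 5 & 0x3 = 1 and gpio_port = 5 >> 2 = 1.
+ */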
+
+static int bnx2x_is_sfp_module_plugged(struct bnx2x_phy *phy,
+				       struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	u8 gpio_num, gpio_port;
+	u32 gpio_val;
+	if (bnx2x_get_mod_abs_int_cfg(bp, params->chip_id,
+				      params->shmem_base, params->port,
+				      &gpio_num, &gpio_port) != 0)
+		return 0;
+	gpio_val = bnx2x_get_gpio(bp, gpio_num, gpio_port);
+
+	/* The module is plugged in when the MOD_ABS pin is driven low */
+	return (gpio_val == 0);
+}
+
+static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
+				       struct link_params *params,
+				       struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u32 serdes_net_if;
+	u8 fiber_mode;
+	u16 lane = bnx2x_get_warpcore_lane(phy, params);
+	serdes_net_if = (REG_RD(bp, params->shmem_base +
+			 offsetof(struct shmem_region, dev_info.
+				  port_hw_config[params->port].default_cfg)) &
+			 PORT_HW_CFG_NET_SERDES_IF_MASK);
+	DP(NETIF_MSG_LINK,
+	   "Begin Warpcore init: link_speed %d, serdes_net_if 0x%x\n",
+	   vars->line_speed, serdes_net_if);
+	bnx2x_set_aer_mmd(params, phy);
+
+	vars->phy_flags |= PHY_XGXS_FLAG;
+	if ((serdes_net_if == PORT_HW_CFG_NET_SERDES_IF_SGMII) ||
+	    (phy->req_line_speed &&
+	     ((phy->req_line_speed == SPEED_100) ||
+	      (phy->req_line_speed == SPEED_10)))) {
+		vars->phy_flags |= PHY_SGMII_FLAG;
+		DP(NETIF_MSG_LINK, "Setting SGMII mode\n");
+		bnx2x_warpcore_clear_regs(phy, params, lane);
+		bnx2x_warpcore_set_sgmii_speed(phy, params, 0);
+	} else {
+		switch (serdes_net_if) {
+		case PORT_HW_CFG_NET_SERDES_IF_KR:
+			/* Enable KR Auto Neg */
+			if (params->loopback_mode == LOOPBACK_NONE) {
+				bnx2x_warpcore_enable_AN_KR(phy, params, vars);
+			} else {
+				DP(NETIF_MSG_LINK, "Setting KR 10G-Force\n");
+				bnx2x_warpcore_set_10G_KR(phy, params, vars);
+			}
+			break;
+
+		case PORT_HW_CFG_NET_SERDES_IF_XFI:
+			bnx2x_warpcore_clear_regs(phy, params, lane);
+			if (vars->line_speed == SPEED_10000) {
+				DP(NETIF_MSG_LINK, "Setting 10G XFI\n");
+				bnx2x_warpcore_set_10G_XFI(phy, params, 1);
+			} else {
+				if (SINGLE_MEDIA_DIRECT(params)) {
+					DP(NETIF_MSG_LINK, "1G Fiber\n");
+					fiber_mode = 1;
+				} else {
+					DP(NETIF_MSG_LINK, "10/100/1G SGMII\n");
+					fiber_mode = 0;
+				}
+				bnx2x_warpcore_set_sgmii_speed(phy, params,
+							       fiber_mode);
+			}
+
+			break;
+
+		case PORT_HW_CFG_NET_SERDES_IF_SFI:
+
+			bnx2x_warpcore_clear_regs(phy, params, lane);
+			if (vars->line_speed == SPEED_10000) {
+				DP(NETIF_MSG_LINK, "Setting 10G SFI\n");
+				bnx2x_warpcore_set_10G_XFI(phy, params, 0);
+			} else if (vars->line_speed == SPEED_1000) {
+				DP(NETIF_MSG_LINK, "Setting 1G Fiber\n");
+				bnx2x_warpcore_set_sgmii_speed(phy, params, 1);
+			}
+			/* Issue Module detection */
+			if (bnx2x_is_sfp_module_plugged(phy, params))
+				bnx2x_sfp_module_detection(phy, params);
+			break;
+
+		case PORT_HW_CFG_NET_SERDES_IF_DXGXS:
+			if (vars->line_speed != SPEED_20000) {
+				DP(NETIF_MSG_LINK, "Speed not supported yet\n");
+				return;
+			}
+			DP(NETIF_MSG_LINK, "Setting 20G DXGXS\n");
+			bnx2x_warpcore_set_20G_DXGXS(bp, phy, lane);
+			/* Issue Module detection */
+			bnx2x_sfp_module_detection(phy, params);
+			break;
+
+		case PORT_HW_CFG_NET_SERDES_IF_KR2:
+			if (vars->line_speed != SPEED_20000) {
+				DP(NETIF_MSG_LINK, "Speed not supported yet\n");
+				return;
+			}
+			DP(NETIF_MSG_LINK, "Setting 20G KR2\n");
+			bnx2x_warpcore_set_20G_KR2(bp, phy);
+			break;
+
+		default:
+			DP(NETIF_MSG_LINK,
+			   "Unsupported Serdes Net Interface 0x%x\n",
+			   serdes_net_if);
+			return;
+		}
+	}
+
+	/* Take lane out of reset after configuration is finished */
+	bnx2x_warpcore_reset_lane(bp, phy, 0);
+	DP(NETIF_MSG_LINK, "Exit config init\n");
+}
+
+static void bnx2x_sfp_e3_set_transmitter(struct link_params *params,
+					 struct bnx2x_phy *phy,
+					 u8 tx_en)
+{
+	struct bnx2x *bp = params->bp;
+	u32 cfg_pin;
+	u8 port = params->port;
+
+	cfg_pin = REG_RD(bp, params->shmem_base +
+				offsetof(struct shmem_region,
+				dev_info.port_hw_config[port].e3_sfp_ctrl)) &
+				PORT_HW_CFG_TX_LASER_MASK;
+	/* Set the !tx_en since this pin is DISABLE_TX_LASER */
+	DP(NETIF_MSG_LINK, "Setting WC TX to %d\n", tx_en);
+	bnx2x_set_cfg_pin(bp, cfg_pin, tx_en ^ 1);
+	/* For 20G, the expected pin is 3 pins after the current one */
+	if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)
+		bnx2x_set_cfg_pin(bp, cfg_pin + 3, tx_en ^ 1);
+}
+
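Because the pin is wired as DISABLE_TX_LASER, the level driven is the inverse of tx_en; a minimal standalone sketch of that inversion:

	/* active-low: level actually driven on the DISABLE_TX_LASER pin */
	static inline int disable_tx_laser_level(int tx_en)
	{
		return tx_en ^ 1;	/* tx_en=1 -> 0 (laser on), tx_en=0 -> 1 */
	}

For 20G-capable ports the same inverted level is also driven on the pin three positions up (cfg_pin + 3), as the function above does.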
+static void bnx2x_warpcore_link_reset(struct bnx2x_phy *phy,
+				      struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	u16 val16;
+	bnx2x_sfp_e3_set_transmitter(params, phy, 0);
+	bnx2x_set_mdio_clk(bp, params->chip_id, params->port);
+	bnx2x_set_aer_mmd(params, phy);
+	/* Global register */
+	bnx2x_warpcore_reset_lane(bp, phy, 1);
+
+	/* Clear loopback settings (if any) */
+	/* 10G & 20G */
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16 &
+			 0xBFFF);
+
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_IEEE0BLK_MIICNTL, &val16);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_IEEE0BLK_MIICNTL, val16 & 0xfffe);
+
+	/* Update those 1-copy registers */
+	CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+			  MDIO_AER_BLOCK_AER_REG, 0);
+		/* Enable 1G MDIO (1-copy) */
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
+			&val16);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
+			 val16 & ~0x10);
+
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_XGXSBLK1_LANECTRL2, &val16);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_XGXSBLK1_LANECTRL2,
+			 val16 & 0xff00);
+
+}
+
+static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy,
+					struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	u16 val16;
+	u32 lane;
+	DP(NETIF_MSG_LINK, "Setting Warpcore loopback type %x, speed %d\n",
+		       params->loopback_mode, phy->req_line_speed);
+
+	if (phy->req_line_speed < SPEED_10000) {
+		/* 10/100/1000 */
+
+		/* Update those 1-copy registers */
+		CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+				  MDIO_AER_BLOCK_AER_REG, 0);
+		/* Enable 1G MDIO (1-copy) */
+		bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+				MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
+				&val16);
+		bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+				MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
+				val16 | 0x10);
+		/* Set 1G loopback based on lane (1-copy) */
+		lane = bnx2x_get_warpcore_lane(phy, params);
+		bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+				MDIO_WC_REG_XGXSBLK1_LANECTRL2, &val16);
+		bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+				MDIO_WC_REG_XGXSBLK1_LANECTRL2,
+				val16 | (1<<lane));
+
+		/* Switch back to 4-copy registers */
+		bnx2x_set_aer_mmd(params, phy);
+		/* Global loopback, not recommended. */
+		bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+				MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
+		bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+				MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16 |
+				0x4000);
+	} else {
+		/* 10G & 20G */
+		bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+				MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
+		bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+				MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16 |
+				 0x4000);
+
+		bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+				MDIO_WC_REG_IEEE0BLK_MIICNTL, &val16);
+		bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+				MDIO_WC_REG_IEEE0BLK_MIICNTL, val16 | 0x1);
+	}
+}
+
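In both branches the loopback control itself is bit 14 (0x4000) of the combo MII control register, which bnx2x_warpcore_link_reset above clears with the 0xBFFF mask. A standalone sketch of the set/clear pair (the constant name is illustrative):

	#define MII_CTRL_LOOPBACK	(1 << 14)	/* 0x4000 */

	static unsigned short mii_loopback(unsigned short miictrl, int enable)
	{
		return enable ? (miictrl | MII_CTRL_LOOPBACK)	/* | 0x4000 */
			      : (miictrl & ~MII_CTRL_LOOPBACK);	/* & 0xBFFF */
	}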
 
 void bnx2x_link_status_update(struct link_params *params,
 			      struct link_vars *vars)
 {
 	struct bnx2x *bp = params->bp;
-	u8 link_10g;
+	u8 link_10g_plus;
 	u8 port = params->port;
+	u32 sync_offset, media_types;
+	/* Update PHY configuration */
+	set_phy_vars(params, vars);
 
 	vars->link_status = REG_RD(bp, params->shmem_base +
 				   offsetof(struct shmem_region,
 					    port_mb[port].link_status));
 
 	vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP);
-
+	vars->phy_flags = PHY_XGXS_FLAG;
 	if (vars->link_up) {
 		DP(NETIF_MSG_LINK, "phy link up\n");
 
@@ -1664,27 +4375,9 @@
 			case LINK_10GTFD:
 				vars->line_speed = SPEED_10000;
 				break;
-
-			case LINK_12GTFD:
-				vars->line_speed = SPEED_12000;
+			case LINK_20GTFD:
+				vars->line_speed = SPEED_20000;
 				break;
-
-			case LINK_12_5GTFD:
-				vars->line_speed = SPEED_12500;
-				break;
-
-			case LINK_13GTFD:
-				vars->line_speed = SPEED_13000;
-				break;
-
-			case LINK_15GTFD:
-				vars->line_speed = SPEED_15000;
-				break;
-
-			case LINK_16GTFD:
-				vars->line_speed = SPEED_16000;
-				break;
-
 			default:
 				break;
 		}
@@ -1705,19 +4398,24 @@
 		} else {
 			vars->phy_flags &= ~PHY_SGMII_FLAG;
 		}
-
+		if (USES_WARPCORE(bp) &&
+		    (vars->line_speed == SPEED_1000))
+			vars->phy_flags |= PHY_SGMII_FLAG;
 		/* anything 10 and over uses the bmac */
-		link_10g = ((vars->line_speed == SPEED_10000) ||
-			    (vars->line_speed == SPEED_12000) ||
-			    (vars->line_speed == SPEED_12500) ||
-			    (vars->line_speed == SPEED_13000) ||
-			    (vars->line_speed == SPEED_15000) ||
-			    (vars->line_speed == SPEED_16000));
-		if (link_10g)
-			vars->mac_type = MAC_TYPE_BMAC;
-		else
-			vars->mac_type = MAC_TYPE_EMAC;
+		link_10g_plus = (vars->line_speed >= SPEED_10000);
 
+		if (link_10g_plus) {
+			if (USES_WARPCORE(bp))
+				vars->mac_type = MAC_TYPE_XMAC;
+			else
+				vars->mac_type = MAC_TYPE_BMAC;
+		} else {
+			if (USES_WARPCORE(bp))
+				vars->mac_type = MAC_TYPE_UMAC;
+			else
+				vars->mac_type = MAC_TYPE_EMAC;
+		}
 	} else { /* link down */
 		DP(NETIF_MSG_LINK, "phy link down\n");
 
@@ -1731,8 +4429,32 @@
 		vars->mac_type = MAC_TYPE_NONE;
 	}
 
-	DP(NETIF_MSG_LINK, "link_status 0x%x  phy_link_up %x\n",
-		 vars->link_status, vars->phy_link_up);
+	/* Sync media type */
+	sync_offset = params->shmem_base +
+			offsetof(struct shmem_region,
+				 dev_info.port_hw_config[port].media_type);
+	media_types = REG_RD(bp, sync_offset);
+
+	params->phy[INT_PHY].media_type =
+		(media_types & PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) >>
+		PORT_HW_CFG_MEDIA_TYPE_PHY0_SHIFT;
+	params->phy[EXT_PHY1].media_type =
+		(media_types & PORT_HW_CFG_MEDIA_TYPE_PHY1_MASK) >>
+		PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT;
+	params->phy[EXT_PHY2].media_type =
+		(media_types & PORT_HW_CFG_MEDIA_TYPE_PHY2_MASK) >>
+		PORT_HW_CFG_MEDIA_TYPE_PHY2_SHIFT;
+	DP(NETIF_MSG_LINK, "media_types = 0x%x\n", media_types);
+
+	/* Sync AEU offset */
+	sync_offset = params->shmem_base +
+			offsetof(struct shmem_region,
+				 dev_info.port_hw_config[port].aeu_int_mask);
+
+	vars->aeu_int_mask = REG_RD(bp, sync_offset);
+
+	DP(NETIF_MSG_LINK, "link_status 0x%x  phy_link_up %x int_mask 0x%x\n",
+		 vars->link_status, vars->phy_link_up, vars->aeu_int_mask);
 	DP(NETIF_MSG_LINK, "line_speed %x  duplex %x  flow_ctrl 0x%x\n",
 		 vars->line_speed, vars->duplex, vars->flow_ctrl);
 }
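The media-type sync above unpacks three per-PHY fields from a single 32-bit shmem word via per-PHY MASK/SHIFT pairs. Assuming evenly spaced fields (the PHY1_SHIFT-based update in bnx2x_get_edc_mode later relies on exactly that spacing), an equivalent generic form is:

	/* illustrative: spacing and width taken as parameters */
	static unsigned int media_type_of(unsigned int media_types,
					  unsigned int phy_idx,
					  unsigned int shift_per_phy,
					  unsigned int field_mask)
	{
		return (media_types >> (shift_per_phy * phy_idx)) & field_mask;
	}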
@@ -1759,9 +4481,9 @@
 			  (new_master_ln | ser_lane));
 }
 
-static u8 bnx2x_reset_unicore(struct link_params *params,
-			      struct bnx2x_phy *phy,
-			      u8 set_serdes)
+static int bnx2x_reset_unicore(struct link_params *params,
+			       struct bnx2x_phy *phy,
+			       u8 set_serdes)
 {
 	struct bnx2x *bp = params->bp;
 	u16 mii_control;
@@ -2048,9 +4770,6 @@
 		if (vars->line_speed == SPEED_10000)
 			reg_val |=
 				MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4;
-		if (vars->line_speed == SPEED_13000)
-			reg_val |=
-				MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_13G;
 	}
 
 	CL22_WR_OVER_CL45(bp, phy,
@@ -2059,8 +4778,8 @@
 
 }
 
-static void bnx2x_set_brcm_cl37_advertisment(struct bnx2x_phy *phy,
-					     struct link_params *params)
+static void bnx2x_set_brcm_cl37_advertisement(struct bnx2x_phy *phy,
+					      struct link_params *params)
 {
 	struct bnx2x *bp = params->bp;
 	u16 val = 0;
@@ -2081,44 +4800,9 @@
 			  MDIO_OVER_1G_UP3, 0x400);
 }
 
-static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
-				     struct link_params *params, u16 *ieee_fc)
-{
-	struct bnx2x *bp = params->bp;
-	*ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
-	/*
-	 * Resolve pause mode and advertisement.
-	 * Please refer to Table 28B-3 of the 802.3ab-1999 spec
-	 */
-
-	switch (phy->req_flow_ctrl) {
-	case BNX2X_FLOW_CTRL_AUTO:
-		if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH)
-			*ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
-		else
-			*ieee_fc |=
-			MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
-		break;
-	case BNX2X_FLOW_CTRL_TX:
-		*ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
-		break;
-
-	case BNX2X_FLOW_CTRL_RX:
-	case BNX2X_FLOW_CTRL_BOTH:
-		*ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
-		break;
-
-	case BNX2X_FLOW_CTRL_NONE:
-	default:
-		*ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
-		break;
-	}
-	DP(NETIF_MSG_LINK, "ieee_fc = 0x%x\n", *ieee_fc);
-}
-
-static void bnx2x_set_ieee_aneg_advertisment(struct bnx2x_phy *phy,
-					     struct link_params *params,
-					     u16 ieee_fc)
+static void bnx2x_set_ieee_aneg_advertisement(struct bnx2x_phy *phy,
+					      struct link_params *params,
+					      u16 ieee_fc)
 {
 	struct bnx2x *bp = params->bp;
 	u16 val;
@@ -2252,35 +4936,8 @@
  * link management
  */
 
-static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result)
-{						/*  LD	    LP	 */
-	switch (pause_result) {			/* ASYM P ASYM P */
-	case 0xb:				/*   1  0   1  1 */
-		vars->flow_ctrl = BNX2X_FLOW_CTRL_TX;
-		break;
-
-	case 0xe:				/*   1  1   1  0 */
-		vars->flow_ctrl = BNX2X_FLOW_CTRL_RX;
-		break;
-
-	case 0x5:				/*   0  1   0  1 */
-	case 0x7:				/*   0  1   1  1 */
-	case 0xd:				/*   1  1   0  1 */
-	case 0xf:				/*   1  1   1  1 */
-		vars->flow_ctrl = BNX2X_FLOW_CTRL_BOTH;
-		break;
-
-	default:
-		break;
-	}
-	if (pause_result & (1<<0))
-		vars->link_status |= LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE;
-	if (pause_result & (1<<1))
-		vars->link_status |= LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE;
-}
-
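For reference, the 4-bit pause_result consumed by the helper removed from this spot packs the advertisement bits as [LD_ASYM, LD_PAUSE, LP_ASYM, LP_PAUSE], per Table 28B-3 of 802.3. A standalone mirror of the resolution table, not driver code:

	#include <stdio.h>

	static const char *resolve_pause(unsigned int pause_result)
	{
		switch (pause_result & 0xf) {
		case 0xb:		return "TX";	/* 1011 */
		case 0xe:		return "RX";	/* 1110 */
		case 0x5: case 0x7:
		case 0xd: case 0xf:	return "BOTH";
		default:		return "NONE";
		}
	}

	int main(void)
	{
		printf("%s\n", resolve_pause(0x7));	/* prints BOTH */
		return 0;
	}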
-static u8 bnx2x_direct_parallel_detect_used(struct bnx2x_phy *phy,
-					    struct link_params *params)
+static int bnx2x_direct_parallel_detect_used(struct bnx2x_phy *phy,
+					     struct link_params *params)
 {
 	struct bnx2x *bp = params->bp;
 	u16 pd_10g, status2_1000x;
@@ -2383,7 +5040,7 @@
 					 struct link_params *params)
 {
 	struct bnx2x *bp = params->bp;
-	u16 rx_status, ustat_val, cl37_fsm_recieved;
+	u16 rx_status, ustat_val, cl37_fsm_received;
 	DP(NETIF_MSG_LINK, "bnx2x_check_fallback_to_cl37\n");
 	/* Step 1: Make sure signal is detected */
 	CL22_RD_OVER_CL45(bp, phy,
@@ -2421,15 +5078,15 @@
 	CL22_RD_OVER_CL45(bp, phy,
 			  MDIO_REG_BANK_REMOTE_PHY,
 			  MDIO_REMOTE_PHY_MISC_RX_STATUS,
-			  &cl37_fsm_recieved);
-	if ((cl37_fsm_recieved &
+			  &cl37_fsm_received);
+	if ((cl37_fsm_received &
 	     (MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG |
 	     MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) !=
 	    (MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG |
 	      MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) {
 		DP(NETIF_MSG_LINK, "No CL37 FSM messages were received. "
 			     "misc_rx_status(0x8330) = 0x%x\n",
-			 cl37_fsm_recieved);
+			 cl37_fsm_received);
 		return;
 	}
 	/*
@@ -2462,45 +5119,25 @@
 		vars->link_status |=
 			LINK_STATUS_PARALLEL_DETECTION_USED;
 }
-
-static u8 bnx2x_link_settings_status(struct bnx2x_phy *phy,
+static int bnx2x_get_link_speed_duplex(struct bnx2x_phy *phy,
 				     struct link_params *params,
-				     struct link_vars *vars)
+				      struct link_vars *vars,
+				      u16 is_link_up,
+				      u16 speed_mask,
+				      u16 is_duplex)
 {
 	struct bnx2x *bp = params->bp;
-	u16 new_line_speed, gp_status;
-	u8 rc = 0;
-
-	/* Read gp_status */
-	CL22_RD_OVER_CL45(bp, phy,
-			  MDIO_REG_BANK_GP_STATUS,
-			  MDIO_GP_STATUS_TOP_AN_STATUS1,
-			  &gp_status);
-
 	if (phy->req_line_speed == SPEED_AUTO_NEG)
 		vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED;
-	if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) {
-		DP(NETIF_MSG_LINK, "phy link up gp_status=0x%x\n",
-			 gp_status);
+	if (is_link_up) {
+		DP(NETIF_MSG_LINK, "phy link up\n");
 
 		vars->phy_link_up = 1;
 		vars->link_status |= LINK_STATUS_LINK_UP;
 
-		if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS)
-			vars->duplex = DUPLEX_FULL;
-		else
-			vars->duplex = DUPLEX_HALF;
-
-		if (SINGLE_MEDIA_DIRECT(params)) {
-			bnx2x_flow_ctrl_resolve(phy, params, vars, gp_status);
-			if (phy->req_line_speed == SPEED_AUTO_NEG)
-				bnx2x_xgxs_an_resolve(phy, params, vars,
-						      gp_status);
-		}
-
-		switch (gp_status & GP_STATUS_SPEED_MASK) {
+		switch (speed_mask) {
 		case GP_STATUS_10M:
-			new_line_speed = SPEED_10;
+			vars->line_speed = SPEED_10;
 			if (vars->duplex == DUPLEX_FULL)
 				vars->link_status |= LINK_10TFD;
 			else
@@ -2508,7 +5145,7 @@
 			break;
 
 		case GP_STATUS_100M:
-			new_line_speed = SPEED_100;
+			vars->line_speed = SPEED_100;
 			if (vars->duplex == DUPLEX_FULL)
 				vars->link_status |= LINK_100TXFD;
 			else
@@ -2517,7 +5154,7 @@
 
 		case GP_STATUS_1G:
 		case GP_STATUS_1G_KX:
-			new_line_speed = SPEED_1000;
+			vars->line_speed = SPEED_1000;
 			if (vars->duplex == DUPLEX_FULL)
 				vars->link_status |= LINK_1000TFD;
 			else
@@ -2525,7 +5162,7 @@
 			break;
 
 		case GP_STATUS_2_5G:
-			new_line_speed = SPEED_2500;
+			vars->line_speed = SPEED_2500;
 			if (vars->duplex == DUPLEX_FULL)
 				vars->link_status |= LINK_2500TFD;
 			else
@@ -2536,50 +5173,28 @@
 		case GP_STATUS_6G:
 			DP(NETIF_MSG_LINK,
 				 "link speed unsupported  gp_status 0x%x\n",
-				  gp_status);
+				  speed_mask);
 			return -EINVAL;
 
 		case GP_STATUS_10G_KX4:
 		case GP_STATUS_10G_HIG:
 		case GP_STATUS_10G_CX4:
-			new_line_speed = SPEED_10000;
+		case GP_STATUS_10G_KR:
+		case GP_STATUS_10G_SFI:
+		case GP_STATUS_10G_XFI:
+			vars->line_speed = SPEED_10000;
 			vars->link_status |= LINK_10GTFD;
 			break;
-
-		case GP_STATUS_12G_HIG:
-			new_line_speed = SPEED_12000;
-			vars->link_status |= LINK_12GTFD;
+		case GP_STATUS_20G_DXGXS:
+			vars->line_speed = SPEED_20000;
+			vars->link_status |= LINK_20GTFD;
 			break;
-
-		case GP_STATUS_12_5G:
-			new_line_speed = SPEED_12500;
-			vars->link_status |= LINK_12_5GTFD;
-			break;
-
-		case GP_STATUS_13G:
-			new_line_speed = SPEED_13000;
-			vars->link_status |= LINK_13GTFD;
-			break;
-
-		case GP_STATUS_15G:
-			new_line_speed = SPEED_15000;
-			vars->link_status |= LINK_15GTFD;
-			break;
-
-		case GP_STATUS_16G:
-			new_line_speed = SPEED_16000;
-			vars->link_status |= LINK_16GTFD;
-			break;
-
 		default:
 			DP(NETIF_MSG_LINK,
 				  "link speed unsupported gp_status 0x%x\n",
-				  gp_status);
+				  speed_mask);
 			return -EINVAL;
 		}
-
-		vars->line_speed = new_line_speed;
-
 	} else { /* link_down */
 		DP(NETIF_MSG_LINK, "phy link down\n");
 
@@ -2588,7 +5203,47 @@
 		vars->duplex = DUPLEX_FULL;
 		vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
 		vars->mac_type = MAC_TYPE_NONE;
+	}
+	DP(NETIF_MSG_LINK, "phy_link_up %x line_speed %d\n",
+		    vars->phy_link_up, vars->line_speed);
+	return 0;
+}
 
+static int bnx2x_link_settings_status(struct bnx2x_phy *phy,
+				      struct link_params *params,
+				      struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u16 gp_status, duplex = DUPLEX_HALF, link_up = 0, speed_mask;
+	int rc = 0;
+
+	/* Read gp_status */
+	CL22_RD_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_GP_STATUS,
+			  MDIO_GP_STATUS_TOP_AN_STATUS1,
+			  &gp_status);
+	if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS)
+		duplex = DUPLEX_FULL;
+	if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS)
+		link_up = 1;
+	speed_mask = gp_status & GP_STATUS_SPEED_MASK;
+	DP(NETIF_MSG_LINK, "gp_status 0x%x, is_link_up %d, speed_mask 0x%x\n",
+		       gp_status, link_up, speed_mask);
+	rc = bnx2x_get_link_speed_duplex(phy, params, vars, link_up, speed_mask,
+					 duplex);
+	if (rc == -EINVAL)
+		return rc;
+
+	if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) {
+		if (SINGLE_MEDIA_DIRECT(params)) {
+			bnx2x_flow_ctrl_resolve(phy, params, vars, gp_status);
+			if (phy->req_line_speed == SPEED_AUTO_NEG)
+				bnx2x_xgxs_an_resolve(phy, params, vars,
+						      gp_status);
+		}
+	} else { /* link_down */
 		if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
 		    SINGLE_MEDIA_DIRECT(params)) {
 			/* Check signal is detected */
@@ -2596,13 +5251,86 @@
 		}
 	}
 
-	DP(NETIF_MSG_LINK, "gp_status 0x%x  phy_link_up %x line_speed %x\n",
-		 gp_status, vars->phy_link_up, vars->line_speed);
 	DP(NETIF_MSG_LINK, "duplex %x  flow_ctrl 0x%x link_status 0x%x\n",
 		   vars->duplex, vars->flow_ctrl, vars->link_status);
 	return rc;
 }
 
+static int bnx2x_warpcore_read_status(struct bnx2x_phy *phy,
+				     struct link_params *params,
+				     struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u8 lane;
+	u16 gp_status1, gp_speed, link_up, duplex = DUPLEX_FULL;
+	int rc = 0;
+	lane = bnx2x_get_warpcore_lane(phy, params);
+	/* Read gp_status */
+	if (phy->req_line_speed > SPEED_10000) {
+		u16 temp_link_up;
+		bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+				1, &temp_link_up);
+		bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+				1, &link_up);
+		DP(NETIF_MSG_LINK, "PCS RX link status = 0x%x-->0x%x\n",
+			       temp_link_up, link_up);
+		link_up &= (1<<2);
+		if (link_up)
+			bnx2x_ext_phy_resolve_fc(phy, params, vars);
+	} else {
+		bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+				MDIO_WC_REG_GP2_STATUS_GP_2_1, &gp_status1);
+		DP(NETIF_MSG_LINK, "0x81d1 = 0x%x\n", gp_status1);
+		/* Check for either KR or generic link up. */
+		gp_status1 = ((gp_status1 >> 8) & 0xf) |
+			((gp_status1 >> 12) & 0xf);
+		link_up = gp_status1 & (1 << lane);
+		if (link_up && SINGLE_MEDIA_DIRECT(params)) {
+			u16 pd, gp_status4;
+			if (phy->req_line_speed == SPEED_AUTO_NEG) {
+				/* Check Autoneg complete */
+				bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+						MDIO_WC_REG_GP2_STATUS_GP_2_4,
+						&gp_status4);
+				if (gp_status4 & ((1<<12)<<lane))
+					vars->link_status |=
+					LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
+
+				/* Check parallel detect used */
+				bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+						MDIO_WC_REG_PAR_DET_10G_STATUS,
+						&pd);
+				if (pd & (1<<15))
+					vars->link_status |=
+					LINK_STATUS_PARALLEL_DETECTION_USED;
+			}
+			bnx2x_ext_phy_resolve_fc(phy, params, vars);
+		}
+	}
+
+	if (lane < 2) {
+		bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+				MDIO_WC_REG_GP2_STATUS_GP_2_2, &gp_speed);
+	} else {
+		bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+				MDIO_WC_REG_GP2_STATUS_GP_2_3, &gp_speed);
+	}
+	DP(NETIF_MSG_LINK, "lane %d gp_speed 0x%x\n", lane, gp_speed);
+
+	if ((lane & 1) == 0)
+		gp_speed <<= 8;
+	gp_speed &= 0x3f00;
+
+	rc = bnx2x_get_link_speed_duplex(phy, params, vars, link_up, gp_speed,
+					 duplex);
+
+	DP(NETIF_MSG_LINK, "duplex %x  flow_ctrl 0x%x link_status 0x%x\n",
+		   vars->duplex, vars->flow_ctrl, vars->link_status);
+	return rc;
+}
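The GP2 speed status is packed two lanes per 16-bit register (lanes 0/1 in GP_2_2, lanes 2/3 in GP_2_3), apparently with the even lane's code in the low byte. A standalone sketch of the byte steering performed above:

	/* isolate one lane's speed code; mirrors the gp_speed handling */
	static unsigned short lane_speed_bits(unsigned short gp_speed, int lane)
	{
		if ((lane & 1) == 0)	/* even lane: move low byte up */
			gp_speed <<= 8;
		return gp_speed & 0x3f00;	/* bits 13:8 carry the code */
	}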
 static void bnx2x_set_gmii_tx_driver(struct link_params *params)
 {
 	struct bnx2x *bp = params->bp;
@@ -2642,8 +5370,8 @@
 	}
 }
 
-static u8 bnx2x_emac_program(struct link_params *params,
-			     struct link_vars *vars)
+static int bnx2x_emac_program(struct link_params *params,
+			      struct link_vars *vars)
 {
 	struct bnx2x *bp = params->bp;
 	u8 port = params->port;
@@ -2713,9 +5441,9 @@
 	}
 }
 
-static void bnx2x_init_internal_phy(struct bnx2x_phy *phy,
-				    struct link_params *params,
-				    struct link_vars *vars)
+static void bnx2x_xgxs_config_init(struct bnx2x_phy *phy,
+				   struct link_params *params,
+				   struct link_vars *vars)
 {
 	struct bnx2x *bp = params->bp;
 	u8 enable_cl73 = (SINGLE_MEDIA_DIRECT(params) ||
@@ -2742,11 +5470,11 @@
 			DP(NETIF_MSG_LINK, "not SGMII, AN\n");
 
 			/* AN enabled */
-			bnx2x_set_brcm_cl37_advertisment(phy, params);
+			bnx2x_set_brcm_cl37_advertisement(phy, params);
 
 			/* program duplex & pause advertisement (for aneg) */
-			bnx2x_set_ieee_aneg_advertisment(phy, params,
-							 vars->ieee_fc);
+			bnx2x_set_ieee_aneg_advertisement(phy, params,
+							  vars->ieee_fc);
 
 			/* enable autoneg */
 			bnx2x_set_autoneg(phy, params, vars, enable_cl73);
@@ -2762,29 +5490,12 @@
 	}
 }
 
-static u8 bnx2x_init_serdes(struct bnx2x_phy *phy,
-			    struct link_params *params,
-			    struct link_vars *vars)
-{
-	u8 rc;
-	vars->phy_flags |= PHY_SGMII_FLAG;
-	bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
-	bnx2x_set_aer_mmd_serdes(params->bp, phy);
-	rc = bnx2x_reset_unicore(params, phy, 1);
-	/* reset the SerDes and wait for reset bit return low */
-	if (rc != 0)
-		return rc;
-	bnx2x_set_aer_mmd_serdes(params->bp, phy);
-
-	return rc;
-}
-
-static u8 bnx2x_init_xgxs(struct bnx2x_phy *phy,
+static int bnx2x_prepare_xgxs(struct bnx2x_phy *phy,
 			  struct link_params *params,
 			  struct link_vars *vars)
 {
-	u8 rc;
-	vars->phy_flags = PHY_XGXS_FLAG;
+	int rc;
+	vars->phy_flags |= PHY_XGXS_FLAG;
 	if ((phy->req_line_speed &&
 	     ((phy->req_line_speed == SPEED_100) ||
 	      (phy->req_line_speed == SPEED_10))) ||
@@ -2792,26 +5503,28 @@
 	     (phy->speed_cap_mask >=
 	      PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) &&
 	     (phy->speed_cap_mask <
-	      PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
-	     ))
+	      PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
+	    (phy->type == PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT_SD))
 		vars->phy_flags |= PHY_SGMII_FLAG;
 	else
 		vars->phy_flags &= ~PHY_SGMII_FLAG;
 
 	bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
-	bnx2x_set_aer_mmd_xgxs(params, phy);
-	bnx2x_set_master_ln(params, phy);
+	bnx2x_set_aer_mmd(params, phy);
+	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
+		bnx2x_set_master_ln(params, phy);
 
 	rc = bnx2x_reset_unicore(params, phy, 0);
 	/* reset the SerDes and wait for reset bit return low */
 	if (rc != 0)
 		return rc;
 
-	bnx2x_set_aer_mmd_xgxs(params, phy);
-
+	bnx2x_set_aer_mmd(params, phy);
 	/* setting the masterLn_def again after the reset */
-	bnx2x_set_master_ln(params, phy);
-	bnx2x_set_swap_lanes(params, phy);
+	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) {
+		bnx2x_set_master_ln(params, phy);
+		bnx2x_set_swap_lanes(params, phy);
+	}
 
 	return rc;
 }
@@ -2823,8 +5536,13 @@
 	u16 cnt, ctrl;
 	/* Wait for soft reset to get cleared up to 1 sec */
 	for (cnt = 0; cnt < 1000; cnt++) {
-		bnx2x_cl45_read(bp, phy,
-				MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, &ctrl);
+		if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54616)
+			bnx2x_cl22_read(bp, phy,
+				MDIO_PMA_REG_CTRL, &ctrl);
+		else
+			bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_CTRL, &ctrl);
 		if (!(ctrl & (1<<15)))
 			break;
 		msleep(1);
@@ -2845,7 +5563,11 @@
 	struct bnx2x *bp = params->bp;
 
 	/* Setting the status to report on link up for either XGXS or SerDes */
-	if (params->switch_cfg == SWITCH_CFG_10G) {
+	if (CHIP_IS_E3(bp)) {
+		mask = NIG_MASK_XGXS0_LINK_STATUS;
+		if (!(SINGLE_MEDIA_DIRECT(params)))
+			mask |= NIG_MASK_MI_INT;
+	} else if (params->switch_cfg == SWITCH_CFG_10G) {
 		mask = (NIG_MASK_XGXS0_LINK10G |
 			NIG_MASK_XGXS0_LINK_STATUS);
 		DP(NETIF_MSG_LINK, "enabled XGXS interrupt\n");
@@ -2918,11 +5640,11 @@
 }
 
 static void bnx2x_link_int_ack(struct link_params *params,
-			       struct link_vars *vars, u8 is_10g)
+			       struct link_vars *vars, u8 is_10g_plus)
 {
 	struct bnx2x *bp = params->bp;
 	u8 port = params->port;
-
+	u32 mask;
 	/*
 	 * First reset all statuses; we assume only one line will
 	 * change at a time
@@ -2932,47 +5654,34 @@
 			NIG_STATUS_XGXS0_LINK_STATUS |
 			NIG_STATUS_SERDES0_LINK_STATUS));
 	if (vars->phy_link_up) {
-		if (is_10g) {
-			/*
-			 * Disable the 10G link interrupt by writing 1 to the
-			 * status register
-			 */
-			DP(NETIF_MSG_LINK, "10G XGXS phy link up\n");
-			bnx2x_bits_en(bp,
-				      NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
-				      NIG_STATUS_XGXS0_LINK10G);
-
-		} else if (params->switch_cfg == SWITCH_CFG_10G) {
-			/*
-			 * Disable the link interrupt by writing 1 to the
-			 * relevant lane in the status register
-			 */
-			u32 ser_lane = ((params->lane_config &
+		if (USES_WARPCORE(bp))
+			mask = NIG_STATUS_XGXS0_LINK_STATUS;
+		else {
+			if (is_10g_plus)
+				mask = NIG_STATUS_XGXS0_LINK10G;
+			else if (params->switch_cfg == SWITCH_CFG_10G) {
+				/*
+				 * Disable the link interrupt by writing 1 to
+				 * the relevant lane in the status register
+				 */
+				u32 ser_lane =
+					((params->lane_config &
 				    PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
 				    PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
-
-			DP(NETIF_MSG_LINK, "%d speed XGXS phy link up\n",
-				 vars->line_speed);
-			bnx2x_bits_en(bp,
-				      NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
-				      ((1 << ser_lane) <<
-				       NIG_STATUS_XGXS0_LINK_STATUS_SIZE));
-
-		} else { /* SerDes */
-			DP(NETIF_MSG_LINK, "SerDes phy link up\n");
-			/*
-			 * Disable the link interrupt by writing 1 to the status
-			 * register
-			 */
-			bnx2x_bits_en(bp,
-				      NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
-				      NIG_STATUS_SERDES0_LINK_STATUS);
+				mask = ((1 << ser_lane) <<
+				       NIG_STATUS_XGXS0_LINK_STATUS_SIZE);
+			} else
+				mask = NIG_STATUS_SERDES0_LINK_STATUS;
 		}
-
+		DP(NETIF_MSG_LINK, "Ack link up interrupt with mask 0x%x\n",
+			       mask);
+		bnx2x_bits_en(bp,
+			      NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
+			      mask);
 	}
 }
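For the per-lane XGXS case the ack mask is the lane bit shifted past the fixed status bits, (1 << ser_lane) << NIG_STATUS_XGXS0_LINK_STATUS_SIZE. A standalone sketch; the SIZE value below is a placeholder, not the real register definition:

	#define XGXS0_LINK_STATUS_SIZE	18	/* placeholder value */

	static unsigned int lane_ack_mask(unsigned int ser_lane)
	{
		return (1u << ser_lane) << XGXS0_LINK_STATUS_SIZE;
	}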
 
-static u8 bnx2x_format_ver(u32 num, u8 *str, u16 *len)
+static int bnx2x_format_ver(u32 num, u8 *str, u16 *len)
 {
 	u8 *str_ptr = str;
 	u32 mask = 0xf0000000;
@@ -3011,19 +5720,19 @@
 }
 
 
-static u8 bnx2x_null_format_ver(u32 spirom_ver, u8 *str, u16 *len)
+static int bnx2x_null_format_ver(u32 spirom_ver, u8 *str, u16 *len)
 {
 	str[0] = '\0';
 	(*len)--;
 	return 0;
 }
 
-u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
-			      u8 *version, u16 len)
+int bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
+				 u8 *version, u16 len)
 {
 	struct bnx2x *bp;
 	u32 spirom_ver = 0;
-	u8 status = 0;
+	int status = 0;
 	u8 *ver_p = version;
 	u16 remain_len = len;
 	if (version == NULL || params == NULL)
@@ -3065,15 +5774,18 @@
 	struct bnx2x *bp = params->bp;
 
 	if (phy->req_line_speed != SPEED_1000) {
-		u32 md_devad;
+		u32 md_devad = 0;
 
 		DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n");
 
-		/* change the uni_phy_addr in the nig */
-		md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD +
-				       port*0x18));
+		if (!CHIP_IS_E3(bp)) {
+			/* change the uni_phy_addr in the nig */
+			md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD +
+					       port*0x18));
 
-		REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 0x5);
+			REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
+			       0x5);
+		}
 
 		bnx2x_cl45_write(bp, phy,
 				 5,
@@ -3088,10 +5800,13 @@
 				 0x6041);
 		msleep(200);
 		/* set aer mmd back */
-		bnx2x_set_aer_mmd_xgxs(params, phy);
+		bnx2x_set_aer_mmd(params, phy);
 
-		/* and md_devad */
-		REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, md_devad);
+		if (!CHIP_IS_E3(bp)) {
+			/* and md_devad */
+			REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
+			       md_devad);
+		}
 	} else {
 		u16 mii_ctrl;
 		DP(NETIF_MSG_LINK, "XGXS 1G loopback enable\n");
@@ -3107,12 +5822,13 @@
 	}
 }
 
-u8 bnx2x_set_led(struct link_params *params,
-		 struct link_vars *vars, u8 mode, u32 speed)
+int bnx2x_set_led(struct link_params *params,
+		  struct link_vars *vars, u8 mode, u32 speed)
 {
 	u8 port = params->port;
 	u16 hw_led_mode = params->hw_led_mode;
-	u8 rc = 0, phy_idx;
+	int rc = 0;
+	u8 phy_idx;
 	u32 tmp;
 	u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
 	struct bnx2x *bp = params->bp;
@@ -3146,8 +5862,10 @@
 		if (!vars->link_up)
 			break;
 	case LED_MODE_ON:
-		if (params->phy[EXT_PHY1].type ==
-		    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 &&
+		if (((params->phy[EXT_PHY1].type ==
+			  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
+			 (params->phy[EXT_PHY1].type ==
+			  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722)) &&
 		    CHIP_IS_E2(bp) && params->num_phys == 2) {
 			/*
 			 * This is a work-around for E2+8727 Configurations
@@ -3162,7 +5880,9 @@
 					(tmp | EMAC_LED_OVERRIDE));
 				return rc;
 			}
-		} else if (SINGLE_MEDIA_DIRECT(params)) {
+		} else if (SINGLE_MEDIA_DIRECT(params) &&
+			   (CHIP_IS_E1x(bp) ||
+			    CHIP_IS_E2(bp))) {
 			/*
 			 * This is a work-around for HW issue found when link
 			 * is up in CL73
@@ -3214,21 +5934,49 @@
  * This function reflects the actual link state, read DIRECTLY from the
  * HW
  */
-u8 bnx2x_test_link(struct link_params *params, struct link_vars *vars,
-		   u8 is_serdes)
+int bnx2x_test_link(struct link_params *params, struct link_vars *vars,
+		    u8 is_serdes)
 {
 	struct bnx2x *bp = params->bp;
 	u16 gp_status = 0, phy_index = 0;
 	u8 ext_phy_link_up = 0, serdes_phy_type;
 	struct link_vars temp_vars;
+	struct bnx2x_phy *int_phy = &params->phy[INT_PHY];
 
-	CL22_RD_OVER_CL45(bp, &params->phy[INT_PHY],
+	if (CHIP_IS_E3(bp)) {
+		u16 link_up;
+		if (params->req_line_speed[LINK_CONFIG_IDX(INT_PHY)] >
+		    SPEED_10000) {
+			/* Check 20G link */
+			bnx2x_cl45_read(bp, int_phy, MDIO_WC_DEVAD,
+					1, &link_up);
+			bnx2x_cl45_read(bp, int_phy, MDIO_WC_DEVAD,
+					1, &link_up);
+			link_up &= (1<<2);
+		} else {
+			/* Check 10G link and below*/
+			u8 lane = bnx2x_get_warpcore_lane(int_phy, params);
+			bnx2x_cl45_read(bp, int_phy, MDIO_WC_DEVAD,
+					MDIO_WC_REG_GP2_STATUS_GP_2_1,
+					&gp_status);
+			gp_status = ((gp_status >> 8) & 0xf) |
+				((gp_status >> 12) & 0xf);
+			link_up = gp_status & (1 << lane);
+		}
+		if (!link_up)
+			return -ESRCH;
+	} else {
+		CL22_RD_OVER_CL45(bp, int_phy,
 			  MDIO_REG_BANK_GP_STATUS,
 			  MDIO_GP_STATUS_TOP_AN_STATUS1,
 			  &gp_status);
 	/* link is up only if both local phy and external phy are up */
 	if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS))
 		return -ESRCH;
+	}
+	/* In XGXS loopback mode, do not check external PHY */
+	if (params->loopback_mode == LOOPBACK_XGXS)
+		return 0;
 
 	switch (params->num_phys) {
 	case 1:
@@ -3245,7 +5993,9 @@
 			serdes_phy_type = ((params->phy[phy_index].media_type ==
 					    ETH_PHY_SFP_FIBER) ||
 					   (params->phy[phy_index].media_type ==
-					    ETH_PHY_XFP_FIBER));
+					    ETH_PHY_XFP_FIBER) ||
+					   (params->phy[phy_index].media_type ==
+					    ETH_PHY_DA_TWINAX));
 
 			if (is_serdes != serdes_phy_type)
 				continue;
@@ -3263,10 +6013,10 @@
 	return -ESRCH;
 }
 
-static u8 bnx2x_link_initialize(struct link_params *params,
-				struct link_vars *vars)
+static int bnx2x_link_initialize(struct link_params *params,
+				 struct link_vars *vars)
 {
-	u8 rc = 0;
+	int rc = 0;
 	u8 phy_index, non_ext_phy;
 	struct bnx2x *bp = params->bp;
 	/*
@@ -3282,12 +6032,8 @@
 	 * (no external phys), or this board has external phy which requires
 	 * to first.
 	 */
-
-	if (params->phy[INT_PHY].config_init)
-		params->phy[INT_PHY].config_init(
-			&params->phy[INT_PHY],
-			params, vars);
-
+	if (!USES_WARPCORE(bp))
+		bnx2x_prepare_xgxs(&params->phy[INT_PHY], params, vars);
 	/* init ext phy and enable link state int */
 	non_ext_phy = (SINGLE_MEDIA_DIRECT(params) ||
 		       (params->loopback_mode == LOOPBACK_XGXS));
@@ -3296,13 +6042,22 @@
 	    (params->phy[EXT_PHY1].flags & FLAGS_INIT_XGXS_FIRST) ||
 	    (params->loopback_mode == LOOPBACK_EXT_PHY)) {
 		struct bnx2x_phy *phy = &params->phy[INT_PHY];
-		if (vars->line_speed == SPEED_AUTO_NEG)
+		if (vars->line_speed == SPEED_AUTO_NEG &&
+		    (CHIP_IS_E1x(bp) ||
+		     CHIP_IS_E2(bp)))
 			bnx2x_set_parallel_detection(phy, params);
-		bnx2x_init_internal_phy(phy, params, vars);
+		if (params->phy[INT_PHY].config_init)
+			params->phy[INT_PHY].config_init(phy,
+							 params,
+							 vars);
 	}
 
 	/* Init external phy*/
-	if (!non_ext_phy)
+	if (non_ext_phy) {
+		if (params->phy[INT_PHY].supported &
+		    SUPPORTED_FIBRE)
+			vars->link_status |= LINK_STATUS_SERDES_LINK;
+	} else {
 		for (phy_index = EXT_PHY1; phy_index < params->num_phys;
 		      phy_index++) {
 			/*
@@ -3311,17 +6066,22 @@
 			 * need to initialize the first phy, since they are
 			 * connected.
 			 */
+			if (params->phy[phy_index].supported &
+			    SUPPORTED_FIBRE)
+				vars->link_status |= LINK_STATUS_SERDES_LINK;
+
 			if (phy_index == EXT_PHY2 &&
 			    (bnx2x_phy_selection(params) ==
 			     PORT_HW_CFG_PHY_SELECTION_FIRST_PHY)) {
-				DP(NETIF_MSG_LINK, "Ignoring second phy\n");
+				DP(NETIF_MSG_LINK, "Not initializing"
+						" second phy\n");
 				continue;
 			}
 			params->phy[phy_index].config_init(
 				&params->phy[phy_index],
 				params, vars);
 		}
-
+	}
 	/* Reset the interrupt indication after phy was initialized */
 	bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 +
 		       params->port*4,
@@ -3329,6 +6089,7 @@
 			NIG_STATUS_XGXS0_LINK_STATUS |
 			NIG_STATUS_SERDES0_LINK_STATUS |
 			NIG_MASK_MI_INT));
+	bnx2x_update_mng(params, vars->link_status);
 	return rc;
 }
 
@@ -3359,20 +6120,25 @@
 	DP(NETIF_MSG_LINK, "reset external PHY\n");
 }
 
-static u8 bnx2x_update_link_down(struct link_params *params,
-			       struct link_vars *vars)
+static int bnx2x_update_link_down(struct link_params *params,
+				  struct link_vars *vars)
 {
 	struct bnx2x *bp = params->bp;
 	u8 port = params->port;
 
 	DP(NETIF_MSG_LINK, "Port %x: Link is down\n", port);
 	bnx2x_set_led(params, vars, LED_MODE_OFF, 0);
-
+	vars->phy_flags &= ~PHY_PHYSICAL_LINK_FLAG;
 	/* indicate no mac active */
 	vars->mac_type = MAC_TYPE_NONE;
 
 	/* update shared memory */
-	vars->link_status = 0;
+	vars->link_status &= ~(LINK_STATUS_SPEED_AND_DUPLEX_MASK |
+			       LINK_STATUS_LINK_UP |
+			       LINK_STATUS_AUTO_NEGOTIATE_COMPLETE |
+			       LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK |
+			       LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK |
+			       LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK);
 	vars->line_speed = 0;
 	bnx2x_update_mng(params, vars->link_status);
 
@@ -3380,26 +6146,34 @@
 	REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
 
 	/* disable emac */
-	REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
+	if (!CHIP_IS_E3(bp))
+		REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
 
 	msleep(10);
-
-	/* reset BigMac */
-	bnx2x_bmac_rx_disable(bp, params->port);
-	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+	/* reset BigMac/Xmac */
+	if (CHIP_IS_E1x(bp) ||
+	    CHIP_IS_E2(bp)) {
+		bnx2x_bmac_rx_disable(bp, params->port);
+		REG_WR(bp, GRCBASE_MISC +
+		       MISC_REGISTERS_RESET_REG_2_CLEAR,
 	       (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+	}
+	if (CHIP_IS_E3(bp))
+		bnx2x_xmac_disable(params);
+
 	return 0;
 }
 
-static u8 bnx2x_update_link_up(struct link_params *params,
-			     struct link_vars *vars,
-			     u8 link_10g)
+static int bnx2x_update_link_up(struct link_params *params,
+				struct link_vars *vars,
+				u8 link_10g)
 {
 	struct bnx2x *bp = params->bp;
 	u8 port = params->port;
-	u8 rc = 0;
+	int rc = 0;
 
 	vars->link_status |= LINK_STATUS_LINK_UP;
+	vars->phy_flags |= PHY_PHYSICAL_LINK_FLAG;
 
 	if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
 		vars->link_status |=
@@ -3408,25 +6182,48 @@
 	if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
 		vars->link_status |=
 			LINK_STATUS_RX_FLOW_CONTROL_ENABLED;
-
-	if (link_10g) {
-		bnx2x_bmac_enable(params, vars, 0);
+	if (USES_WARPCORE(bp)) {
+		if (link_10g) {
+			if (bnx2x_xmac_enable(params, vars, 0) ==
+			    -ESRCH) {
+				DP(NETIF_MSG_LINK, "Found errors on XMAC\n");
+				vars->link_up = 0;
+				vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
+				vars->link_status &= ~LINK_STATUS_LINK_UP;
+			}
+		} else
+			bnx2x_umac_enable(params, vars, 0);
 		bnx2x_set_led(params, vars,
-			      LED_MODE_OPER, SPEED_10000);
-	} else {
-		rc = bnx2x_emac_program(params, vars);
+			      LED_MODE_OPER, vars->line_speed);
+	}
+	if ((CHIP_IS_E1x(bp) ||
+	     CHIP_IS_E2(bp))) {
+		if (link_10g) {
+			if (bnx2x_bmac_enable(params, vars, 0) ==
+			    -ESRCH) {
+				DP(NETIF_MSG_LINK, "Found errors on BMAC\n");
+				vars->link_up = 0;
+				vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
+				vars->link_status &= ~LINK_STATUS_LINK_UP;
+			}
 
-		bnx2x_emac_enable(params, vars, 0);
+			bnx2x_set_led(params, vars,
+				      LED_MODE_OPER, SPEED_10000);
+		} else {
+			rc = bnx2x_emac_program(params, vars);
+			bnx2x_emac_enable(params, vars, 0);
 
-		/* AN complete? */
-		if ((vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE)
-		    && (!(vars->phy_flags & PHY_SGMII_FLAG)) &&
-		    SINGLE_MEDIA_DIRECT(params))
-			bnx2x_set_gmii_tx_driver(params);
+			/* AN complete? */
+			if ((vars->link_status &
+			     LINK_STATUS_AUTO_NEGOTIATE_COMPLETE)
+			    && (!(vars->phy_flags & PHY_SGMII_FLAG)) &&
+			    SINGLE_MEDIA_DIRECT(params))
+				bnx2x_set_gmii_tx_driver(params);
+		}
 	}
 
 	/* PBF - link up */
-	if (!(CHIP_IS_E2(bp)))
+	if (CHIP_IS_E1x(bp))
 		rc |= bnx2x_pbf_update(params, vars->flow_ctrl,
 				       vars->line_speed);
 
@@ -3451,17 +6248,18 @@
  *   external phy needs to be up, and at least one of the 2
  *   external phy link must be up.
  */
-u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
+int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
 {
 	struct bnx2x *bp = params->bp;
 	struct link_vars phy_vars[MAX_PHYS];
 	u8 port = params->port;
-	u8 link_10g, phy_index;
-	u8 ext_phy_link_up = 0, cur_link_up, rc = 0;
+	u8 link_10g_plus, phy_index;
+	u8 ext_phy_link_up = 0, cur_link_up;
+	int rc = 0;
 	u8 is_mi_int = 0;
 	u16 ext_phy_line_speed = 0, prev_line_speed = vars->line_speed;
 	u8 active_external_phy = INT_PHY;
-	vars->link_status = 0;
+	vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG;
 	for (phy_index = INT_PHY; phy_index < params->num_phys;
 	      phy_index++) {
 		phy_vars[phy_index].flow_ctrl = 0;
@@ -3470,8 +6268,12 @@
 		phy_vars[phy_index].duplex = DUPLEX_FULL;
 		phy_vars[phy_index].phy_link_up = 0;
 		phy_vars[phy_index].link_up = 0;
+		phy_vars[phy_index].fault_detected = 0;
 	}
 
+	if (USES_WARPCORE(bp))
+		bnx2x_set_aer_mmd(params, &params->phy[INT_PHY]);
+
 	DP(NETIF_MSG_LINK, "port %x, XGXS?%x, int_status 0x%x\n",
 		 port, (vars->phy_flags & PHY_XGXS_FLAG),
 		 REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4));
@@ -3488,13 +6290,14 @@
 	  REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68));
 
 	/* disable emac */
-	REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
+	if (!CHIP_IS_E3(bp))
+		REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
 
 	/*
 	 * Step 1:
 	 * Check external link change only for external phys, and apply
 	 * priority selection between them in case the link on both phys
-	 * is up. Note that the instead of the common vars, a temporary
+	 * is up. Note that instead of the common vars, a temporary
 	 * vars argument is used since each phy may have different link/
 	 * speed/duplex result
 	 */
@@ -3601,6 +6404,8 @@
 		if (params->phy[active_external_phy].supported &
 		    SUPPORTED_FIBRE)
 			vars->link_status |= LINK_STATUS_SERDES_LINK;
+		else
+			vars->link_status &= ~LINK_STATUS_SERDES_LINK;
 		DP(NETIF_MSG_LINK, "Active external phy selected: %x\n",
 			   active_external_phy);
 	}
@@ -3640,14 +6445,9 @@
 	}
 
 	/* anything 10 and over uses the bmac */
-	link_10g = ((vars->line_speed == SPEED_10000) ||
-		    (vars->line_speed == SPEED_12000) ||
-		    (vars->line_speed == SPEED_12500) ||
-		    (vars->line_speed == SPEED_13000) ||
-		    (vars->line_speed == SPEED_15000) ||
-		    (vars->line_speed == SPEED_16000));
+	link_10g_plus = (vars->line_speed >= SPEED_10000);
 
-	bnx2x_link_int_ack(params, vars, link_10g);
+	bnx2x_link_int_ack(params, vars, link_10g_plus);
 
 	/*
 	 * In case external phy link is up, and internal link is down
@@ -3671,21 +6471,24 @@
 				vars->phy_flags |= PHY_SGMII_FLAG;
 			else
 				vars->phy_flags &= ~PHY_SGMII_FLAG;
-			bnx2x_init_internal_phy(&params->phy[INT_PHY],
-						params,
+
+			if (params->phy[INT_PHY].config_init)
+				params->phy[INT_PHY].config_init(
+					&params->phy[INT_PHY], params,
 						vars);
 		}
 	}
 	/*
 	 * Link is up only if both local phy and external phy (in case of
-	 * non-direct board) are up
+	 * non-direct board) are up and no fault detected on active PHY.
 	 */
 	vars->link_up = (vars->phy_link_up &&
 			 (ext_phy_link_up ||
-			  SINGLE_MEDIA_DIRECT(params)));
+			  SINGLE_MEDIA_DIRECT(params)) &&
+			 (phy_vars[active_external_phy].fault_detected == 0));
 
 	if (vars->link_up)
-		rc = bnx2x_update_link_up(params, vars, link_10g);
+		rc = bnx2x_update_link_up(params, vars, link_10g_plus);
 	else
 		rc = bnx2x_update_link_down(params, vars);
 
@@ -3729,69 +6532,6 @@
 				  phy->ver_addr);
 }
 
-static void bnx2x_ext_phy_set_pause(struct link_params *params,
-				    struct bnx2x_phy *phy,
-				    struct link_vars *vars)
-{
-	u16 val;
-	struct bnx2x *bp = params->bp;
-	/* read modify write pause advertizing */
-	bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, &val);
-
-	val &= ~MDIO_AN_REG_ADV_PAUSE_BOTH;
-
-	/* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
-	bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
-	if ((vars->ieee_fc &
-	    MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
-	    MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
-		val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC;
-	}
-	if ((vars->ieee_fc &
-	    MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
-	    MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) {
-		val |= MDIO_AN_REG_ADV_PAUSE_PAUSE;
-	}
-	DP(NETIF_MSG_LINK, "Ext phy AN advertize 0x%x\n", val);
-	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, val);
-}
-
-static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
-				   struct link_params *params,
-				   struct link_vars *vars)
-{
-	struct bnx2x *bp = params->bp;
-	u16 ld_pause;		/* local */
-	u16 lp_pause;		/* link partner */
-	u16 pause_result;
-	u8 ret = 0;
-	/* read twice */
-
-	vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
-
-	if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO)
-		vars->flow_ctrl = phy->req_flow_ctrl;
-	else if (phy->req_line_speed != SPEED_AUTO_NEG)
-		vars->flow_ctrl = params->req_fc_auto_adv;
-	else if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
-		ret = 1;
-		bnx2x_cl45_read(bp, phy,
-				MDIO_AN_DEVAD,
-				MDIO_AN_REG_ADV_PAUSE, &ld_pause);
-		bnx2x_cl45_read(bp, phy,
-				MDIO_AN_DEVAD,
-				MDIO_AN_REG_LP_AUTO_NEG, &lp_pause);
-		pause_result = (ld_pause &
-				MDIO_AN_REG_ADV_PAUSE_MASK) >> 8;
-		pause_result |= (lp_pause &
-				 MDIO_AN_REG_ADV_PAUSE_MASK) >> 10;
-		DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x\n",
-		   pause_result);
-		bnx2x_pause_resolve(vars, pause_result);
-	}
-	return ret;
-}
-
 static void bnx2x_ext_phy_10G_an_resolve(struct bnx2x *bp,
 				       struct bnx2x_phy *phy,
 				       struct link_vars *vars)
@@ -3845,13 +6585,13 @@
 			   pause_result);
 	}
 }
-static u8 bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
-					      struct bnx2x_phy *phy,
-					      u8 port)
+static int bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
+					     struct bnx2x_phy *phy,
+					     u8 port)
 {
 	u32 count = 0;
 	u16 fw_ver1, fw_msgout;
-	u8 rc = 0;
+	int rc = 0;
 
 	/* Boot port from external ROM  */
 	/* EDC grst */
@@ -3926,7 +6666,7 @@
 /******************************************************************/
 /*			BCM8073 PHY SECTION			  */
 /******************************************************************/
-static u8 bnx2x_8073_is_snr_needed(struct bnx2x *bp, struct bnx2x_phy *phy)
+static int bnx2x_8073_is_snr_needed(struct bnx2x *bp, struct bnx2x_phy *phy)
 {
 	/* This is only required for 8073A1, version 102 only */
 	u16 val;
@@ -3952,7 +6692,7 @@
 	return 1;
 }
 
-static u8 bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy)
+static int bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy)
 {
 	u16 val, cnt, cnt1 ;
 
@@ -4059,9 +6799,9 @@
 	msleep(500);
 }
 
-static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
-				 struct link_params *params,
-				 struct link_vars *vars)
+static int bnx2x_8073_config_init(struct bnx2x_phy *phy,
+				  struct link_params *params,
+				  struct link_vars *vars)
 {
 	struct bnx2x *bp = params->bp;
 	u16 val = 0, tmp1;
@@ -4081,9 +6821,9 @@
 
 	/* enable LASI */
 	bnx2x_cl45_write(bp, phy,
-			 MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM_CTRL, (1<<2));
+			 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, (1<<2));
 	bnx2x_cl45_write(bp, phy,
-			 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL,  0x0004);
+			 MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL,  0x0004);
 
 	bnx2x_8073_set_pause_cl37(params, phy, vars);
 
@@ -4091,7 +6831,7 @@
 			MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1);
 
 	bnx2x_cl45_read(bp, phy,
-			MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM, &tmp1);
+			MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1);
 
 	DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1);
 
@@ -4225,7 +6965,7 @@
 	u16 an1000_status = 0;
 
 	bnx2x_cl45_read(bp, phy,
-			MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS, &val1);
+			MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
 
 	DP(NETIF_MSG_LINK, "8703 LASI status 0x%x\n", val1);
 
@@ -4241,7 +6981,7 @@
 
 	/* Check the LASI */
 	bnx2x_cl45_read(bp, phy,
-			MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM, &val2);
+			MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &val2);
 
 	DP(NETIF_MSG_LINK, "KR 0x9003 0x%x\n", val2);
 
@@ -4367,9 +7107,9 @@
 /******************************************************************/
 /*			BCM8705 PHY SECTION			  */
 /******************************************************************/
-static u8 bnx2x_8705_config_init(struct bnx2x_phy *phy,
-				 struct link_params *params,
-				 struct link_vars *vars)
+static int bnx2x_8705_config_init(struct bnx2x_phy *phy,
+				  struct link_params *params,
+				  struct link_vars *vars)
 {
 	struct bnx2x *bp = params->bp;
 	DP(NETIF_MSG_LINK, "init 8705\n");
@@ -4443,9 +7183,10 @@
 	swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
 	return gpio_port ^ (swap_val && swap_override);
 }
-static void bnx2x_sfp_set_transmitter(struct link_params *params,
-				      struct bnx2x_phy *phy,
-				      u8 tx_en)
+
+static void bnx2x_sfp_e1e2_set_transmitter(struct link_params *params,
+					   struct bnx2x_phy *phy,
+					   u8 tx_en)
 {
 	u16 val;
 	u8 port = params->port;
@@ -4500,9 +7241,21 @@
 	}
 }
 
-static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
-					    struct link_params *params,
-					    u16 addr, u8 byte_cnt, u8 *o_buf)
+static void bnx2x_sfp_set_transmitter(struct link_params *params,
+				      struct bnx2x_phy *phy,
+				      u8 tx_en)
+{
+	struct bnx2x *bp = params->bp;
+	DP(NETIF_MSG_LINK, "Setting SFP+ transmitter to %d\n", tx_en);
+	if (CHIP_IS_E3(bp))
+		bnx2x_sfp_e3_set_transmitter(params, phy, tx_en);
+	else
+		bnx2x_sfp_e1e2_set_transmitter(params, phy, tx_en);
+}
+
+static int bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+					     struct link_params *params,
+					     u16 addr, u8 byte_cnt, u8 *o_buf)
 {
 	struct bnx2x *bp = params->bp;
 	u16 val = 0;
@@ -4566,9 +7319,45 @@
 	return -EINVAL;
 }
 
-static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
-					    struct link_params *params,
-					    u16 addr, u8 byte_cnt, u8 *o_buf)
+static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+						 struct link_params *params,
+						 u16 addr, u8 byte_cnt,
+						 u8 *o_buf)
+{
+	int rc = 0;
+	u8 i, j = 0, cnt = 0;
+	u32 data_array[4];
+	u16 addr32;
+	struct bnx2x *bp = params->bp;
+	/*DP(NETIF_MSG_LINK, "bnx2x_direct_read_sfp_module_eeprom:"
+					" addr %d, cnt %d\n",
+					addr, byte_cnt);*/
+	if (byte_cnt > 16) {
+		DP(NETIF_MSG_LINK, "Reading from eeprom is"
+			    " limited to 16 bytes\n");
+		return -EINVAL;
+	}
+
+	/* 4 byte aligned address */
+	addr32 = addr & (~0x3);
+	do {
+		rc = bnx2x_bsc_read(params, phy, 0xa0, addr32, 0, byte_cnt,
+				    data_array);
+	} while ((rc != 0) && (++cnt < I2C_WA_RETRY_CNT));
+
+	if (rc == 0) {
+		for (i = (addr - addr32); i < byte_cnt + (addr - addr32); i++) {
+			o_buf[j] = *((u8 *)data_array + i);
+			j++;
+		}
+	}
+
+	return rc;
+}
+
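The BSC transfer above is issued from the 32-bit-aligned base addr32, so the caller's bytes are copied back out of the word buffer starting at offset (addr - addr32). A standalone sketch of that copy step:

	#include <string.h>

	/* copy byte_cnt bytes that were read starting at the aligned base */
	static void copy_from_aligned(unsigned char *dst,
				      const unsigned int *words,
				      unsigned int addr, unsigned int byte_cnt)
	{
		unsigned int addr32 = addr & ~0x3u;

		memcpy(dst, (const unsigned char *)words + (addr - addr32),
		       byte_cnt);
	}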
+static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+					     struct link_params *params,
+					     u16 addr, u8 byte_cnt, u8 *o_buf)
 {
 	struct bnx2x *bp = params->bp;
 	u16 val, i;
@@ -4653,27 +7442,39 @@
 	return -EINVAL;
 }
 
-u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
-				struct link_params *params, u16 addr,
-				u8 byte_cnt, u8 *o_buf)
+int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+				 struct link_params *params, u16 addr,
+				 u8 byte_cnt, u8 *o_buf)
 {
-	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726)
-		return bnx2x_8726_read_sfp_module_eeprom(phy, params, addr,
-							 byte_cnt, o_buf);
-	else if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727)
-		return bnx2x_8727_read_sfp_module_eeprom(phy, params, addr,
-							 byte_cnt, o_buf);
-	return -EINVAL;
+	int rc = -EINVAL;
+	switch (phy->type) {
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
+		rc = bnx2x_8726_read_sfp_module_eeprom(phy, params, addr,
+						       byte_cnt, o_buf);
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722:
+		rc = bnx2x_8727_read_sfp_module_eeprom(phy, params, addr,
+						       byte_cnt, o_buf);
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
+		rc = bnx2x_warpcore_read_sfp_module_eeprom(phy, params, addr,
+							   byte_cnt, o_buf);
+		break;
+	}
+	return rc;
 }
 
-static u8 bnx2x_get_edc_mode(struct bnx2x_phy *phy,
-			     struct link_params *params,
-			     u16 *edc_mode)
+static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
+			      struct link_params *params,
+			      u16 *edc_mode)
 {
 	struct bnx2x *bp = params->bp;
+	u32 sync_offset = 0, phy_idx, media_types;
 	u8 val, check_limiting_mode = 0;
 	*edc_mode = EDC_MODE_LIMITING;
 
+	phy->media_type = ETH_PHY_UNSPECIFIED;
 	/* First check for copper cable */
 	if (bnx2x_read_sfp_module_eeprom(phy,
 					 params,
@@ -4688,7 +7489,7 @@
 	case SFP_EEPROM_CON_TYPE_VAL_COPPER:
 	{
 		u8 copper_module_type;
-
+		phy->media_type = ETH_PHY_DA_TWINAX;
 		/*
 		 * Check if it's an active cable (includes SFP+ module)
 		 * or a passive cable
@@ -4697,8 +7498,7 @@
 					       params,
 					       SFP_EEPROM_FC_TX_TECH_ADDR,
 					       1,
-					       &copper_module_type) !=
-		    0) {
+					       &copper_module_type) != 0) {
 			DP(NETIF_MSG_LINK,
 				"Failed to read copper-cable-type"
 				" from SFP+ EEPROM\n");
@@ -4723,6 +7523,7 @@
 		break;
 	}
 	case SFP_EEPROM_CON_TYPE_VAL_LC:
+		phy->media_type = ETH_PHY_SFP_FIBER;
 		DP(NETIF_MSG_LINK, "Optic module detected\n");
 		check_limiting_mode = 1;
 		break;
@@ -4731,7 +7532,22 @@
 			 val);
 		return -EINVAL;
 	}
-
+	sync_offset = params->shmem_base +
+		offsetof(struct shmem_region,
+			 dev_info.port_hw_config[params->port].media_type);
+	media_types = REG_RD(bp, sync_offset);
+	/* Update media type for non-PMF sync */
+	for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) {
+		if (&(params->phy[phy_idx]) == phy) {
+			media_types &= ~(PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK <<
+				(PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT * phy_idx));
+			media_types |= ((phy->media_type &
+					PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) <<
+				(PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT * phy_idx));
+			break;
+		}
+	}
+	REG_WR(bp, sync_offset, media_types);
 	if (check_limiting_mode) {
 		u8 options[SFP_EEPROM_OPTIONS_SIZE];
 		if (bnx2x_read_sfp_module_eeprom(phy,
@@ -4755,8 +7571,8 @@
  * This function read the relevant field from the module (SFP+), and verify it
  * is compliant with this board
  */
-static u8 bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
-				  struct link_params *params)
+static int bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
+				   struct link_params *params)
 {
 	struct bnx2x *bp = params->bp;
 	u32 val, cmd;
@@ -4825,8 +7641,8 @@
 	return -EINVAL;
 }
 
-static u8 bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
-						struct link_params *params)
+static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
+						 struct link_params *params)
 
 {
 	u8 val;
@@ -4858,8 +7674,8 @@
 	 * In the GPIO register, bit 4 is used to determine if the GPIOs are
 	 * operating as INPUT or as OUTPUT. Bit 1 is for input, and 0 for
 	 * output
-	 * Bits 0-1 determine the gpios value for OUTPUT in case bit 4 val is 0
-	 * Bits 8-9 determine the gpios value for INPUT in case bit 4 val is 1
+	 * Bits 0-1 determine the GPIOs value for OUTPUT in case bit 4 val is 0
+	 * Bits 8-9 determine the GPIOs value for INPUT in case bit 4 val is 1
 	 * where the 1st bit is the over-current (only input), and the 2nd bit
 	 * is for power (only output)
 	 *
@@ -4868,15 +7684,14 @@
 	 */
 	if (phy->flags & FLAGS_NOC)
 		return;
-	if (!(phy->flags &
-	      FLAGS_NOC) && is_power_up)
+	if (is_power_up)
 		val = (1<<4);
 	else
 		/*
 		 * Set GPIO control to OUTPUT, and set the power bit
 		 * to according to the is_power_up
 		 */
-		val = ((!(is_power_up)) << 1);
+		val = (1<<1);
 
 	bnx2x_cl45_write(bp, phy,
 			 MDIO_PMA_DEVAD,
@@ -4884,9 +7699,9 @@
 			 val);
 }
 
-static u8 bnx2x_8726_set_limiting_mode(struct bnx2x *bp,
-				       struct bnx2x_phy *phy,
-				       u16 edc_mode)
+static int bnx2x_8726_set_limiting_mode(struct bnx2x *bp,
+					struct bnx2x_phy *phy,
+					u16 edc_mode)
 {
 	u16 cur_limiting_mode;
 
@@ -4934,9 +7749,9 @@
 	return 0;
 }
 
-static u8 bnx2x_8727_set_limiting_mode(struct bnx2x *bp,
-				       struct bnx2x_phy *phy,
-				       u16 edc_mode)
+static int bnx2x_8727_set_limiting_mode(struct bnx2x *bp,
+					struct bnx2x_phy *phy,
+					u16 edc_mode)
 {
 	u16 phy_identifier;
 	u16 rom_ver2_val;
@@ -4989,7 +7804,7 @@
 	}
 }
 
-static void bnx2x_set_sfp_module_fault_led(struct link_params *params,
+static void bnx2x_set_e1e2_module_fault_led(struct link_params *params,
 					   u8 gpio_mode)
 {
 	struct bnx2x *bp = params->bp;
@@ -5021,12 +7836,137 @@
 	}
 }
 
-static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
-				     struct link_params *params)
+static void bnx2x_set_e3_module_fault_led(struct link_params *params,
+					  u8 gpio_mode)
+{
+	u32 pin_cfg;
+	u8 port = params->port;
+	struct bnx2x *bp = params->bp;
+	pin_cfg = (REG_RD(bp, params->shmem_base +
+			 offsetof(struct shmem_region,
+				  dev_info.port_hw_config[port].e3_sfp_ctrl)) &
+		PORT_HW_CFG_E3_FAULT_MDL_LED_MASK) >>
+		PORT_HW_CFG_E3_FAULT_MDL_LED_SHIFT;
+	DP(NETIF_MSG_LINK, "Setting Fault LED to %d using pin cfg %d\n",
+		       gpio_mode, pin_cfg);
+	bnx2x_set_cfg_pin(bp, pin_cfg, gpio_mode);
+}
+
+static void bnx2x_set_sfp_module_fault_led(struct link_params *params,
+					   u8 gpio_mode)
+{
+	struct bnx2x *bp = params->bp;
+	DP(NETIF_MSG_LINK, "Setting SFP+ module fault LED to %d\n", gpio_mode);
+	if (CHIP_IS_E3(bp)) {
+		/*
+		 * Low ==> SFP+ module is supported
+		 * High ==> SFP+ module is not on the approved vendor list
+		 */
+		bnx2x_set_e3_module_fault_led(params, gpio_mode);
+	} else
+		bnx2x_set_e1e2_module_fault_led(params, gpio_mode);
+}
+
+static void bnx2x_warpcore_power_module(struct link_params *params,
+					struct bnx2x_phy *phy,
+					u8 power)
+{
+	u32 pin_cfg;
+	struct bnx2x *bp = params->bp;
+
+	pin_cfg = (REG_RD(bp, params->shmem_base +
+			  offsetof(struct shmem_region,
+			dev_info.port_hw_config[params->port].e3_sfp_ctrl)) &
+			PORT_HW_CFG_E3_PWR_DIS_MASK) >>
+			PORT_HW_CFG_E3_PWR_DIS_SHIFT;
+	DP(NETIF_MSG_LINK, "Setting SFP+ module power to %d using pin cfg %d\n",
+		       power, pin_cfg);
+	/*
+	 * Low ==> corresponding SFP+ module is powered
+	 * High ==> the SFP+ module is powered down
+	 */
+	bnx2x_set_cfg_pin(bp, pin_cfg, power ^ 1);
+}
+
+static void bnx2x_power_sfp_module(struct link_params *params,
+				   struct bnx2x_phy *phy,
+				   u8 power)
+{
+	struct bnx2x *bp = params->bp;
+	DP(NETIF_MSG_LINK, "Setting SFP+ power to %x\n", power);
+
+	switch (phy->type) {
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722:
+		bnx2x_8727_power_module(params->bp, phy, power);
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
+		bnx2x_warpcore_power_module(params, phy, power);
+		break;
+	default:
+		break;
+	}
+}
+static void bnx2x_warpcore_set_limiting_mode(struct link_params *params,
+					     struct bnx2x_phy *phy,
+					     u16 edc_mode)
+{
+	u16 val = 0;
+	u16 mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT;
+	struct bnx2x *bp = params->bp;
+
+	u8 lane = bnx2x_get_warpcore_lane(phy, params);
+	/* This is a global register which controls all lanes */
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE, &val);
+	val &= ~(0xf << (lane << 2));
+
+	switch (edc_mode) {
+	case EDC_MODE_LINEAR:
+	case EDC_MODE_LIMITING:
+		mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT;
+		break;
+	case EDC_MODE_PASSIVE_DAC:
+		mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_DAC;
+		break;
+	default:
+		break;
+	}
+
+	val |= (mode << (lane << 2));
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE, val);
+	/* This read-back is mandatory */
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE, &val);
+}
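
bnx2x_warpcore_set_limiting_mode() packs one 4-bit mode per lane into a
single global register: `lane << 2` converts the lane index into a bit
offset, the lane's nibble is cleared, and the new mode is OR'ed in. A
self-contained sketch of that read-modify-write (the register value and lane
are made up for the example):

#include <assert.h>
#include <stdint.h>

/* Replace the 4-bit mode nibble of one lane in a packed register image. */
static uint16_t set_lane_mode(uint16_t reg, unsigned int lane, uint16_t mode)
{
	unsigned int shift = lane << 2;		/* 4 bits per lane */

	assert(lane < 4 && mode <= 0xf);
	reg &= ~(0xfu << shift);		/* clear this lane's nibble */
	reg |= mode << shift;			/* install the new mode */
	return reg;
}

int main(void)
{
	/* Lane 2 occupies bits [11:8]: 0x1234 -> 0x1734 */
	assert(set_lane_mode(0x1234, 2, 0x7) == 0x1734);
	return 0;
}
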
+
+static void bnx2x_set_limiting_mode(struct link_params *params,
+				    struct bnx2x_phy *phy,
+				    u16 edc_mode)
+{
+	switch (phy->type) {
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
+		bnx2x_8726_set_limiting_mode(params->bp, phy, edc_mode);
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722:
+		bnx2x_8727_set_limiting_mode(params->bp, phy, edc_mode);
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
+		bnx2x_warpcore_set_limiting_mode(params, phy, edc_mode);
+		break;
+	}
+}
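
bnx2x_power_sfp_module() and bnx2x_set_limiting_mode() above both dispatch on
phy->type with a switch. For contrast, here is the same shape as a table of
function pointers; the enum and handler names are hypothetical (not the
PORT_HW_CFG_* constants), and the driver's switch form is arguably clearer
when the cases take different argument lists, as they do here:

#include <stdio.h>

enum phy_type { PHY_8726, PHY_8727, PHY_DIRECT, PHY_TYPE_MAX };

typedef void (*set_limiting_fn)(unsigned int edc_mode);

static void set_8726(unsigned int m) { printf("8726: %u\n", m); }
static void set_8727(unsigned int m) { printf("8727: %u\n", m); }
static void set_warpcore(unsigned int m) { printf("warpcore: %u\n", m); }

/* Table-driven alternative to the switch dispatch used above. */
static const set_limiting_fn dispatch[PHY_TYPE_MAX] = {
	[PHY_8726]   = set_8726,
	[PHY_8727]   = set_8727,
	[PHY_DIRECT] = set_warpcore,
};

int main(void)
{
	enum phy_type type = PHY_DIRECT;

	if (dispatch[type])		/* unknown types fall through */
		dispatch[type](2);
	return 0;
}
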
+
+int bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
+			       struct link_params *params)
 {
 	struct bnx2x *bp = params->bp;
 	u16 edc_mode;
-	u8 rc = 0;
+	int rc = 0;
 
 	u32 val = REG_RD(bp, params->shmem_base +
 			     offsetof(struct shmem_region, dev_info.
@@ -5034,7 +7974,8 @@
 
 	DP(NETIF_MSG_LINK, "SFP+ module plugged in/out detected on port %d\n",
 		 params->port);
-
+	/* Power up module */
+	bnx2x_power_sfp_module(params, phy, 1);
 	if (bnx2x_get_edc_mode(phy, params, &edc_mode) != 0) {
 		DP(NETIF_MSG_LINK, "Failed to get valid module type\n");
 		return -EINVAL;
@@ -5046,12 +7987,11 @@
 		bnx2x_set_sfp_module_fault_led(params,
 					       MISC_REGISTERS_GPIO_HIGH);
 
-		if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) &&
-		    ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
-		     PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN)) {
-			/* Shutdown SFP+ module */
+		/* Check whether the SFP+ module should be powered down */
+		if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
+		     PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN) {
 			DP(NETIF_MSG_LINK, "Shutdown SFP+ module!!\n");
-			bnx2x_8727_power_module(bp, phy, 0);
+			bnx2x_power_sfp_module(params, phy, 0);
 			return rc;
 		}
 	} else {
@@ -5059,18 +7999,12 @@
 		bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_LOW);
 	}
 
-	/* power up the SFP module */
-	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727)
-		bnx2x_8727_power_module(bp, phy, 1);
-
 	/*
 	 * Check and set limiting mode / LRM mode on 8726. On 8727 it
 	 * is done automatically
 	 */
-	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726)
-		bnx2x_8726_set_limiting_mode(bp, phy, edc_mode);
-	else
-		bnx2x_8727_set_limiting_mode(bp, phy, edc_mode);
+	bnx2x_set_limiting_mode(params, phy, edc_mode);
+
 	/*
 	 * Enable transmit for this module if the module is approved, or
 	 * if unapproved modules should also enable the Tx laser
@@ -5088,23 +8022,33 @@
 void bnx2x_handle_module_detect_int(struct link_params *params)
 {
 	struct bnx2x *bp = params->bp;
-	struct bnx2x_phy *phy = &params->phy[EXT_PHY1];
+	struct bnx2x_phy *phy;
 	u32 gpio_val;
-	u8 port = params->port;
+	u8 gpio_num, gpio_port;
+
+	if (CHIP_IS_E3(bp))
+		phy = &params->phy[INT_PHY];
+	else
+		phy = &params->phy[EXT_PHY1];
+
+	if (bnx2x_get_mod_abs_int_cfg(bp, params->chip_id, params->shmem_base,
+				      params->port, &gpio_num, &gpio_port) ==
+	    -EINVAL) {
+		DP(NETIF_MSG_LINK, "Failed to get MOD_ABS interrupt config\n");
+		return;
+	}
 
 	/* Set valid module led off */
 	bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_HIGH);
 
 	/* Get current gpio val reflecting module plugged in / out*/
-	gpio_val = bnx2x_get_gpio(bp, MISC_REGISTERS_GPIO_3, port);
+	gpio_val = bnx2x_get_gpio(bp, gpio_num, gpio_port);
 
 	/* Call the handling function in case module is detected */
 	if (gpio_val == 0) {
-
-		bnx2x_set_gpio_int(bp, MISC_REGISTERS_GPIO_3,
+		bnx2x_power_sfp_module(params, phy, 1);
+		bnx2x_set_gpio_int(bp, gpio_num,
 				   MISC_REGISTERS_GPIO_INT_OUTPUT_CLR,
-				   port);
-
+				   gpio_port);
 		if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0)
 			bnx2x_sfp_module_detection(phy, params);
 		else
@@ -5115,13 +8059,14 @@
 					  port_feature_config[params->port].
 					  config));
 
-		bnx2x_set_gpio_int(bp, MISC_REGISTERS_GPIO_3,
+		bnx2x_set_gpio_int(bp, gpio_num,
 				   MISC_REGISTERS_GPIO_INT_OUTPUT_SET,
-				   port);
+				   gpio_port);
 		/*
 		 * Module was plugged out.
 		 * Disable transmit for this module
 		 */
+		phy->media_type = ETH_PHY_NOT_PRESENT;
 		if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
 		    PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
 			bnx2x_sfp_set_transmitter(params, phy, 0);
@@ -5129,6 +8074,29 @@
 }
 
 /******************************************************************/
+/*		Used by 8706 and 8727                             */
+/******************************************************************/
+static void bnx2x_sfp_mask_fault(struct bnx2x *bp,
+				 struct bnx2x_phy *phy,
+				 u16 alarm_status_offset,
+				 u16 alarm_ctrl_offset)
+{
+	u16 alarm_status, val;
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, alarm_status_offset,
+			&alarm_status);
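+	/* Latched-on-read: the second read returns the live value */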
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, alarm_status_offset,
+			&alarm_status);
+	/* Mask or enable the fault event. */
+	bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, alarm_ctrl_offset, &val);
+	if (alarm_status & (1<<0))
+		val &= ~(1<<0);
+	else
+		val |= (1<<0);
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, alarm_ctrl_offset, val);
+}
+/******************************************************************/
 /*		common BCM8706/BCM8726 PHY SECTION		  */
 /******************************************************************/
 static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy,
@@ -5141,12 +8109,16 @@
 	DP(NETIF_MSG_LINK, "XGXS 8706/8726\n");
 	/* Clear RX Alarm*/
 	bnx2x_cl45_read(bp, phy,
-			MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM, &val2);
+			MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &val2);
+
+	bnx2x_sfp_mask_fault(bp, phy, MDIO_PMA_LASI_TXSTAT,
+			     MDIO_PMA_LASI_TXCTRL);
+
 	/* clear LASI indication*/
 	bnx2x_cl45_read(bp, phy,
-			MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS, &val1);
+			MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
 	bnx2x_cl45_read(bp, phy,
-			MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS, &val2);
+			MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val2);
 	DP(NETIF_MSG_LINK, "8706/8726 LASI status 0x%x--> 0x%x\n", val1, val2);
 
 	bnx2x_cl45_read(bp, phy,
@@ -5173,6 +8145,17 @@
 		bnx2x_ext_phy_resolve_fc(phy, params, vars);
 		vars->duplex = DUPLEX_FULL;
 	}
+
+	/* Capture 10G link fault. Read twice to clear stale value. */
+	if (vars->line_speed == SPEED_10000) {
+		bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
+			    MDIO_PMA_LASI_TXSTAT, &val1);
+		bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
+			    MDIO_PMA_LASI_TXSTAT, &val1);
+		if (val1 & (1<<0))
+			vars->fault_detected = 1;
+	}
+
 	return link_up;
 }
 
@@ -5186,6 +8169,10 @@
 	u32 tx_en_mode;
 	u16 cnt, val, tmp1;
 	struct bnx2x *bp = params->bp;
+
+	/* SFP+ PHY: Set flag to check for Tx error */
+	vars->phy_flags = PHY_TX_ERROR_CHECK_FLAG;
+
 	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
 		       MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
 	/* HW reset */
@@ -5228,7 +8215,11 @@
 				 MDIO_PMA_DEVAD,
 				 MDIO_PMA_REG_DIGITAL_CTRL, 0x400);
 		bnx2x_cl45_write(bp, phy,
-				 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 1);
+				 MDIO_PMA_DEVAD, MDIO_PMA_LASI_TXCTRL,
+				 0);
+		/* Arm LASI for link and Tx fault. */
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 3);
 	} else {
 		/* Force 1Gbps using autoneg with 1G advertisement */
 
@@ -5251,10 +8242,10 @@
 		bnx2x_cl45_write(bp, phy,
 				 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
 		bnx2x_cl45_write(bp, phy,
-				 MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM_CTRL,
+				 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
 				 0x0400);
 		bnx2x_cl45_write(bp, phy,
-				 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL,
+				 MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL,
 				 0x0004);
 	}
 	bnx2x_save_bcm_spirom_ver(bp, phy, params->port);
@@ -5281,9 +8272,9 @@
 	return 0;
 }
 
-static u8 bnx2x_8706_read_status(struct bnx2x_phy *phy,
-				 struct link_params *params,
-				 struct link_vars *vars)
+static int bnx2x_8706_read_status(struct bnx2x_phy *phy,
+				  struct link_params *params,
+				  struct link_vars *vars)
 {
 	return bnx2x_8706_8726_read_status(phy, params, vars);
 }
@@ -5358,15 +8349,16 @@
 }
 
 
-static u8 bnx2x_8726_config_init(struct bnx2x_phy *phy,
-				 struct link_params *params,
-				 struct link_vars *vars)
+static int bnx2x_8726_config_init(struct bnx2x_phy *phy,
+				  struct link_params *params,
+				  struct link_vars *vars)
 {
 	struct bnx2x *bp = params->bp;
-	u32 val;
-	u32 swap_val, swap_override, aeu_gpio_mask, offset;
 	DP(NETIF_MSG_LINK, "Initializing BCM8726\n");
 
+	/* SFP+ PHY: Set flag to check for Tx error */
+	vars->phy_flags = PHY_TX_ERROR_CHECK_FLAG;
+
 	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
 	bnx2x_wait_reset_complete(bp, phy, params);
 
@@ -5387,9 +8379,9 @@
 		bnx2x_cl45_write(bp, phy,
 				 MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0xD);
 		bnx2x_cl45_write(bp, phy,
-				 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x5);
+				 MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x5);
 		bnx2x_cl45_write(bp, phy,
-				 MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM_CTRL,
+				 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
 				 0x400);
 	} else if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
 		   (phy->speed_cap_mask &
@@ -5415,14 +8407,14 @@
 		 * change
 		 */
 		bnx2x_cl45_write(bp, phy,
-				 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x4);
+				 MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x4);
 		bnx2x_cl45_write(bp, phy,
-				 MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM_CTRL,
+				 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
 				 0x400);
 
 	} else { /* Default 10G. Set only LASI control */
 		bnx2x_cl45_write(bp, phy,
-				 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 1);
+				 MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 1);
 	}
 
 	/* Set TX PreEmphasis if needed */
@@ -5443,30 +8435,6 @@
 				 phy->tx_preemphasis[1]);
 	}
 
-	/* Set GPIO3 to trigger SFP+ module insertion/removal */
-	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
-		       MISC_REGISTERS_GPIO_INPUT_HI_Z, params->port);
-
-	/* The GPIO should be swapped if the swap register is set and active */
-	swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
-	swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
-
-	/* Select function upon port-swap configuration */
-	if (params->port == 0) {
-		offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
-		aeu_gpio_mask = (swap_val && swap_override) ?
-			AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
-			AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
-	} else {
-		offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
-		aeu_gpio_mask = (swap_val && swap_override) ?
-			AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
-			AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
-	}
-	val = REG_RD(bp, offset);
-	/* add GPIO3 to group */
-	val |= aeu_gpio_mask;
-	REG_WR(bp, offset, val);
 	return 0;
 
 }
@@ -5548,9 +8516,9 @@
 		       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
 }
 
-static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
-				 struct link_params *params,
-				 struct link_vars *vars)
+static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
+				  struct link_params *params,
+				  struct link_vars *vars)
 {
 	u32 tx_en_mode;
 	u16 tmp1, val, mod_abs, tmp2;
@@ -5559,18 +8527,24 @@
 	struct bnx2x *bp = params->bp;
 	/* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */
 
+	/* SFP+ PHY: Set flag to check for Tx error */
+	vars->phy_flags = PHY_TX_ERROR_CHECK_FLAG;
+
 	bnx2x_wait_reset_complete(bp, phy, params);
 	rx_alarm_ctrl_val = (1<<2) | (1<<5);
-	lasi_ctrl_val = 0x0004;
+	/* Should be 0x6 to enable XS on Tx side. */
+	lasi_ctrl_val = 0x0006;
 
 	DP(NETIF_MSG_LINK, "Initializing BCM8727\n");
 	/* enable LASI */
 	bnx2x_cl45_write(bp, phy,
-			 MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM_CTRL,
+			 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
 			 rx_alarm_ctrl_val);
-
 	bnx2x_cl45_write(bp, phy,
-			 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, lasi_ctrl_val);
+			 MDIO_PMA_DEVAD, MDIO_PMA_LASI_TXCTRL,
+			 0);
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, lasi_ctrl_val);
 
 	/*
 	 * Initially configure MOD_ABS to interrupt when module is
@@ -5612,7 +8586,7 @@
 			MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1);
 
 	bnx2x_cl45_read(bp, phy,
-			MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM, &tmp1);
+			MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1);
 
 	/* Set option 1G speed */
 	if (phy->req_line_speed == SPEED_1000) {
@@ -5730,7 +8704,7 @@
 		/* Module is absent */
 		DP(NETIF_MSG_LINK, "MOD_ABS indication "
 			    "show module is absent\n");
-
+		phy->media_type = ETH_PHY_NOT_PRESENT;
 		/*
 		 * 1. Set mod_abs to detect next module
 		 *    presence event
@@ -5752,7 +8726,7 @@
 		 */
 		bnx2x_cl45_read(bp, phy,
 				MDIO_PMA_DEVAD,
-				MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
+				MDIO_PMA_LASI_RXSTAT, &rx_alarm_status);
 
 	} else {
 		/* Module is present */
@@ -5781,7 +8755,7 @@
 		 */
 		bnx2x_cl45_read(bp, phy,
 				MDIO_PMA_DEVAD,
-				MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
+				MDIO_PMA_LASI_RXSTAT, &rx_alarm_status);
 
 
 		if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
@@ -5805,26 +8779,29 @@
 
 {
 	struct bnx2x *bp = params->bp;
-	u8 link_up = 0;
+	u8 link_up = 0, oc_port = params->port;
 	u16 link_status = 0;
 	u16 rx_alarm_status, lasi_ctrl, val1;
 
 	/* If PHY is not initialized, do not check link status */
 	bnx2x_cl45_read(bp, phy,
-			MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL,
+			MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL,
 			&lasi_ctrl);
 	if (!lasi_ctrl)
 		return 0;
 
-	/* Check the LASI */
+	/* Check the LASI on Rx */
 	bnx2x_cl45_read(bp, phy,
-			MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM,
+			MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT,
 			&rx_alarm_status);
 	vars->line_speed = 0;
 	DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS  0x%x\n", rx_alarm_status);
 
+	bnx2x_sfp_mask_fault(bp, phy, MDIO_PMA_LASI_TXSTAT,
+			     MDIO_PMA_LASI_TXCTRL);
+
 	bnx2x_cl45_read(bp, phy,
-			MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS, &val1);
+			MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
 
 	DP(NETIF_MSG_LINK, "8727 LASI status 0x%x\n", val1);
 
@@ -5843,8 +8820,10 @@
 				&val1);
 
 		if ((val1 & (1<<8)) == 0) {
+			if (!CHIP_IS_E1x(bp))
+				oc_port = BP_PATH(bp) + (params->port << 1);
 			DP(NETIF_MSG_LINK, "8727 Power fault has been detected"
-				       " on port %d\n", params->port);
+				       " on port %d\n", oc_port);
 			netdev_err(bp->dev, "Error:  Power fault on Port %d has"
 					    " been detected and the power to "
 					    "that SFP+ module has been removed"
@@ -5852,11 +8831,11 @@
 					    " Please remove the SFP+ module and"
 					    " restart the system to clear this"
 					    " error.\n",
-			 params->port);
+			 oc_port);
 			/* Disable all RX_ALARMs except for mod_abs */
 			bnx2x_cl45_write(bp, phy,
 					 MDIO_PMA_DEVAD,
-					 MDIO_PMA_REG_RX_ALARM_CTRL, (1<<5));
+					 MDIO_PMA_LASI_RXCTRL, (1<<5));
 
 			bnx2x_cl45_read(bp, phy,
 					MDIO_PMA_DEVAD,
@@ -5869,7 +8848,7 @@
 			/* Clear RX alarm */
 			bnx2x_cl45_read(bp, phy,
 				MDIO_PMA_DEVAD,
-				MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
+				MDIO_PMA_LASI_RXSTAT, &rx_alarm_status);
 			return 0;
 		}
 	} /* Over current check */
@@ -5879,7 +8858,7 @@
 		bnx2x_8727_handle_mod_abs(phy, params);
 		/* Enable all mod_abs and link detection bits */
 		bnx2x_cl45_write(bp, phy,
-				 MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM_CTRL,
+				 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
 				 ((1<<5) | (1<<2)));
 	}
 	DP(NETIF_MSG_LINK, "Enabling 8727 TX laser if SFP is approved\n");
@@ -5915,6 +8894,20 @@
 		DP(NETIF_MSG_LINK, "port %x: External link is down\n",
 			   params->port);
 	}
+
+	/* Capture 10G link fault. */
+	if (vars->line_speed == SPEED_10000) {
+		bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
+			    MDIO_PMA_LASI_TXSTAT, &val1);
+
+		bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
+			    MDIO_PMA_LASI_TXSTAT, &val1);
+
+		if (val1 & (1<<0)) {
+			vars->fault_detected = 1;
+		}
+	}
+
 	if (link_up) {
 		bnx2x_ext_phy_resolve_fc(phy, params, vars);
 		vars->duplex = DUPLEX_FULL;
@@ -5948,7 +8941,7 @@
 	/* Disable Transmitter */
 	bnx2x_sfp_set_transmitter(params, phy, 0);
 	/* Clear LASI */
-	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0);
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0);
 
 }
 
@@ -5958,111 +8951,106 @@
 static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
 					   struct link_params *params)
 {
-	u16 val, fw_ver1, fw_ver2, cnt, adj;
+	u16 val, fw_ver1, fw_ver2, cnt;
+	u8 port;
 	struct bnx2x *bp = params->bp;
 
-	adj = 0;
-	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
-		adj = -1;
+	port = params->port;
 
 	/* For the 32 bits registers in 848xx, access via MDIO2ARM interface.*/
 	/* (1) set register 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */
-	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819 + adj, 0x0014);
-	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A + adj, 0xc200);
-	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B + adj, 0x0000);
-	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C + adj, 0x0300);
-	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817 + adj, 0x0009);
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0014);
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B, 0x0000);
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C, 0x0300);
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x0009);
 
 	for (cnt = 0; cnt < 100; cnt++) {
-		bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818 + adj, &val);
+		bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
 		if (val & 1)
 			break;
 		udelay(5);
 	}
 	if (cnt == 100) {
 		DP(NETIF_MSG_LINK, "Unable to read 848xx phy fw version(1)\n");
-		bnx2x_save_spirom_version(bp, params->port, 0,
+		bnx2x_save_spirom_version(bp, port, 0,
 					  phy->ver_addr);
 		return;
 	}
 
 
 	/* 2) read register 0xc200_0000 (SPI_FW_STATUS) */
-	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819 + adj, 0x0000);
-	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A + adj, 0xc200);
-	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817 + adj, 0x000A);
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0000);
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x000A);
 	for (cnt = 0; cnt < 100; cnt++) {
-		bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818 + adj, &val);
+		bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
 		if (val & 1)
 			break;
 		udelay(5);
 	}
 	if (cnt == 100) {
 		DP(NETIF_MSG_LINK, "Unable to read 848xx phy fw version(2)\n");
-		bnx2x_save_spirom_version(bp, params->port, 0,
+		bnx2x_save_spirom_version(bp, port, 0,
 					  phy->ver_addr);
 		return;
 	}
 
 	/* lower 16 bits of the register SPI_FW_STATUS */
-	bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B + adj, &fw_ver1);
+	bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B, &fw_ver1);
 	/* upper 16 bits of register SPI_FW_STATUS */
-	bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C + adj, &fw_ver2);
+	bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C, &fw_ver2);
 
-	bnx2x_save_spirom_version(bp, params->port, (fw_ver2<<16) | fw_ver1,
+	bnx2x_save_spirom_version(bp, port, (fw_ver2<<16) | fw_ver1,
 				  phy->ver_addr);
 }
 
 static void bnx2x_848xx_set_led(struct bnx2x *bp,
 				struct bnx2x_phy *phy)
 {
-	u16 val, adj;
-
-	adj = 0;
-	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
-		adj = -1;
+	u16 val;
 
 	/* PHYC_CTL_LED_CTL */
 	bnx2x_cl45_read(bp, phy,
 			MDIO_PMA_DEVAD,
-			MDIO_PMA_REG_8481_LINK_SIGNAL + adj, &val);
+			MDIO_PMA_REG_8481_LINK_SIGNAL, &val);
 	val &= 0xFE00;
 	val |= 0x0092;
 
 	bnx2x_cl45_write(bp, phy,
 			 MDIO_PMA_DEVAD,
-			 MDIO_PMA_REG_8481_LINK_SIGNAL + adj, val);
+			 MDIO_PMA_REG_8481_LINK_SIGNAL, val);
 
 	bnx2x_cl45_write(bp, phy,
 			 MDIO_PMA_DEVAD,
-			 MDIO_PMA_REG_8481_LED1_MASK + adj,
+			 MDIO_PMA_REG_8481_LED1_MASK,
 			 0x80);
 
 	bnx2x_cl45_write(bp, phy,
 			 MDIO_PMA_DEVAD,
-			 MDIO_PMA_REG_8481_LED2_MASK + adj,
+			 MDIO_PMA_REG_8481_LED2_MASK,
 			 0x18);
 
 	/* Select activity source by Tx and Rx, as suggested by PHY AE */
 	bnx2x_cl45_write(bp, phy,
 			 MDIO_PMA_DEVAD,
-			 MDIO_PMA_REG_8481_LED3_MASK + adj,
+			 MDIO_PMA_REG_8481_LED3_MASK,
 			 0x0006);
 
 	/* Select the closest activity blink rate to that in 10/100/1000 */
 	bnx2x_cl45_write(bp, phy,
 			MDIO_PMA_DEVAD,
-			MDIO_PMA_REG_8481_LED3_BLINK + adj,
+			MDIO_PMA_REG_8481_LED3_BLINK,
 			0);
 
 	bnx2x_cl45_read(bp, phy,
 			MDIO_PMA_DEVAD,
-			MDIO_PMA_REG_84823_CTL_LED_CTL_1 + adj, &val);
+			MDIO_PMA_REG_84823_CTL_LED_CTL_1, &val);
 	val |= MDIO_PMA_REG_84823_LED3_STRETCH_EN; /* stretch_en for LED3*/
 
 	bnx2x_cl45_write(bp, phy,
 			 MDIO_PMA_DEVAD,
-			 MDIO_PMA_REG_84823_CTL_LED_CTL_1 + adj, val);
+			 MDIO_PMA_REG_84823_CTL_LED_CTL_1, val);
 
 	/* 'Interrupt Mask' */
 	bnx2x_cl45_write(bp, phy,
@@ -6070,12 +9058,19 @@
 			 0xFFFB, 0xFFFD);
 }
 
-static u8 bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
-				      struct link_params *params,
-				      struct link_vars *vars)
+static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
+				       struct link_params *params,
+				       struct link_vars *vars)
 {
 	struct bnx2x *bp = params->bp;
 	u16 autoneg_val, an_1000_val, an_10_100_val;
+	u16 tmp_req_line_speed;
+
+	tmp_req_line_speed = phy->req_line_speed;
+	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+		if (phy->req_line_speed == SPEED_10000)
+			phy->req_line_speed = SPEED_AUTO_NEG;
+
 	/*
 	 * This phy uses the NIG latch mechanism since link indication
 	 * arrives through its LED4 and not via its LASI signal, so we
@@ -6179,10 +9174,10 @@
 	    (phy->speed_cap_mask &
 	     PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) ||
 		(phy->req_line_speed == SPEED_10000)) {
-		DP(NETIF_MSG_LINK, "Advertising 10G\n");
-		/* Restart autoneg for 10G*/
+			DP(NETIF_MSG_LINK, "Advertising 10G\n");
+			/* Restart autoneg for 10G*/
 
-		bnx2x_cl45_write(bp, phy,
+			bnx2x_cl45_write(bp, phy,
 				 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL,
 				 0x3200);
 	} else if (phy->req_line_speed != SPEED_10 &&
@@ -6195,12 +9190,14 @@
 	/* Save spirom version */
 	bnx2x_save_848xx_spirom_version(phy, params);
 
+	phy->req_line_speed = tmp_req_line_speed;
+
 	return 0;
 }
 
-static u8 bnx2x_8481_config_init(struct bnx2x_phy *phy,
-				 struct link_params *params,
-				 struct link_vars *vars)
+static int bnx2x_8481_config_init(struct bnx2x_phy *phy,
+				  struct link_params *params,
+				  struct link_vars *vars)
 {
 	struct bnx2x *bp = params->bp;
 	/* Restore normal power mode*/
@@ -6215,33 +9212,155 @@
 	return bnx2x_848xx_cmn_config_init(phy, params, vars);
 }
 
-static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
-				  struct link_params *params,
-				  struct link_vars *vars)
+
+#define PHY84833_HDSHK_WAIT 300
+static int bnx2x_84833_pair_swap_cfg(struct bnx2x_phy *phy,
+				   struct link_params *params,
+				   struct link_vars *vars)
+{
+	u32 idx;
+	u16 val;
+	u16 data = 0x01b1;
+	struct bnx2x *bp = params->bp;
+	/* Do pair swap */
+	/* Write CMD_OPEN_OVERRIDE to STATUS reg */
+	bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+			MDIO_84833_TOP_CFG_SCRATCH_REG2,
+			PHY84833_CMD_OPEN_OVERRIDE);
+	for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) {
+		bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+				MDIO_84833_TOP_CFG_SCRATCH_REG2, &val);
+		if (val == PHY84833_CMD_OPEN_FOR_CMDS)
+			break;
+		msleep(1);
+	}
+	if (idx >= PHY84833_HDSHK_WAIT) {
+		DP(NETIF_MSG_LINK, "Pairswap: FW not ready.\n");
+		return -EINVAL;
+	}
+
+	bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+			MDIO_84833_TOP_CFG_SCRATCH_REG4,
+			data);
+	/* Issue pair swap command */
+	bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+			MDIO_84833_TOP_CFG_SCRATCH_REG0,
+			PHY84833_DIAG_CMD_PAIR_SWAP_CHANGE);
+	for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) {
+		bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+				MDIO_84833_TOP_CFG_SCRATCH_REG2, &val);
+		if ((val == PHY84833_CMD_COMPLETE_PASS) ||
+			(val == PHY84833_CMD_COMPLETE_ERROR))
+			break;
+		msleep(1);
+	}
+	if ((idx >= PHY84833_HDSHK_WAIT) ||
+		(val == PHY84833_CMD_COMPLETE_ERROR)) {
+		DP(NETIF_MSG_LINK, "Pairswap: override failed.\n");
+		return -EINVAL;
+	}
+	bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+			MDIO_84833_TOP_CFG_SCRATCH_REG2,
+			PHY84833_CMD_CLEAR_COMPLETE);
+	DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data);
+	return 0;
+}
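
The routine above is a firmware mailbox handshake: write CMD_OPEN_OVERRIDE,
poll the scratch register until the firmware posts CMD_OPEN_FOR_CMDS, write
the argument, issue the command, poll again for PASS/ERROR, then clear the
status. The bounded-poll core of that handshake, sketched standalone;
scratch_read() and the status codes are hypothetical stand-ins for the CL45
accessors and the PHY84833_CMD_* values:

#include <stdint.h>
#include <stdio.h>

#define HDSHK_WAIT		300	/* poll iterations, ~1 ms apart */
#define CMD_COMPLETE_PASS	0x0040	/* hypothetical status codes */
#define CMD_COMPLETE_ERROR	0x0080

/* Fake mailbox: the "firmware" answers after three polls. */
static uint16_t scratch_read(void)
{
	static int ticks;

	return (++ticks > 3) ? CMD_COMPLETE_PASS : 0;
}

/* Poll until one of two expected codes appears, or give up. */
static int poll_fw(uint16_t ok, uint16_t err, uint16_t *out)
{
	int i;

	for (i = 0; i < HDSHK_WAIT; i++) {
		uint16_t val = scratch_read();

		if (val == ok || val == err) {
			*out = val;
			return 0;
		}
		/* the driver sleeps here: msleep(1) */
	}
	return -1;			/* timeout: firmware never answered */
}

int main(void)
{
	uint16_t status;

	if (poll_fw(CMD_COMPLETE_PASS, CMD_COMPLETE_ERROR, &status) == 0)
		printf("fw answered 0x%04x\n", status);
	else
		printf("fw timed out\n");
	return 0;
}
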
+
+static int bnx2x_84833_common_init_phy(struct bnx2x *bp,
+				       u32 shmem_base_path[],
+				       u32 chip_id)
+{
+	u32 reset_pin[2];
+	u32 idx;
+	u8 reset_gpios;
+	if (CHIP_IS_E3(bp)) {
+		/* Assume that these will be GPIOs, not EPIOs. */
+		for (idx = 0; idx < 2; idx++) {
+			/* Map config param to register bit. */
+			reset_pin[idx] = REG_RD(bp, shmem_base_path[idx] +
+				offsetof(struct shmem_region,
+				dev_info.port_hw_config[0].e3_cmn_pin_cfg));
+			reset_pin[idx] = (reset_pin[idx] &
+				PORT_HW_CFG_E3_PHY_RESET_MASK) >>
+				PORT_HW_CFG_E3_PHY_RESET_SHIFT;
+			reset_pin[idx] -= PIN_CFG_GPIO0_P0;
+			reset_pin[idx] = (1 << reset_pin[idx]);
+		}
+		reset_gpios = (u8)(reset_pin[0] | reset_pin[1]);
+	} else {
+		/* E2: read the pin config from a different shmem location. */
+		for (idx = 0; idx < 2; idx++) {
+			reset_pin[idx] = REG_RD(bp, shmem_base_path[idx] +
+				offsetof(struct shmem_region,
+				dev_info.port_hw_config[0].default_cfg));
+			reset_pin[idx] &= PORT_HW_CFG_EXT_PHY_GPIO_RST_MASK;
+			reset_pin[idx] -= PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0;
+			reset_pin[idx] >>= PORT_HW_CFG_EXT_PHY_GPIO_RST_SHIFT;
+			reset_pin[idx] = (1 << reset_pin[idx]);
+		}
+		reset_gpios = (u8)(reset_pin[0] | reset_pin[1]);
+	}
+
+	bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_LOW);
+	udelay(10);
+	bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_HIGH);
+	msleep(800);
+	DP(NETIF_MSG_LINK, "84833 reset pulse on pin values 0x%x\n",
+		reset_gpios);
+
+	return 0;
+}
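
Both branches above end the same way: a per-path pin-config field is read,
rebased against the first GPIO value, and turned into a one-hot bit so the
two paths can be OR'ed into a single reset_gpios mask for the reset pulse. A
sketch of that conversion (the PIN_CFG_GPIO0_P0 value here is made up):

#include <stdint.h>
#include <stdio.h>

#define PIN_CFG_GPIO0_P0	8	/* hypothetical base of the pin enum */

/* Convert two per-path pin-config values into one GPIO bitmask. */
static uint8_t pins_to_mask(const uint32_t cfg[2])
{
	uint8_t mask = 0;
	int i;

	for (i = 0; i < 2; i++)
		mask |= (uint8_t)(1u << (cfg[i] - PIN_CFG_GPIO0_P0));
	return mask;
}

int main(void)
{
	uint32_t cfg[2] = { PIN_CFG_GPIO0_P0 + 1, PIN_CFG_GPIO0_P0 + 3 };

	/* GPIO1 | GPIO3 -> 0x0a */
	printf("reset_gpios = 0x%02x\n", pins_to_mask(cfg));
	return 0;
}
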
+
+static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
+				   struct link_params *params,
+				   struct link_vars *vars)
 {
 	struct bnx2x *bp = params->bp;
 	u8 port, initialize = 1;
-	u16 val, adj;
+	u16 val;
 	u16 temp;
 	u32 actual_phy_selection, cms_enable;
-	u8 rc = 0;
-
-	/* This is just for MDIO_CTL_REG_84823_MEDIA register. */
-	adj = 0;
-	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
-		adj = 3;
+	int rc = 0;
 
 	msleep(1);
-	if (CHIP_IS_E2(bp))
+
+	if (!(CHIP_IS_E1(bp)))
 		port = BP_PATH(bp);
 	else
 		port = params->port;
-	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
-		       MISC_REGISTERS_GPIO_OUTPUT_HIGH,
-		       port);
+
+	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823) {
+		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
+			       MISC_REGISTERS_GPIO_OUTPUT_HIGH,
+			       port);
+	} else {
+		bnx2x_cl45_write(bp, phy,
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_CTRL, 0x8000);
+	}
+
 	bnx2x_wait_reset_complete(bp, phy, params);
 	/* Wait for GPHY to come out of reset */
 	msleep(50);
+
+	/* Bring PHY out of super isolate mode */
+	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
+		bnx2x_cl45_read(bp, phy,
+				MDIO_CTL_DEVAD,
+				MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val);
+		val &= ~MDIO_84833_SUPER_ISOLATE;
+		bnx2x_cl45_write(bp, phy,
+				MDIO_CTL_DEVAD,
+				MDIO_84833_TOP_CFG_XGPHY_STRAP1, val);
+		bnx2x_wait_reset_complete(bp, phy, params);
+	}
+
+	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+		bnx2x_84833_pair_swap_cfg(phy, params, vars);
+
 	/*
 	 * BCM84823 requires the XGXS link to come up first at 10G for normal behavior
 	 */
@@ -6254,14 +9373,20 @@
 	/* Set dual-media configuration according to configuration */
 
 	bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
-			MDIO_CTL_REG_84823_MEDIA + adj, &val);
+			MDIO_CTL_REG_84823_MEDIA, &val);
 	val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK |
 		 MDIO_CTL_REG_84823_MEDIA_LINE_MASK |
 		 MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN |
 		 MDIO_CTL_REG_84823_MEDIA_PRIORITY_MASK |
 		 MDIO_CTL_REG_84823_MEDIA_FIBER_1G);
-	val |= MDIO_CTL_REG_84823_CTRL_MAC_XFI |
-		MDIO_CTL_REG_84823_MEDIA_LINE_XAUI_L;
+
+	if (CHIP_IS_E3(bp)) {
+		val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK |
+			 MDIO_CTL_REG_84823_MEDIA_LINE_MASK);
+	} else {
+		val |= (MDIO_CTL_REG_84823_CTRL_MAC_XFI |
+			MDIO_CTL_REG_84823_MEDIA_LINE_XAUI_L);
+	}
 
 	actual_phy_selection = bnx2x_phy_selection(params);
 
@@ -6287,7 +9412,7 @@
 		val |= MDIO_CTL_REG_84823_MEDIA_FIBER_1G;
 
 	bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
-			 MDIO_CTL_REG_84823_MEDIA + adj, val);
+			 MDIO_CTL_REG_84823_MEDIA, val);
 	DP(NETIF_MSG_LINK, "Multi_phy config = 0x%x, Media control = 0x%x\n",
 		   params->multi_phy_config, val);
 
@@ -6318,20 +9443,16 @@
 				  struct link_vars *vars)
 {
 	struct bnx2x *bp = params->bp;
-	u16 val, val1, val2, adj;
+	u16 val, val1, val2;
 	u8 link_up = 0;
 
-	/* Reg offset adjustment for 84833 */
-	adj = 0;
-	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
-		adj = -1;
 
 	/* Check 10G-BaseT link status */
 	/* Check PMD signal ok */
 	bnx2x_cl45_read(bp, phy,
 			MDIO_AN_DEVAD, 0xFFFA, &val1);
 	bnx2x_cl45_read(bp, phy,
-			MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_PMD_SIGNAL + adj,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_PMD_SIGNAL,
 			&val2);
 	DP(NETIF_MSG_LINK, "BCM848xx: PMD_SIGNAL 1.a811 = 0x%x\n", val2);
 
@@ -6403,9 +9524,10 @@
 	return link_up;
 }
 
-static u8 bnx2x_848xx_format_ver(u32 raw_ver, u8 *str, u16 *len)
+
+static int bnx2x_848xx_format_ver(u32 raw_ver, u8 *str, u16 *len)
 {
-	u8 status = 0;
+	int status = 0;
 	u32 spirom_ver;
 	spirom_ver = ((raw_ver & 0xF80) >> 7) << 16 | (raw_ver & 0x7F);
 	status = bnx2x_format_ver(spirom_ver, str, len);
@@ -6435,13 +9557,27 @@
 {
 	struct bnx2x *bp = params->bp;
 	u8 port;
-	if (CHIP_IS_E2(bp))
+	u16 val16;
+
+	if (!(CHIP_IS_E1(bp)))
 		port = BP_PATH(bp);
 	else
 		port = params->port;
-	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
-		       MISC_REGISTERS_GPIO_OUTPUT_LOW,
-		       port);
+
+	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823) {
+		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
+			       MISC_REGISTERS_GPIO_OUTPUT_LOW,
+			       port);
+	} else {
+		bnx2x_cl45_read(bp, phy,
+				MDIO_CTL_DEVAD,
+				0x400f, &val16);
+		/* Put the PHY into low power mode on newer FW */
+		if ((val16 & 0x303f) > 0x1009)
+			bnx2x_cl45_write(bp, phy,
+					MDIO_PMA_DEVAD,
+					MDIO_PMA_REG_CTRL, 0x800);
+	}
 }
 
 static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
@@ -6449,11 +9585,17 @@
 {
 	struct bnx2x *bp = params->bp;
 	u16 val;
+	u8 port;
+
+	if (!(CHIP_IS_E1(bp)))
+		port = BP_PATH(bp);
+	else
+		port = params->port;
 
 	switch (mode) {
 	case LED_MODE_OFF:
 
-		DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE OFF\n", params->port);
+		DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE OFF\n", port);
 
 		if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
 		    SHARED_HW_CFG_LED_EXTPHY1) {
@@ -6489,7 +9631,7 @@
 	case LED_MODE_FRONT_PANEL_OFF:
 
 		DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE FRONT PANEL OFF\n",
-		   params->port);
+		   port);
 
 		if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
 		    SHARED_HW_CFG_LED_EXTPHY1) {
@@ -6524,7 +9666,7 @@
 		break;
 	case LED_MODE_ON:
 
-		DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE ON\n", params->port);
+		DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE ON\n", port);
 
 		if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
 		    SHARED_HW_CFG_LED_EXTPHY1) {
@@ -6571,7 +9713,7 @@
 
 	case LED_MODE_OPER:
 
-		DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE OPER\n", params->port);
+		DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE OPER\n", port);
 
 		if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
 		    SHARED_HW_CFG_LED_EXTPHY1) {
@@ -6633,7 +9775,340 @@
 		}
 		break;
 	}
+
+	/*
+	 * This is a workaround for E3+84833 until autoneg
+	 * restart is fixed in f/w
+	 */
+	if (CHIP_IS_E3(bp)) {
+		bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+				MDIO_WC_REG_GP2_STATUS_GP_2_1, &val);
+	}
 }
+
+/******************************************************************/
+/*			54616S PHY SECTION			  */
+/******************************************************************/
+static int bnx2x_54616s_config_init(struct bnx2x_phy *phy,
+					       struct link_params *params,
+					       struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u8 port;
+	u16 autoneg_val, an_1000_val, an_10_100_val, fc_val, temp;
+	u32 cfg_pin;
+
+	DP(NETIF_MSG_LINK, "54616S cfg init\n");
+	usleep_range(1000, 1000);
+
+	/*
+	 * This works with E3 only; no need to check the chip
+	 * before determining the port.
+	 */
+	port = params->port;
+
+	cfg_pin = (REG_RD(bp, params->shmem_base +
+			offsetof(struct shmem_region,
+			dev_info.port_hw_config[port].e3_cmn_pin_cfg)) &
+			PORT_HW_CFG_E3_PHY_RESET_MASK) >>
+			PORT_HW_CFG_E3_PHY_RESET_SHIFT;
+
+	/* Drive pin high to bring the GPHY out of reset. */
+	bnx2x_set_cfg_pin(bp, cfg_pin, 1);
+
+	/* Wait for the GPHY to come out of reset */
+	msleep(50);
+
+	/* reset phy */
+	bnx2x_cl22_write(bp, phy,
+			 MDIO_PMA_REG_CTRL, 0x8000);
+	bnx2x_wait_reset_complete(bp, phy, params);
+
+	/* Wait for the GPHY reset to complete */
+	msleep(50);
+
+	/* Configure LED4: set to INTR (0x6). */
+	/* Accessing shadow register 0xe. */
+	bnx2x_cl22_write(bp, phy,
+			MDIO_REG_GPHY_SHADOW,
+			MDIO_REG_GPHY_SHADOW_LED_SEL2);
+	bnx2x_cl22_read(bp, phy,
+			MDIO_REG_GPHY_SHADOW,
+			&temp);
+	temp &= ~(0xf << 4);
+	temp |= (0x6 << 4);
+	bnx2x_cl22_write(bp, phy,
+			MDIO_REG_GPHY_SHADOW,
+			MDIO_REG_GPHY_SHADOW_WR_ENA | temp);
+	/* Configure INTR based on link status change. */
+	bnx2x_cl22_write(bp, phy,
+			MDIO_REG_INTR_MASK,
+			~MDIO_REG_INTR_MASK_LINK_STATUS);
+
+	/* Flip the signal detect polarity (set 0x1c.0x1e[8]). */
+	bnx2x_cl22_write(bp, phy,
+			MDIO_REG_GPHY_SHADOW,
+			MDIO_REG_GPHY_SHADOW_AUTO_DET_MED);
+	bnx2x_cl22_read(bp, phy,
+			MDIO_REG_GPHY_SHADOW,
+			&temp);
+	temp |= MDIO_REG_GPHY_SHADOW_INVERT_FIB_SD;
+	bnx2x_cl22_write(bp, phy,
+			MDIO_REG_GPHY_SHADOW,
+			MDIO_REG_GPHY_SHADOW_WR_ENA | temp);
+
+	/* Set up fc */
+	/* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
+	bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
+	fc_val = 0;
+	if ((vars->ieee_fc & MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
+			MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC)
+		fc_val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC;
+
+	if ((vars->ieee_fc & MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
+			MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH)
+		fc_val |= MDIO_AN_REG_ADV_PAUSE_PAUSE;
+
+	/* read all advertisement */
+	bnx2x_cl22_read(bp, phy,
+			0x09,
+			&an_1000_val);
+
+	bnx2x_cl22_read(bp, phy,
+			0x04,
+			&an_10_100_val);
+
+	bnx2x_cl22_read(bp, phy,
+			MDIO_PMA_REG_CTRL,
+			&autoneg_val);
+
+	/* Disable forced speed */
+	autoneg_val &= ~((1<<6) | (1<<8) | (1<<9) | (1<<12) | (1<<13));
+	an_10_100_val &= ~((1<<5) | (1<<6) | (1<<7) | (1<<8) | (1<<10) |
+			   (1<<11));
+
+	if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
+			(phy->speed_cap_mask &
+			PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
+			(phy->req_line_speed == SPEED_1000)) {
+		an_1000_val |= (1<<8);
+		autoneg_val |= (1<<9 | 1<<12);
+		if (phy->req_duplex == DUPLEX_FULL)
+			an_1000_val |= (1<<9);
+		DP(NETIF_MSG_LINK, "Advertising 1G\n");
+	} else
+		an_1000_val &= ~((1<<8) | (1<<9));
+
+	bnx2x_cl22_write(bp, phy,
+			0x09,
+			an_1000_val);
+	bnx2x_cl22_read(bp, phy,
+			0x09,
+			&an_1000_val);
+
+	/* set 100 speed advertisement */
+	if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
+			(phy->speed_cap_mask &
+			(PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL |
+			PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)))) {
+		an_10_100_val |= (1<<7);
+		/* Enable autoneg and restart autoneg for legacy speeds */
+		autoneg_val |= (1<<9 | 1<<12);
+
+		if (phy->req_duplex == DUPLEX_FULL)
+			an_10_100_val |= (1<<8);
+		DP(NETIF_MSG_LINK, "Advertising 100M\n");
+	}
+
+	/* set 10 speed advertisement */
+	if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
+			(phy->speed_cap_mask &
+			(PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL |
+			PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)))) {
+		an_10_100_val |= (1<<5);
+		autoneg_val |= (1<<9 | 1<<12);
+		if (phy->req_duplex == DUPLEX_FULL)
+			an_10_100_val |= (1<<6);
+		DP(NETIF_MSG_LINK, "Advertising 10M\n");
+	}
+
+	/* Only 10/100 are allowed to work in FORCE mode */
+	if (phy->req_line_speed == SPEED_100) {
+		autoneg_val |= (1<<13);
+		/* Enable AUTO-MDIX when autoneg is disabled */
+		bnx2x_cl22_write(bp, phy,
+				0x18,
+				(1<<15 | 1<<9 | 7<<0));
+		DP(NETIF_MSG_LINK, "Setting 100M force\n");
+	}
+	if (phy->req_line_speed == SPEED_10) {
+		/* Enable AUTO-MDIX when autoneg is disabled */
+		bnx2x_cl22_write(bp, phy,
+				0x18,
+				(1<<15 | 1<<9 | 7<<0));
+		DP(NETIF_MSG_LINK, "Setting 10M force\n");
+	}
+
+	bnx2x_cl22_write(bp, phy,
+			0x04,
+			an_10_100_val | fc_val);
+
+	if (phy->req_duplex == DUPLEX_FULL)
+		autoneg_val |= (1<<8);
+
+	bnx2x_cl22_write(bp, phy,
+			MDIO_PMA_REG_CTRL, autoneg_val);
+
+	return 0;
+}
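
The fc_val logic near the top of this function maps the resolved
flow-control request onto the PAUSE/ASM_DIR advertisement bits of MII
register 4 (802.3 Annex 28B, the Table 28B-3 cited above). One common
encoding of that mapping, sketched with the standard bit positions; the enum
and function names are illustrative, not driver API:

#include <stdint.h>
#include <stdio.h>

#define ADV_PAUSE	(1 << 10)	/* MII reg 4: symmetric PAUSE */
#define ADV_ASYM_PAUSE	(1 << 11)	/* MII reg 4: asymmetric PAUSE */

enum fc_mode { FC_NONE, FC_TX, FC_RX, FC_BOTH };

/* Flow-control request -> Annex 28B advertisement bits. */
static uint16_t fc_to_adv(enum fc_mode fc)
{
	switch (fc) {
	case FC_TX:	return ADV_ASYM_PAUSE;			/* send PAUSE only */
	case FC_RX:	return ADV_PAUSE | ADV_ASYM_PAUSE;	/* honour PAUSE */
	case FC_BOTH:	return ADV_PAUSE;
	default:	return 0;
	}
}

int main(void)
{
	printf("tx-only: 0x%04x\n", fc_to_adv(FC_TX));	/* 0x0800 */
	printf("rx:      0x%04x\n", fc_to_adv(FC_RX));	/* 0x0c00 */
	printf("both:    0x%04x\n", fc_to_adv(FC_BOTH));	/* 0x0400 */
	return 0;
}
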
+
+static void bnx2x_54616s_set_link_led(struct bnx2x_phy *phy,
+				      struct link_params *params, u8 mode)
+{
+	struct bnx2x *bp = params->bp;
+	DP(NETIF_MSG_LINK, "54616S set link led (mode=%x)\n", mode);
+	switch (mode) {
+	case LED_MODE_FRONT_PANEL_OFF:
+	case LED_MODE_OFF:
+	case LED_MODE_OPER:
+	case LED_MODE_ON:
+	default:
+		break;
+	}
+	return;
+}
+
+static void bnx2x_54616s_link_reset(struct bnx2x_phy *phy,
+				    struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	u32 cfg_pin;
+	u8 port;
+
+	/*
+	 * This works with E3 only; no need to check the chip
+	 * before determining the port.
+	 */
+	port = params->port;
+	cfg_pin = (REG_RD(bp, params->shmem_base +
+			offsetof(struct shmem_region,
+			dev_info.port_hw_config[port].e3_cmn_pin_cfg)) &
+			PORT_HW_CFG_E3_PHY_RESET_MASK) >>
+			PORT_HW_CFG_E3_PHY_RESET_SHIFT;
+
+	/* Drive pin low to put GPHY in reset. */
+	bnx2x_set_cfg_pin(bp, cfg_pin, 0);
+}
+
+static u8 bnx2x_54616s_read_status(struct bnx2x_phy *phy,
+				   struct link_params *params,
+				   struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u16 val;
+	u8 link_up = 0;
+	u16 legacy_status, legacy_speed;
+
+	/* Get speed operation status */
+	bnx2x_cl22_read(bp, phy,
+			0x19,
+			&legacy_status);
+	DP(NETIF_MSG_LINK, "54616S read_status: 0x%x\n", legacy_status);
+
+	/* Read status to clear the PHY interrupt. */
+	bnx2x_cl22_read(bp, phy,
+			MDIO_REG_INTR_STATUS,
+			&val);
+
+	link_up = ((legacy_status & (1<<2)) == (1<<2));
+
+	if (link_up) {
+		legacy_speed = (legacy_status & (7<<8));
+		if (legacy_speed == (7<<8)) {
+			vars->line_speed = SPEED_1000;
+			vars->duplex = DUPLEX_FULL;
+		} else if (legacy_speed == (6<<8)) {
+			vars->line_speed = SPEED_1000;
+			vars->duplex = DUPLEX_HALF;
+		} else if (legacy_speed == (5<<8)) {
+			vars->line_speed = SPEED_100;
+			vars->duplex = DUPLEX_FULL;
+		}
+		/* Omitting 100Base-T4 for now */
+		else if (legacy_speed == (3<<8)) {
+			vars->line_speed = SPEED_100;
+			vars->duplex = DUPLEX_HALF;
+		} else if (legacy_speed == (2<<8)) {
+			vars->line_speed = SPEED_10;
+			vars->duplex = DUPLEX_FULL;
+		} else if (legacy_speed == (1<<8)) {
+			vars->line_speed = SPEED_10;
+			vars->duplex = DUPLEX_HALF;
+		} else /* Should not happen */
+			vars->line_speed = 0;
+
+		DP(NETIF_MSG_LINK, "Link is up in %dMbps,"
+			   " is_duplex_full= %d\n", vars->line_speed,
+			   (vars->duplex == DUPLEX_FULL));
+
+		/* Check legacy speed AN resolution */
+		bnx2x_cl22_read(bp, phy,
+				0x01,
+				&val);
+		if (val & (1<<5))
+			vars->link_status |=
+				LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
+		bnx2x_cl22_read(bp, phy,
+				0x06,
+				&val);
+		if ((val & (1<<0)) == 0)
+			vars->link_status |=
+				LINK_STATUS_PARALLEL_DETECTION_USED;
+
+		DP(NETIF_MSG_LINK, "BCM54616S: link speed is %d\n",
+			   vars->line_speed);
+		bnx2x_ext_phy_resolve_fc(phy, params, vars);
+	}
+	return link_up;
+}
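
The speed decode above walks bits [10:8] of the auxiliary status register
through an if/else ladder. The same mapping as a lookup table, which also
makes the skipped 100Base-T4 slot explicit; a sketch, not the driver's code:

#include <stdint.h>
#include <stdio.h>

struct speed_duplex {
	int speed;	/* Mb/s, 0 = invalid */
	int full;	/* 1 = full duplex */
};

/* Index = status bits [10:8]; slot 4 (100Base-T4) is omitted, as above. */
static const struct speed_duplex decode[8] = {
	[1] = {   10, 0 },
	[2] = {   10, 1 },
	[3] = {  100, 0 },
	[5] = {  100, 1 },
	[6] = { 1000, 0 },
	[7] = { 1000, 1 },
};

int main(void)
{
	uint16_t legacy_status = 7 << 8;	/* 1000 Mb/s, full duplex */
	struct speed_duplex sd = decode[(legacy_status >> 8) & 7];

	printf("%d Mb/s, %s duplex\n", sd.speed, sd.full ? "full" : "half");
	return 0;
}
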
+
+static void bnx2x_54616s_config_loopback(struct bnx2x_phy *phy,
+				       struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	u16 val;
+	u32 umac_base = params->port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
+
+	DP(NETIF_MSG_LINK, "2PMA/PMD ext_phy_loopback: 54616s\n");
+
+	/* Enable master/slave manual mode and set to master */
+	/* mii write 9 [bits set 11 12] */
+	bnx2x_cl22_write(bp, phy, 0x09, 3<<11);
+
+	/* forced 1G and disable autoneg */
+	/* set val [mii read 0] */
+	/* set val [expr $val & [bits clear 6 12 13]] */
+	/* set val [expr $val | [bits set 6 8]] */
+	/* mii write 0 $val */
+	bnx2x_cl22_read(bp, phy, 0x00, &val);
+	val &= ~((1<<6) | (1<<12) | (1<<13));
+	val |= (1<<6) | (1<<8);
+	bnx2x_cl22_write(bp, phy, 0x00, val);
+
+	/* Set external loopback and Tx using 6dB coding */
+	/* mii write 0x18 7 */
+	/* set val [mii read 0x18] */
+	/* mii write 0x18 [expr $val | [bits set 10 15]] */
+	bnx2x_cl22_write(bp, phy, 0x18, 7);
+	bnx2x_cl22_read(bp, phy, 0x18, &val);
+	bnx2x_cl22_write(bp, phy, 0x18, val | (1<<10) | (1<<15));
+
+	/* This register opens the gate for the UMAC despite its name */
+	REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 1);
+
+	/*
+	 * Maximum Frame Length (RW). Defines a 14-Bit maximum frame
+	 * length used by the MAC receive logic to check frames.
+	 */
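+	/* 0x2710 == 10000 bytes */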
+	REG_WR(bp, umac_base + UMAC_REG_MAXFR, 0x2710);
+}
+
 /******************************************************************/
 /*			SFX7101 PHY SECTION			  */
 /******************************************************************/
@@ -6646,9 +10121,9 @@
 			 MDIO_XS_DEVAD, MDIO_XS_SFX7101_XGXS_TEST1, 0x100);
 }
 
-static u8 bnx2x_7101_config_init(struct bnx2x_phy *phy,
-				 struct link_params *params,
-				 struct link_vars *vars)
+static int bnx2x_7101_config_init(struct bnx2x_phy *phy,
+				  struct link_params *params,
+				  struct link_vars *vars)
 {
 	u16 fw_ver1, fw_ver2, val;
 	struct bnx2x *bp = params->bp;
@@ -6662,7 +10137,7 @@
 	bnx2x_wait_reset_complete(bp, phy, params);
 
 	bnx2x_cl45_write(bp, phy,
-			 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x1);
+			 MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x1);
 	DP(NETIF_MSG_LINK, "Setting the SFX7101 LED to blink on traffic\n");
 	bnx2x_cl45_write(bp, phy,
 			 MDIO_PMA_DEVAD, MDIO_PMA_REG_7107_LED_CNTL, (1<<3));
@@ -6694,9 +10169,9 @@
 	u8 link_up;
 	u16 val1, val2;
 	bnx2x_cl45_read(bp, phy,
-			MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS, &val2);
+			MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val2);
 	bnx2x_cl45_read(bp, phy,
-			MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS, &val1);
+			MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
 	DP(NETIF_MSG_LINK, "10G-base-T LASI status 0x%x->0x%x\n",
 		   val2, val1);
 	bnx2x_cl45_read(bp, phy,
@@ -6721,8 +10196,7 @@
 	return link_up;
 }
 
-
-static u8 bnx2x_7101_format_ver(u32 spirom_ver, u8 *str, u16 *len)
+static int bnx2x_7101_format_ver(u32 spirom_ver, u8 *str, u16 *len)
 {
 	if (*len < 5)
 		return -EINVAL;
@@ -6800,9 +10274,8 @@
 static struct bnx2x_phy phy_null = {
 	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN,
 	.addr		= 0,
-	.flags		= FLAGS_INIT_XGXS_FIRST,
 	.def_md_devad	= 0,
-	.reserved	= 0,
+	.flags		= FLAGS_INIT_XGXS_FIRST,
 	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
 	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
 	.mdio_ctrl	= 0,
@@ -6827,9 +10300,8 @@
 static struct bnx2x_phy phy_serdes = {
 	.type		= PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT,
 	.addr		= 0xff,
-	.flags		= 0,
 	.def_md_devad	= 0,
-	.reserved	= 0,
+	.flags		= 0,
 	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
 	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
 	.mdio_ctrl	= 0,
@@ -6843,14 +10315,14 @@
 			   SUPPORTED_Autoneg |
 			   SUPPORTED_Pause |
 			   SUPPORTED_Asym_Pause),
-	.media_type	= ETH_PHY_UNSPECIFIED,
+	.media_type	= ETH_PHY_BASE_T,
 	.ver_addr	= 0,
 	.req_flow_ctrl	= 0,
 	.req_line_speed	= 0,
 	.speed_cap_mask	= 0,
 	.req_duplex	= 0,
 	.rsrv		= 0,
-	.config_init	= (config_init_t)bnx2x_init_serdes,
+	.config_init	= (config_init_t)bnx2x_xgxs_config_init,
 	.read_status	= (read_status_t)bnx2x_link_settings_status,
 	.link_reset	= (link_reset_t)bnx2x_int_link_reset,
 	.config_loopback = (config_loopback_t)NULL,
@@ -6863,9 +10335,8 @@
 static struct bnx2x_phy phy_xgxs = {
 	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
 	.addr		= 0xff,
-	.flags		= 0,
 	.def_md_devad	= 0,
-	.reserved	= 0,
+	.flags		= 0,
 	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
 	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
 	.mdio_ctrl	= 0,
@@ -6880,14 +10351,14 @@
 			   SUPPORTED_Autoneg |
 			   SUPPORTED_Pause |
 			   SUPPORTED_Asym_Pause),
-	.media_type	= ETH_PHY_UNSPECIFIED,
+	.media_type	= ETH_PHY_CX4,
 	.ver_addr	= 0,
 	.req_flow_ctrl	= 0,
 	.req_line_speed	= 0,
 	.speed_cap_mask	= 0,
 	.req_duplex	= 0,
 	.rsrv		= 0,
-	.config_init	= (config_init_t)bnx2x_init_xgxs,
+	.config_init	= (config_init_t)bnx2x_xgxs_config_init,
 	.read_status	= (read_status_t)bnx2x_link_settings_status,
 	.link_reset	= (link_reset_t)bnx2x_int_link_reset,
 	.config_loopback = (config_loopback_t)bnx2x_set_xgxs_loopback,
@@ -6896,13 +10367,49 @@
 	.set_link_led	= (set_link_led_t)NULL,
 	.phy_specific_func = (phy_specific_func_t)NULL
 };
+static struct bnx2x_phy phy_warpcore = {
+	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
+	.addr		= 0xff,
+	.def_md_devad	= 0,
+	.flags		= FLAGS_HW_LOCK_REQUIRED,
+	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.mdio_ctrl	= 0,
+	.supported	= (SUPPORTED_10baseT_Half |
+			     SUPPORTED_10baseT_Full |
+			     SUPPORTED_100baseT_Half |
+			     SUPPORTED_100baseT_Full |
+			     SUPPORTED_1000baseT_Full |
+			     SUPPORTED_10000baseT_Full |
+			     SUPPORTED_20000baseKR2_Full |
+			     SUPPORTED_20000baseMLD2_Full |
+			     SUPPORTED_FIBRE |
+			     SUPPORTED_Autoneg |
+			     SUPPORTED_Pause |
+			     SUPPORTED_Asym_Pause),
+	.media_type	= ETH_PHY_UNSPECIFIED,
+	.ver_addr	= 0,
+	.req_flow_ctrl	= 0,
+	.req_line_speed	= 0,
+	.speed_cap_mask	= 0,
+	.req_duplex	= 0,
+	.rsrv		= 0,
+	.config_init	= (config_init_t)bnx2x_warpcore_config_init,
+	.read_status	= (read_status_t)bnx2x_warpcore_read_status,
+	.link_reset	= (link_reset_t)bnx2x_warpcore_link_reset,
+	.config_loopback = (config_loopback_t)bnx2x_set_warpcore_loopback,
+	.format_fw_ver	= (format_fw_ver_t)NULL,
+	.hw_reset	= (hw_reset_t)NULL,
+	.set_link_led	= (set_link_led_t)NULL,
+	.phy_specific_func = (phy_specific_func_t)NULL
+};
+
 
 static struct bnx2x_phy phy_7101 = {
 	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
 	.addr		= 0xff,
-	.flags		= FLAGS_FAN_FAILURE_DET_REQ,
 	.def_md_devad	= 0,
-	.reserved	= 0,
+	.flags		= FLAGS_FAN_FAILURE_DET_REQ,
 	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
 	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
 	.mdio_ctrl	= 0,
@@ -6930,9 +10437,8 @@
 static struct bnx2x_phy phy_8073 = {
 	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
 	.addr		= 0xff,
-	.flags		= FLAGS_HW_LOCK_REQUIRED,
 	.def_md_devad	= 0,
-	.reserved	= 0,
+	.flags		= FLAGS_HW_LOCK_REQUIRED,
 	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
 	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
 	.mdio_ctrl	= 0,
@@ -6943,7 +10449,7 @@
 			   SUPPORTED_Autoneg |
 			   SUPPORTED_Pause |
 			   SUPPORTED_Asym_Pause),
-	.media_type	= ETH_PHY_UNSPECIFIED,
+	.media_type	= ETH_PHY_KR,
 	.ver_addr	= 0,
 	.req_flow_ctrl	= 0,
 	.req_line_speed	= 0,
@@ -6962,9 +10468,8 @@
 static struct bnx2x_phy phy_8705 = {
 	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705,
 	.addr		= 0xff,
-	.flags		= FLAGS_INIT_XGXS_FIRST,
 	.def_md_devad	= 0,
-	.reserved	= 0,
+	.flags		= FLAGS_INIT_XGXS_FIRST,
 	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
 	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
 	.mdio_ctrl	= 0,
@@ -6991,9 +10496,8 @@
 static struct bnx2x_phy phy_8706 = {
 	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706,
 	.addr		= 0xff,
-	.flags		= FLAGS_INIT_XGXS_FIRST,
 	.def_md_devad	= 0,
-	.reserved	= 0,
+	.flags		= FLAGS_INIT_XGXS_FIRST,
 	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
 	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
 	.mdio_ctrl	= 0,
@@ -7022,10 +10526,9 @@
 static struct bnx2x_phy phy_8726 = {
 	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726,
 	.addr		= 0xff,
+	.def_md_devad	= 0,
 	.flags		= (FLAGS_HW_LOCK_REQUIRED |
 			   FLAGS_INIT_XGXS_FIRST),
-	.def_md_devad	= 0,
-	.reserved	= 0,
 	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
 	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
 	.mdio_ctrl	= 0,
@@ -7035,7 +10538,7 @@
 			   SUPPORTED_FIBRE |
 			   SUPPORTED_Pause |
 			   SUPPORTED_Asym_Pause),
-	.media_type	= ETH_PHY_SFP_FIBER,
+	.media_type	= ETH_PHY_NOT_PRESENT,
 	.ver_addr	= 0,
 	.req_flow_ctrl	= 0,
 	.req_line_speed	= 0,
@@ -7055,9 +10558,8 @@
 static struct bnx2x_phy phy_8727 = {
 	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
 	.addr		= 0xff,
-	.flags		= FLAGS_FAN_FAILURE_DET_REQ,
 	.def_md_devad	= 0,
-	.reserved	= 0,
+	.flags		= FLAGS_FAN_FAILURE_DET_REQ,
 	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
 	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
 	.mdio_ctrl	= 0,
@@ -7066,7 +10568,7 @@
 			   SUPPORTED_FIBRE |
 			   SUPPORTED_Pause |
 			   SUPPORTED_Asym_Pause),
-	.media_type	= ETH_PHY_SFP_FIBER,
+	.media_type	= ETH_PHY_NOT_PRESENT,
 	.ver_addr	= 0,
 	.req_flow_ctrl	= 0,
 	.req_line_speed	= 0,
@@ -7085,10 +10587,9 @@
 static struct bnx2x_phy phy_8481 = {
 	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
 	.addr		= 0xff,
+	.def_md_devad	= 0,
 	.flags		= FLAGS_FAN_FAILURE_DET_REQ |
 			  FLAGS_REARM_LATCH_SIGNAL,
-	.def_md_devad	= 0,
-	.reserved	= 0,
 	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
 	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
 	.mdio_ctrl	= 0,
@@ -7122,10 +10623,9 @@
 static struct bnx2x_phy phy_84823 = {
 	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823,
 	.addr		= 0xff,
+	.def_md_devad	= 0,
 	.flags		= FLAGS_FAN_FAILURE_DET_REQ |
 			  FLAGS_REARM_LATCH_SIGNAL,
-	.def_md_devad	= 0,
-	.reserved	= 0,
 	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
 	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
 	.mdio_ctrl	= 0,
@@ -7159,10 +10659,9 @@
 static struct bnx2x_phy phy_84833 = {
 	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833,
 	.addr		= 0xff,
+	.def_md_devad	= 0,
 	.flags		= FLAGS_FAN_FAILURE_DET_REQ |
 			    FLAGS_REARM_LATCH_SIGNAL,
-	.def_md_devad	= 0,
-	.reserved	= 0,
 	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
 	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
 	.mdio_ctrl	= 0,
@@ -7193,6 +10692,39 @@
 	.phy_specific_func = (phy_specific_func_t)NULL
 };
 
+static struct bnx2x_phy phy_54616s = {
+	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54616,
+	.addr		= 0xff,
+	.def_md_devad	= 0,
+	.flags		= FLAGS_INIT_XGXS_FIRST,
+	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.mdio_ctrl	= 0,
+	.supported	= (SUPPORTED_10baseT_Half |
+			   SUPPORTED_10baseT_Full |
+			   SUPPORTED_100baseT_Half |
+			   SUPPORTED_100baseT_Full |
+			   SUPPORTED_1000baseT_Full |
+			   SUPPORTED_TP |
+			   SUPPORTED_Autoneg |
+			   SUPPORTED_Pause |
+			   SUPPORTED_Asym_Pause),
+	.media_type	= ETH_PHY_BASE_T,
+	.ver_addr	= 0,
+	.req_flow_ctrl	= 0,
+	.req_line_speed	= 0,
+	.speed_cap_mask	= 0,
+	.req_duplex	= 0,
+	.rsrv		= 0,
+	.config_init	= (config_init_t)bnx2x_54616s_config_init,
+	.read_status	= (read_status_t)bnx2x_54616s_read_status,
+	.link_reset	= (link_reset_t)bnx2x_54616s_link_reset,
+	.config_loopback = (config_loopback_t)bnx2x_54616s_config_loopback,
+	.format_fw_ver	= (format_fw_ver_t)NULL,
+	.hw_reset	= (hw_reset_t)NULL,
+	.set_link_led	= (set_link_led_t)bnx2x_54616s_set_link_led,
+	.phy_specific_func = (phy_specific_func_t)NULL
+};
 /*****************************************************************/
 /*                                                               */
 /* Populate the phy accordingly. Main function: bnx2x_populate_phy */
@@ -7259,8 +10791,8 @@
 
 	return ext_phy_config;
 }
-static u8 bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
-				 struct bnx2x_phy *phy)
+static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
+				  struct bnx2x_phy *phy)
 {
 	u32 phy_addr;
 	u32 chip_id;
@@ -7269,22 +10801,105 @@
 			dev_info.port_feature_config[port].link_config)) &
 			  PORT_FEATURE_CONNECTED_SWITCH_MASK);
 	chip_id = REG_RD(bp, MISC_REG_CHIP_NUM) << 16;
-	switch (switch_cfg) {
-	case SWITCH_CFG_1G:
+	DP(NETIF_MSG_LINK, ":chip_id = 0x%x\n", chip_id);
+	if (USES_WARPCORE(bp)) {
+		u32 serdes_net_if;
 		phy_addr = REG_RD(bp,
-					NIG_REG_SERDES0_CTRL_PHY_ADDR +
-					port * 0x10);
-		*phy = phy_serdes;
-		break;
-	case SWITCH_CFG_10G:
-		phy_addr = REG_RD(bp,
-					NIG_REG_XGXS0_CTRL_PHY_ADDR +
-					port * 0x18);
-		*phy = phy_xgxs;
-		break;
-	default:
-		DP(NETIF_MSG_LINK, "Invalid switch_cfg\n");
-		return -EINVAL;
+				  MISC_REG_WC0_CTRL_PHY_ADDR);
+		*phy = phy_warpcore;
+		if (REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR) == 0x3)
+			phy->flags |= FLAGS_4_PORT_MODE;
+		else
+		phy->flags &= ~FLAGS_4_PORT_MODE;
+		/* Check Dual mode */
+		serdes_net_if = (REG_RD(bp, shmem_base +
+					offsetof(struct shmem_region, dev_info.
+					port_hw_config[port].default_cfg)) &
+				 PORT_HW_CFG_NET_SERDES_IF_MASK);
+		/*
+		 * Set the appropriate supported and flags indications per
+		 * interface type of the chip
+		 */
+		switch (serdes_net_if) {
+		case PORT_HW_CFG_NET_SERDES_IF_SGMII:
+			phy->supported &= (SUPPORTED_10baseT_Half |
+					   SUPPORTED_10baseT_Full |
+					   SUPPORTED_100baseT_Half |
+					   SUPPORTED_100baseT_Full |
+					   SUPPORTED_1000baseT_Full |
+					   SUPPORTED_FIBRE |
+					   SUPPORTED_Autoneg |
+					   SUPPORTED_Pause |
+					   SUPPORTED_Asym_Pause);
+			phy->media_type = ETH_PHY_BASE_T;
+			break;
+		case PORT_HW_CFG_NET_SERDES_IF_XFI:
+			phy->media_type = ETH_PHY_XFP_FIBER;
+			break;
+		case PORT_HW_CFG_NET_SERDES_IF_SFI:
+			phy->supported &= (SUPPORTED_1000baseT_Full |
+					   SUPPORTED_10000baseT_Full |
+					   SUPPORTED_FIBRE |
+					   SUPPORTED_Pause |
+					   SUPPORTED_Asym_Pause);
+			phy->media_type = ETH_PHY_SFP_FIBER;
+			break;
+		case PORT_HW_CFG_NET_SERDES_IF_KR:
+			phy->media_type = ETH_PHY_KR;
+			phy->supported &= (SUPPORTED_1000baseT_Full |
+					   SUPPORTED_10000baseT_Full |
+					   SUPPORTED_FIBRE |
+					   SUPPORTED_Autoneg |
+					   SUPPORTED_Pause |
+					   SUPPORTED_Asym_Pause);
+			break;
+		case PORT_HW_CFG_NET_SERDES_IF_DXGXS:
+			phy->media_type = ETH_PHY_KR;
+			phy->flags |= FLAGS_WC_DUAL_MODE;
+			phy->supported &= (SUPPORTED_20000baseMLD2_Full |
+					   SUPPORTED_FIBRE |
+					   SUPPORTED_Pause |
+					   SUPPORTED_Asym_Pause);
+			break;
+		case PORT_HW_CFG_NET_SERDES_IF_KR2:
+			phy->media_type = ETH_PHY_KR;
+			phy->flags |= FLAGS_WC_DUAL_MODE;
+			phy->supported &= (SUPPORTED_20000baseKR2_Full |
+					   SUPPORTED_FIBRE |
+					   SUPPORTED_Pause |
+					   SUPPORTED_Asym_Pause);
+			break;
+		default:
+			DP(NETIF_MSG_LINK, "Unknown WC interface type 0x%x\n",
+				       serdes_net_if);
+			break;
+		}
+
+		/*
+		 * Enable MDC/MDIO work-around for E3 A0 since free running MDC
+		 * was not set as expected. For B0, ECO will be enabled so there
+		 * won't be an issue there
+		 */
+		if (CHIP_REV(bp) == CHIP_REV_Ax)
+			phy->flags |= FLAGS_MDC_MDIO_WA;
+	} else {
+		switch (switch_cfg) {
+		case SWITCH_CFG_1G:
+			phy_addr = REG_RD(bp,
+					  NIG_REG_SERDES0_CTRL_PHY_ADDR +
+					  port * 0x10);
+			*phy = phy_serdes;
+			break;
+		case SWITCH_CFG_10G:
+			phy_addr = REG_RD(bp,
+					  NIG_REG_XGXS0_CTRL_PHY_ADDR +
+					  port * 0x18);
+			*phy = phy_xgxs;
+			break;
+		default:
+			DP(NETIF_MSG_LINK, "Invalid switch_cfg\n");
+			return -EINVAL;
+		}
 	}
 	phy->addr = (u8)phy_addr;
 	phy->mdio_ctrl = bnx2x_get_emac_base(bp,
@@ -7302,12 +10917,12 @@
 	return 0;
 }
 
-static u8 bnx2x_populate_ext_phy(struct bnx2x *bp,
-				 u8 phy_index,
-				 u32 shmem_base,
-				 u32 shmem2_base,
-				 u8 port,
-				 struct bnx2x_phy *phy)
+static int bnx2x_populate_ext_phy(struct bnx2x *bp,
+				  u8 phy_index,
+				  u32 shmem_base,
+				  u32 shmem2_base,
+				  u8 port,
+				  struct bnx2x_phy *phy)
 {
 	u32 ext_phy_config, phy_type, config2;
 	u32 mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH;
@@ -7336,6 +10951,7 @@
 		*phy = phy_8727;
 		phy->flags |= FLAGS_NOC;
 		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722:
 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
 		mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1;
 		*phy = phy_8727;
@@ -7349,6 +10965,9 @@
 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
 		*phy = phy_84833;
 		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54616:
+		*phy = phy_54616s;
+		break;
 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
 		*phy = phy_7101;
 		break;
@@ -7410,10 +11029,10 @@
 	return 0;
 }
 
-static u8 bnx2x_populate_phy(struct bnx2x *bp, u8 phy_index, u32 shmem_base,
-			     u32 shmem2_base, u8 port, struct bnx2x_phy *phy)
+static int bnx2x_populate_phy(struct bnx2x *bp, u8 phy_index, u32 shmem_base,
+			      u32 shmem2_base, u8 port, struct bnx2x_phy *phy)
 {
-	u8 status = 0;
+	int status = 0;
 	phy->type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN;
 	if (phy_index == INT_PHY)
 		return bnx2x_populate_int_phy(bp, shmem_base, port, phy);
@@ -7527,10 +11146,10 @@
 }
 
 
-u8 bnx2x_phy_probe(struct link_params *params)
+int bnx2x_phy_probe(struct link_params *params)
 {
 	u8 phy_index, actual_phy_idx, link_cfg_idx;
-	u32 phy_config_swapped;
+	u32 phy_config_swapped, sync_offset, media_types;
 	struct bnx2x *bp = params->bp;
 	struct bnx2x_phy *phy;
 	params->num_phys = 0;
@@ -7567,6 +11186,26 @@
 		if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)
 			break;
 
+		sync_offset = params->shmem_base +
+			offsetof(struct shmem_region,
+			dev_info.port_hw_config[params->port].media_type);
+		media_types = REG_RD(bp, sync_offset);
+
+		/*
+		 * Update media type for non-PMF sync only the first time.
+		 * In case the media type changes afterwards, it will be
+		 * updated using the update_status function.
+		 */
+		if ((media_types & (PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK <<
+				    (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT *
+				     actual_phy_idx))) == 0) {
+			media_types |= ((phy->media_type &
+					PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) <<
+				(PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT *
+				 actual_phy_idx));
+		}
+		REG_WR(bp, sync_offset, media_types);
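The read-modify-write above packs one media-type value per PHY into a single shared-memory word. A hypothetical standalone sketch of that layout (the 8-bit field width and byte spacing are assumptions taken from the mask/shift names, not verified values):

#include <stdint.h>
#include <stdio.h>

#define MEDIA_TYPE_PHY0_MASK	0xffu	/* assumed: 8-bit field per phy */
#define MEDIA_TYPE_PHY1_SHIFT	8	/* assumed: byte-spaced fields */

/* Write the media type for phy_idx only if its field is still zero */
static uint32_t sync_media_type(uint32_t word, uint32_t media,
				unsigned int phy_idx)
{
	uint32_t mask = MEDIA_TYPE_PHY0_MASK << (MEDIA_TYPE_PHY1_SHIFT * phy_idx);

	if ((word & mask) == 0)
		word |= (media & MEDIA_TYPE_PHY0_MASK) <<
			(MEDIA_TYPE_PHY1_SHIFT * phy_idx);
	return word;
}

int main(void)
{
	uint32_t word = 0;

	word = sync_media_type(word, 0x4, 0);	/* ETH_PHY_BASE_T */
	word = sync_media_type(word, 0x2, 1);	/* ETH_PHY_XFP_FIBER */
	printf("media_types = 0x%08x\n", (unsigned)word);	/* 0x00000204 */
	return 0;
}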
+
 		bnx2x_phy_def_cfg(params, phy, phy_index);
 		params->num_phys++;
 	}
@@ -7575,43 +11214,141 @@
 	return 0;
 }
 
-static void set_phy_vars(struct link_params *params)
+static void bnx2x_init_bmac_loopback(struct link_params *params,
+				     struct link_vars *vars)
 {
 	struct bnx2x *bp = params->bp;
-	u8 actual_phy_idx, phy_index, link_cfg_idx;
-	u8 phy_config_swapped = params->multi_phy_config &
-			PORT_HW_CFG_PHY_SWAPPED_ENABLED;
-	for (phy_index = INT_PHY; phy_index < params->num_phys;
-	      phy_index++) {
-		link_cfg_idx = LINK_CONFIG_IDX(phy_index);
-		actual_phy_idx = phy_index;
-		if (phy_config_swapped) {
-			if (phy_index == EXT_PHY1)
-				actual_phy_idx = EXT_PHY2;
-			else if (phy_index == EXT_PHY2)
-				actual_phy_idx = EXT_PHY1;
-		}
-		params->phy[actual_phy_idx].req_flow_ctrl =
-			params->req_flow_ctrl[link_cfg_idx];
+	vars->link_up = 1;
+	vars->line_speed = SPEED_10000;
+	vars->duplex = DUPLEX_FULL;
+	vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+	vars->mac_type = MAC_TYPE_BMAC;
 
-		params->phy[actual_phy_idx].req_line_speed =
-			params->req_line_speed[link_cfg_idx];
+	vars->phy_flags = PHY_XGXS_FLAG;
 
-		params->phy[actual_phy_idx].speed_cap_mask =
-			params->speed_cap_mask[link_cfg_idx];
+	bnx2x_xgxs_deassert(params);
 
-		params->phy[actual_phy_idx].req_duplex =
-			params->req_duplex[link_cfg_idx];
+	/* set bmac loopback */
+	bnx2x_bmac_enable(params, vars, 1);
 
-		DP(NETIF_MSG_LINK, "req_flow_ctrl %x, req_line_speed %x,"
-			   " speed_cap_mask %x\n",
-			   params->phy[actual_phy_idx].req_flow_ctrl,
-			   params->phy[actual_phy_idx].req_line_speed,
-			   params->phy[actual_phy_idx].speed_cap_mask);
-	}
+	REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
 }
 
-u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
+static void bnx2x_init_emac_loopback(struct link_params *params,
+				     struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	vars->link_up = 1;
+	vars->line_speed = SPEED_1000;
+	vars->duplex = DUPLEX_FULL;
+	vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+	vars->mac_type = MAC_TYPE_EMAC;
+
+	vars->phy_flags = PHY_XGXS_FLAG;
+
+	bnx2x_xgxs_deassert(params);
+	/* set emac loopback */
+	bnx2x_emac_enable(params, vars, 1);
+	bnx2x_emac_program(params, vars);
+	REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
+}
+
+static void bnx2x_init_xmac_loopback(struct link_params *params,
+				     struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	vars->link_up = 1;
+	if (!params->req_line_speed[0])
+		vars->line_speed = SPEED_10000;
+	else
+		vars->line_speed = params->req_line_speed[0];
+	vars->duplex = DUPLEX_FULL;
+	vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+	vars->mac_type = MAC_TYPE_XMAC;
+	vars->phy_flags = PHY_XGXS_FLAG;
+	/*
+	 * Set WC to loopback mode since link is required to provide clock
+	 * to the XMAC in 20G mode
+	 */
+	if (vars->line_speed == SPEED_20000) {
+		bnx2x_set_aer_mmd(params, &params->phy[0]);
+		bnx2x_warpcore_reset_lane(bp, &params->phy[0], 0);
+		params->phy[INT_PHY].config_loopback(
+			&params->phy[INT_PHY],
+			params);
+	}
+	bnx2x_xmac_enable(params, vars, 1);
+	REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
+}
+
+static void bnx2x_init_umac_loopback(struct link_params *params,
+				     struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	vars->link_up = 1;
+	vars->line_speed = SPEED_1000;
+	vars->duplex = DUPLEX_FULL;
+	vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+	vars->mac_type = MAC_TYPE_UMAC;
+	vars->phy_flags = PHY_XGXS_FLAG;
+	bnx2x_umac_enable(params, vars, 1);
+
+	REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
+}
+
+static void bnx2x_init_xgxs_loopback(struct link_params *params,
+				     struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	vars->link_up = 1;
+	vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+	vars->duplex = DUPLEX_FULL;
+	if (params->req_line_speed[0] == SPEED_1000)
+		vars->line_speed = SPEED_1000;
+	else
+		vars->line_speed = SPEED_10000;
+
+	if (!USES_WARPCORE(bp))
+		bnx2x_xgxs_deassert(params);
+	bnx2x_link_initialize(params, vars);
+
+	if (params->req_line_speed[0] == SPEED_1000) {
+		if (USES_WARPCORE(bp))
+			bnx2x_umac_enable(params, vars, 0);
+		else {
+			bnx2x_emac_program(params, vars);
+			bnx2x_emac_enable(params, vars, 0);
+		}
+	} else {
+		if (USES_WARPCORE(bp))
+			bnx2x_xmac_enable(params, vars, 0);
+		else
+			bnx2x_bmac_enable(params, vars, 0);
+	}
+
+	if (params->loopback_mode == LOOPBACK_XGXS) {
+		/* set 10G XGXS loopback */
+		params->phy[INT_PHY].config_loopback(
+			&params->phy[INT_PHY],
+			params);
+	} else {
+		/* set external phy loopback */
+		u8 phy_index;
+		for (phy_index = EXT_PHY1;
+		      phy_index < params->num_phys; phy_index++) {
+			if (params->phy[phy_index].config_loopback)
+				params->phy[phy_index].config_loopback(
+					&params->phy[phy_index],
+					params);
+		}
+	}
+	REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
+
+	bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed);
+}
+
+int bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
 {
 	struct bnx2x *bp = params->bp;
 	DP(NETIF_MSG_LINK, "Phy Initialization started\n");
@@ -7641,101 +11378,43 @@
 		DP(NETIF_MSG_LINK, "No phy found for initialization !!\n");
 		return -EINVAL;
 	}
-	set_phy_vars(params);
+	set_phy_vars(params, vars);
 
 	DP(NETIF_MSG_LINK, "Num of phys on board: %d\n", params->num_phys);
-	if (params->loopback_mode == LOOPBACK_BMAC) {
-
-		vars->link_up = 1;
-		vars->line_speed = SPEED_10000;
-		vars->duplex = DUPLEX_FULL;
-		vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
-		vars->mac_type = MAC_TYPE_BMAC;
-
-		vars->phy_flags = PHY_XGXS_FLAG;
-
-		bnx2x_xgxs_deassert(params);
-
-		/* set bmac loopback */
-		bnx2x_bmac_enable(params, vars, 1);
-
-		REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
-
-	} else if (params->loopback_mode == LOOPBACK_EMAC) {
-
-		vars->link_up = 1;
-		vars->line_speed = SPEED_1000;
-		vars->duplex = DUPLEX_FULL;
-		vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
-		vars->mac_type = MAC_TYPE_EMAC;
-
-		vars->phy_flags = PHY_XGXS_FLAG;
-
-		bnx2x_xgxs_deassert(params);
-		/* set bmac loopback */
-		bnx2x_emac_enable(params, vars, 1);
-		bnx2x_emac_program(params, vars);
-		REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
-
-	} else if ((params->loopback_mode == LOOPBACK_XGXS) ||
-		   (params->loopback_mode == LOOPBACK_EXT_PHY)) {
-
-		vars->link_up = 1;
-		vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
-		vars->duplex = DUPLEX_FULL;
-		if (params->req_line_speed[0] == SPEED_1000) {
-			vars->line_speed = SPEED_1000;
-			vars->mac_type = MAC_TYPE_EMAC;
-		} else {
-			vars->line_speed = SPEED_10000;
-			vars->mac_type = MAC_TYPE_BMAC;
+	switch (params->loopback_mode) {
+	case LOOPBACK_BMAC:
+		bnx2x_init_bmac_loopback(params, vars);
+		break;
+	case LOOPBACK_EMAC:
+		bnx2x_init_emac_loopback(params, vars);
+		break;
+	case LOOPBACK_XMAC:
+		bnx2x_init_xmac_loopback(params, vars);
+		break;
+	case LOOPBACK_UMAC:
+		bnx2x_init_umac_loopback(params, vars);
+		break;
+	case LOOPBACK_XGXS:
+	case LOOPBACK_EXT_PHY:
+		bnx2x_init_xgxs_loopback(params, vars);
+		break;
+	default:
+		if (!CHIP_IS_E3(bp)) {
+			if (params->switch_cfg == SWITCH_CFG_10G)
+				bnx2x_xgxs_deassert(params);
+			else
+				bnx2x_serdes_deassert(bp, params->port);
 		}
-
-		bnx2x_xgxs_deassert(params);
-		bnx2x_link_initialize(params, vars);
-
-		if (params->req_line_speed[0] == SPEED_1000) {
-			bnx2x_emac_program(params, vars);
-			bnx2x_emac_enable(params, vars, 0);
-		} else
-			bnx2x_bmac_enable(params, vars, 0);
-		if (params->loopback_mode == LOOPBACK_XGXS) {
-			/* set 10G XGXS loopback */
-			params->phy[INT_PHY].config_loopback(
-				&params->phy[INT_PHY],
-				params);
-
-		} else {
-			/* set external phy loopback */
-			u8 phy_index;
-			for (phy_index = EXT_PHY1;
-			      phy_index < params->num_phys; phy_index++) {
-				if (params->phy[phy_index].config_loopback)
-					params->phy[phy_index].config_loopback(
-						&params->phy[phy_index],
-						params);
-			}
-		}
-		REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
-
-		bnx2x_set_led(params, vars,
-			      LED_MODE_OPER, vars->line_speed);
-	} else
-	/* No loopback */
-	{
-		if (params->switch_cfg == SWITCH_CFG_10G)
-			bnx2x_xgxs_deassert(params);
-		else
-			bnx2x_serdes_deassert(bp, params->port);
-
 		bnx2x_link_initialize(params, vars);
 		msleep(30);
 		bnx2x_link_int_enable(params);
+		break;
 	}
 	return 0;
 }
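The rework above replaces the old if/else chain with one helper per loopback mode behind a single switch, so the new E3 MACs (XMAC, UMAC) slot in as additional cases without disturbing the default no-loopback path. A standalone sketch of the same dispatch shape (mode and helper names here are illustrative, not the driver's):

#include <stdio.h>

enum lb_mode { LB_NONE, LB_BMAC, LB_EMAC, LB_XMAC };

static void init_bmac_lb(void) { puts("BMAC loopback"); }
static void init_emac_lb(void) { puts("EMAC loopback"); }
static void init_xmac_lb(void) { puts("XMAC loopback"); }

static int phy_init(enum lb_mode mode)
{
	switch (mode) {
	case LB_BMAC: init_bmac_lb(); break;
	case LB_EMAC: init_emac_lb(); break;
	case LB_XMAC: init_xmac_lb(); break;	/* new MAC, new case */
	default:      puts("normal link init"); break;
	}
	return 0;
}

int main(void) { return phy_init(LB_XMAC); }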
-u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
-		    u8 reset_ext_phy)
+
+int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
+		     u8 reset_ext_phy)
 {
 	struct bnx2x *bp = params->bp;
 	u8 phy_index, port = params->port, clear_latch_ind = 0;
@@ -7753,14 +11432,19 @@
 	REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
 
 	/* disable nig egress interface */
-	REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0);
-	REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0);
+	if (!CHIP_IS_E3(bp)) {
+		REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0);
+		REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0);
+	}
 
 	/* Stop BigMac rx */
-	bnx2x_bmac_rx_disable(bp, port);
-
+	if (!CHIP_IS_E3(bp))
+		bnx2x_bmac_rx_disable(bp, port);
+	else
+		bnx2x_xmac_disable(params);
 	/* disable emac */
-	REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
+	if (!CHIP_IS_E3(bp))
+		REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
 
 	msleep(10);
 	/* The PHY reset is controlled by GPIO 1
@@ -7796,21 +11480,22 @@
 	       (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
 
 	/* disable nig ingress interface */
-	REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0);
-	REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0);
-	REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0);
-	REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0);
+	if (!CHIP_IS_E3(bp)) {
+		REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0);
+		REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0);
+	}
 	vars->link_up = 0;
+	vars->phy_flags = 0;
 	return 0;
 }
 
 /****************************************************************************/
 /*				Common function				    */
 /****************************************************************************/
-static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
-				     u32 shmem_base_path[],
-				     u32 shmem2_base_path[], u8 phy_index,
-				     u32 chip_id)
+static int bnx2x_8073_common_init_phy(struct bnx2x *bp,
+				      u32 shmem_base_path[],
+				      u32 shmem2_base_path[], u8 phy_index,
+				      u32 chip_id)
 {
 	struct bnx2x_phy phy[PORT_MAX];
 	struct bnx2x_phy *phy_blk[PORT_MAX];
@@ -7826,14 +11511,14 @@
 	for (port = PORT_MAX - 1; port >= PORT_0; port--) {
 		u32 shmem_base, shmem2_base;
 		/* In E2, the same phy is used for port0 of both paths */
-		if (CHIP_IS_E2(bp)) {
-			shmem_base = shmem_base_path[port];
-			shmem2_base = shmem2_base_path[port];
-			port_of_path = 0;
-		} else {
+		if (CHIP_IS_E1x(bp)) {
 			shmem_base = shmem_base_path[0];
 			shmem2_base = shmem2_base_path[0];
 			port_of_path = port;
+		} else {
+			shmem_base = shmem_base_path[port];
+			shmem2_base = shmem2_base_path[port];
+			port_of_path = 0;
 		}
 
 		/* Extract the ext phy address for the port */
@@ -7877,10 +11562,10 @@
 
 	/* PART2 - Download firmware to both phys */
 	for (port = PORT_MAX - 1; port >= PORT_0; port--) {
-		if (CHIP_IS_E2(bp))
-			port_of_path = 0;
-		else
+		if (CHIP_IS_E1x(bp))
 			port_of_path = port;
+		else
+			port_of_path = 0;
 
 		DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n",
 			   phy_blk[port]->addr);
@@ -7933,10 +11618,10 @@
 	}
 	return 0;
 }
-static u8 bnx2x_8726_common_init_phy(struct bnx2x *bp,
-				     u32 shmem_base_path[],
-				     u32 shmem2_base_path[], u8 phy_index,
-				     u32 chip_id)
+static int bnx2x_8726_common_init_phy(struct bnx2x *bp,
+				      u32 shmem_base_path[],
+				      u32 shmem2_base_path[], u8 phy_index,
+				      u32 chip_id)
 {
 	u32 val;
 	s8 port;
@@ -7954,12 +11639,12 @@
 		u32 shmem_base, shmem2_base;
 
 		/* In E2, same phy is using for port0 of the two paths */
-		if (CHIP_IS_E2(bp)) {
-			shmem_base = shmem_base_path[port];
-			shmem2_base = shmem2_base_path[port];
-		} else {
+		if (CHIP_IS_E1x(bp)) {
 			shmem_base = shmem_base_path[0];
 			shmem2_base = shmem2_base_path[0];
+		} else {
+			shmem_base = shmem_base_path[port];
+			shmem2_base = shmem2_base_path[port];
 		}
 		/* Extract the ext phy address for the port */
 		if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
@@ -8027,10 +11712,11 @@
 		break;
 	}
 }
-static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp,
-				     u32 shmem_base_path[],
-				     u32 shmem2_base_path[], u8 phy_index,
-				     u32 chip_id)
+
+static int bnx2x_8727_common_init_phy(struct bnx2x *bp,
+				      u32 shmem_base_path[],
+				      u32 shmem2_base_path[], u8 phy_index,
+				      u32 chip_id)
 {
 	s8 port, reset_gpio;
 	u32 swap_val, swap_override;
@@ -8067,14 +11753,14 @@
 		u32 shmem_base, shmem2_base;
 
 		/* In E2, same phy is using for port0 of the two paths */
-		if (CHIP_IS_E2(bp)) {
-			shmem_base = shmem_base_path[port];
-			shmem2_base = shmem2_base_path[port];
-			port_of_path = 0;
-		} else {
+		if (CHIP_IS_E1x(bp)) {
 			shmem_base = shmem_base_path[0];
 			shmem2_base = shmem2_base_path[0];
 			port_of_path = port;
+		} else {
+			shmem_base = shmem_base_path[port];
+			shmem2_base = shmem2_base_path[port];
+			port_of_path = 0;
 		}
 
 		/* Extract the ext phy address for the port */
@@ -8109,10 +11795,10 @@
 	}
 	/* PART2 - Download firmware to both phys */
 	for (port = PORT_MAX - 1; port >= PORT_0; port--) {
-		if (CHIP_IS_E2(bp))
-			port_of_path = 0;
-		else
+		if (CHIP_IS_E1x(bp))
 			port_of_path = port;
+		else
+			port_of_path = 0;
 		DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n",
 			   phy_blk[port]->addr);
 		if (bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
@@ -8123,11 +11809,11 @@
 	return 0;
 }
 
-static u8 bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
-				    u32 shmem2_base_path[], u8 phy_index,
-				    u32 ext_phy_type, u32 chip_id)
+static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
+				     u32 shmem2_base_path[], u8 phy_index,
+				     u32 ext_phy_type, u32 chip_id)
 {
-	u8 rc = 0;
+	int rc = 0;
 
 	switch (ext_phy_type) {
 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
@@ -8135,7 +11821,7 @@
 						shmem2_base_path,
 						phy_index, chip_id);
 		break;
-
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722:
 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC:
 		rc = bnx2x_8727_common_init_phy(bp, shmem_base_path,
@@ -8152,6 +11838,13 @@
 						shmem2_base_path,
 						phy_index, chip_id);
 		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
+		/*
+		 * The GPIO3 pins are linked, so both must be toggled
+		 * to obtain the required 2us pulse.
+		 */
+		rc = bnx2x_84833_common_init_phy(bp, shmem_base_path, chip_id);
+		break;
 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
 		rc = -EINVAL;
 		break;
@@ -8169,15 +11862,21 @@
 	return rc;
 }
 
-u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[],
-			 u32 shmem2_base_path[], u32 chip_id)
+int bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[],
+			  u32 shmem2_base_path[], u32 chip_id)
 {
-	u8 rc = 0;
-	u32 phy_ver;
-	u8 phy_index;
+	int rc = 0;
+	u32 phy_ver, val;
+	u8 phy_index = 0;
 	u32 ext_phy_type, ext_phy_config;
+	bnx2x_set_mdio_clk(bp, chip_id, PORT_0);
+	bnx2x_set_mdio_clk(bp, chip_id, PORT_1);
 	DP(NETIF_MSG_LINK, "Begin common phy init\n");
-
+	if (CHIP_IS_E3(bp)) {
+		/* Enable EPIO */
+		val = REG_RD(bp, MISC_REG_GEN_PURP_HWG);
+		REG_WR(bp, MISC_REG_GEN_PURP_HWG, val | 1);
+	}
 	/* Check if common init was already done */
 	phy_ver = REG_RD(bp, shmem_base_path[0] +
 			 offsetof(struct shmem_region,
@@ -8203,6 +11902,135 @@
 	return rc;
 }
 
+static void bnx2x_check_over_curr(struct link_params *params,
+				  struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u32 cfg_pin;
+	u8 port = params->port;
+	u32 pin_val;
+
+	cfg_pin = (REG_RD(bp, params->shmem_base +
+			  offsetof(struct shmem_region,
+			       dev_info.port_hw_config[port].e3_cmn_pin_cfg1)) &
+		   PORT_HW_CFG_E3_OVER_CURRENT_MASK) >>
+		PORT_HW_CFG_E3_OVER_CURRENT_SHIFT;
+
+	/* Ignore check if no external input PIN available */
+	if (bnx2x_get_cfg_pin(bp, cfg_pin, &pin_val) != 0)
+		return;
+
+	if (!pin_val) {
+		if ((vars->phy_flags & PHY_OVER_CURRENT_FLAG) == 0) {
+			netdev_err(bp->dev, "Error: Power fault on Port %d has "
+					    "been detected and the power to "
+					    "that SFP+ module has been removed "
+					    "to prevent failure of the card. "
+					    "Please remove the SFP+ module and "
+					    "restart the system to clear this "
+					    "error.\n",
+				   params->port);
+			vars->phy_flags |= PHY_OVER_CURRENT_FLAG;
+		}
+	} else
+		vars->phy_flags &= ~PHY_OVER_CURRENT_FLAG;
+}
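bnx2x_check_over_curr() pulls the over-current pin number out of a packed configuration word with the usual mask-then-shift idiom. A standalone sketch using an assumed, purely illustrative field layout:

#include <stdint.h>
#include <stdio.h>

#define OVER_CURRENT_MASK	0x0000ff00u	/* assumed layout */
#define OVER_CURRENT_SHIFT	8

int main(void)
{
	uint32_t cfg1 = 0x00003400u;	/* pretend shmem read */
	uint32_t pin = (cfg1 & OVER_CURRENT_MASK) >> OVER_CURRENT_SHIFT;

	printf("over-current pin = %u\n", (unsigned)pin);	/* 52 */
	return 0;
}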
+
+static void bnx2x_analyze_link_error(struct link_params *params,
+				     struct link_vars *vars, u32 lss_status)
+{
+	struct bnx2x *bp = params->bp;
+	/* Compare new value with previous value */
+	u8 led_mode;
+	u32 half_open_conn = (vars->phy_flags & PHY_HALF_OPEN_CONN_FLAG) > 0;
+
+	/*DP(NETIF_MSG_LINK, "CHECK LINK: %x half_open:%x-> lss:%x\n",
+		       vars->link_up,
+		       half_open_conn, lss_status);*/
+
+	if ((lss_status ^ half_open_conn) == 0)
+		return;
+
+	/* If values differ */
+	DP(NETIF_MSG_LINK, "Link changed:%x %x->%x\n", vars->link_up,
+		       half_open_conn, lss_status);
+
+	/*
+	 * a. Update shmem->link_status accordingly
+	 * b. Update link_vars->link_up
+	 */
+	if (lss_status) {
+		vars->link_status &= ~LINK_STATUS_LINK_UP;
+		vars->link_up = 0;
+		vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
+		/*
+		 * Set LED mode to off since the PHY doesn't know about these
+		 * errors
+		 */
+		led_mode = LED_MODE_OFF;
+	} else {
+		vars->link_status |= LINK_STATUS_LINK_UP;
+		vars->link_up = 1;
+		vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG;
+		led_mode = LED_MODE_OPER;
+	}
+	/* Update the LED according to the link state */
+	bnx2x_set_led(params, vars, led_mode, SPEED_10000);
+
+	/* Update link status in the shared memory */
+	bnx2x_update_mng(params, vars->link_status);
+
+	/* c. Trigger General Attention */
+	vars->periodic_flags |= PERIODIC_FLAGS_LINK_EVENT;
+	bnx2x_notify_link_changed(bp);
+}
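The early return in bnx2x_analyze_link_error() leans on XOR as a change detector: both sides are first normalized to 0/1, so (a ^ b) is zero exactly when the stored half-open flag already matches the freshly read LSS status. A tiny demo of that test:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t half_open_conn = 1;	/* previous state, from phy_flags */
	uint32_t lss_status = 0;	/* freshly read from the MAC, 0 or 1 */

	if ((lss_status ^ half_open_conn) == 0)
		puts("no transition - nothing to do");
	else
		puts("state changed - update shmem, LED and attention bits");
	return 0;
}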
+
+static void bnx2x_check_half_open_conn(struct link_params *params,
+				       struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u32 lss_status = 0;
+	u32 mac_base;
+	/* Only run the check when the link is physically up at 10G */
+	if ((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0)
+		return;
+
+	if (!CHIP_IS_E3(bp) &&
+	    (REG_RD(bp, MISC_REG_RESET_REG_2) &
+		   (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port))) {
+		/* Check E1X / E2 BMAC */
+		u32 lss_status_reg;
+		u32 wb_data[2];
+		mac_base = params->port ? NIG_REG_INGRESS_BMAC1_MEM :
+			NIG_REG_INGRESS_BMAC0_MEM;
+		/* Read BIGMAC_REGISTER_RX_LSS_STATUS */
+		if (CHIP_IS_E2(bp))
+			lss_status_reg = BIGMAC2_REGISTER_RX_LSS_STAT;
+		else
+			lss_status_reg = BIGMAC_REGISTER_RX_LSS_STATUS;
+
+		REG_RD_DMAE(bp, mac_base + lss_status_reg, wb_data, 2);
+		lss_status = (wb_data[0] > 0);
+
+		bnx2x_analyze_link_error(params, vars, lss_status);
+	}
+}
+
+void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
+{
+	struct bnx2x *bp;
+
+	/* Check params before dereferencing it; there is no bp to log with */
+	if (!params)
+		return;
+	bp = params->bp;
+	/* DP(NETIF_MSG_LINK, "Periodic called vars->phy_flags 0x%x speed 0x%x
+	 RESET_REG_2 0x%x\n", vars->phy_flags, vars->line_speed,
+	  REG_RD(bp, MISC_REG_RESET_REG_2)); */
+	bnx2x_check_half_open_conn(params, vars);
+	if (CHIP_IS_E3(bp))
+		bnx2x_check_over_curr(params, vars);
+}
+
 u8 bnx2x_hw_lock_required(struct bnx2x *bp, u32 shmem_base, u32 shmem2_base)
 {
 	u8 phy_index;
@@ -8255,3 +12083,72 @@
 		}
 	}
 }
+
+void bnx2x_init_mod_abs_int(struct bnx2x *bp, struct link_vars *vars,
+			    u32 chip_id, u32 shmem_base, u32 shmem2_base,
+			    u8 port)
+{
+	u8 gpio_num = 0xff, gpio_port = 0xff, phy_index;
+	u32 val;
+	u32 offset, aeu_mask, swap_val, swap_override, sync_offset;
+	if (CHIP_IS_E3(bp)) {
+		if (bnx2x_get_mod_abs_int_cfg(bp, chip_id,
+					      shmem_base,
+					      port,
+					      &gpio_num,
+					      &gpio_port) != 0)
+			return;
+	} else {
+		struct bnx2x_phy phy;
+		for (phy_index = EXT_PHY1; phy_index < MAX_PHYS;
+		      phy_index++) {
+			if (bnx2x_populate_phy(bp, phy_index, shmem_base,
+					       shmem2_base, port, &phy)
+			    != 0) {
+				DP(NETIF_MSG_LINK, "populate phy failed\n");
+				return;
+			}
+			if (phy.type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) {
+				gpio_num = MISC_REGISTERS_GPIO_3;
+				gpio_port = port;
+				break;
+			}
+		}
+	}
+
+	if (gpio_num == 0xff)
+		return;
+
+	/* Set GPIO3 to trigger SFP+ module insertion/removal */
+	bnx2x_set_gpio(bp, gpio_num, MISC_REGISTERS_GPIO_INPUT_HI_Z, gpio_port);
+
+	swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+	swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
+	gpio_port ^= (swap_val && swap_override);
+
+	vars->aeu_int_mask = AEU_INPUTS_ATTN_BITS_GPIO0_FUNCTION_0 <<
+		(gpio_num + (gpio_port << 2));
+
+	sync_offset = shmem_base +
+		offsetof(struct shmem_region,
+			 dev_info.port_hw_config[port].aeu_int_mask);
+	REG_WR(bp, sync_offset, vars->aeu_int_mask);
+
+	DP(NETIF_MSG_LINK, "Setting MOD_ABS (GPIO%d_P%d) AEU to 0x%x\n",
+		       gpio_num, gpio_port, vars->aeu_int_mask);
+
+	if (port == 0)
+		offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
+	else
+		offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
+
+	/* Open appropriate AEU for interrupts */
+	aeu_mask = REG_RD(bp, offset);
+	aeu_mask |= vars->aeu_int_mask;
+	REG_WR(bp, offset, aeu_mask);
+
+	/* Enable the GPIO to trigger interrupt */
+	val = REG_RD(bp, MISC_REG_GPIO_EVENT_EN);
+	val |= 1 << (gpio_num + (gpio_port << 2));
+	REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val);
+}
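The AEU mask built above places each GPIO attention line at bit (gpio_num + 4*gpio_port) above the GPIO0/function-0 bit, i.e. each port owns a block of four consecutive bits. A standalone check of that arithmetic (the base bit value is illustrative, not the real register encoding):

#include <stdint.h>
#include <stdio.h>

#define AEU_GPIO0_FUNCTION_0	(1u << 2)	/* assumed base bit */

int main(void)
{
	unsigned int gpio_num = 3, gpio_port = 1;
	uint32_t mask = AEU_GPIO0_FUNCTION_0 << (gpio_num + (gpio_port << 2));

	/* gpio3 of port1 lands 3 + 4 = 7 bits above the base */
	printf("aeu_int_mask = 0x%08x\n", (unsigned)mask);	/* 0x200 */
	return 0;
}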
diff --git a/drivers/net/bnx2x/bnx2x_link.h b/drivers/net/bnx2x/bnx2x_link.h
index 92f36b6..6f299c2 100644
--- a/drivers/net/bnx2x/bnx2x_link.h
+++ b/drivers/net/bnx2x/bnx2x_link.h
@@ -33,12 +33,13 @@
 #define BNX2X_FLOW_CTRL_BOTH		PORT_FEATURE_FLOW_CONTROL_BOTH
 #define BNX2X_FLOW_CTRL_NONE		PORT_FEATURE_FLOW_CONTROL_NONE
 
+#define NET_SERDES_IF_XFI		1
+#define NET_SERDES_IF_SFI		2
+#define NET_SERDES_IF_KR		3
+#define NET_SERDES_IF_DXGXS		4
+
 #define SPEED_AUTO_NEG		0
-#define SPEED_12000		12000
-#define SPEED_12500		12500
-#define SPEED_13000		13000
-#define SPEED_15000		15000
-#define SPEED_16000		16000
+#define SPEED_20000		20000
 
 #define SFP_EEPROM_VENDOR_NAME_ADDR		0x14
 #define SFP_EEPROM_VENDOR_NAME_SIZE		16
@@ -46,6 +47,12 @@
 #define SFP_EEPROM_VENDOR_OUI_SIZE		3
 #define SFP_EEPROM_PART_NO_ADDR			0x28
 #define SFP_EEPROM_PART_NO_SIZE			16
+#define SFP_EEPROM_REVISION_ADDR		0x38
+#define SFP_EEPROM_REVISION_SIZE		4
+#define SFP_EEPROM_SERIAL_ADDR			0x44
+#define SFP_EEPROM_SERIAL_SIZE			16
+#define SFP_EEPROM_DATE_ADDR			0x54 /* ASCII YYMMDD */
+#define SFP_EEPROM_DATE_SIZE			6
 #define PWR_FLT_ERR_MSG_LEN			250
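The new offsets follow the standard SFP ID page (SFF-8472 A0) field layout: serial number at byte 0x44 and an ASCII YYMMDD date code at 0x54. A userspace sketch that slices those fields out of a raw EEPROM dump:

#include <stdio.h>
#include <string.h>

#define SFP_EEPROM_SERIAL_ADDR	0x44
#define SFP_EEPROM_SERIAL_SIZE	16
#define SFP_EEPROM_DATE_ADDR	0x54
#define SFP_EEPROM_DATE_SIZE	6

int main(void)
{
	unsigned char eeprom[96] = {0};	/* pretend A0-page dump */
	char serial[SFP_EEPROM_SERIAL_SIZE + 1];
	char date[SFP_EEPROM_DATE_SIZE + 1];

	memcpy(eeprom + SFP_EEPROM_SERIAL_ADDR, "ABC1234567890123", 16);
	memcpy(eeprom + SFP_EEPROM_DATE_ADDR, "110715", 6);	/* YYMMDD */

	memcpy(serial, eeprom + SFP_EEPROM_SERIAL_ADDR, SFP_EEPROM_SERIAL_SIZE);
	serial[SFP_EEPROM_SERIAL_SIZE] = '\0';
	memcpy(date, eeprom + SFP_EEPROM_DATE_ADDR, SFP_EEPROM_DATE_SIZE);
	date[SFP_EEPROM_DATE_SIZE] = '\0';

	printf("serial %s, date %s\n", serial, date);
	return 0;
}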
 
 #define XGXS_EXT_PHY_TYPE(ext_phy_config) \
@@ -62,25 +69,26 @@
 #define SINGLE_MEDIA(params)		(params->num_phys == 2)
 /* Dual Media board contains two external phy with different media */
 #define DUAL_MEDIA(params)		(params->num_phys == 3)
+
+#define FW_PARAM_PHY_ADDR_MASK		0x000000FF
+#define FW_PARAM_PHY_TYPE_MASK		0x0000FF00
+#define FW_PARAM_MDIO_CTRL_MASK		0xFFFF0000
 #define FW_PARAM_MDIO_CTRL_OFFSET		16
+#define FW_PARAM_PHY_ADDR(fw_param) (fw_param & \
+					   FW_PARAM_PHY_ADDR_MASK)
+#define FW_PARAM_PHY_TYPE(fw_param) (fw_param & \
+					   FW_PARAM_PHY_TYPE_MASK)
+#define FW_PARAM_MDIO_CTRL(fw_param) ((fw_param & \
+					    FW_PARAM_MDIO_CTRL_MASK) >> \
+					    FW_PARAM_MDIO_CTRL_OFFSET)
 #define FW_PARAM_SET(phy_addr, phy_type, mdio_access) \
 	(phy_addr | phy_type | mdio_access << FW_PARAM_MDIO_CTRL_OFFSET)
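The FW_PARAM_* masks above define a 32-bit layout: PHY address in bits 0-7, PHY type in 8-15, MDIO control in 16-31 (the type is kept pre-shifted, so FW_PARAM_PHY_TYPE() returns it masked but not shifted down). A standalone round-trip check:

#include <stdint.h>
#include <stdio.h>

#define FW_PARAM_PHY_ADDR_MASK		0x000000FFu
#define FW_PARAM_PHY_TYPE_MASK		0x0000FF00u
#define FW_PARAM_MDIO_CTRL_MASK		0xFFFF0000u
#define FW_PARAM_MDIO_CTRL_OFFSET	16
#define FW_PARAM_SET(addr, type, mdio) \
	((addr) | (type) | ((mdio) << FW_PARAM_MDIO_CTRL_OFFSET))

int main(void)
{
	uint32_t fw_param = FW_PARAM_SET(0x1cu, 0x0700u, 0x8000u);

	printf("addr 0x%x type 0x%x mdio 0x%x\n",
	       (unsigned)(fw_param & FW_PARAM_PHY_ADDR_MASK),
	       (unsigned)(fw_param & FW_PARAM_PHY_TYPE_MASK),
	       (unsigned)((fw_param & FW_PARAM_MDIO_CTRL_MASK) >>
			  FW_PARAM_MDIO_CTRL_OFFSET));
	return 0;
}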
 
-#define PFC_BRB_MAC_PAUSE_XOFF_THRESHOLD_PAUSEABLE		170
-#define PFC_BRB_MAC_PAUSE_XOFF_THRESHOLD_NON_PAUSEABLE		0
-
-#define PFC_BRB_MAC_PAUSE_XON_THRESHOLD_PAUSEABLE			250
-#define PFC_BRB_MAC_PAUSE_XON_THRESHOLD_NON_PAUSEABLE		0
-
-#define PFC_BRB_MAC_FULL_XOFF_THRESHOLD_PAUSEABLE			10
-#define PFC_BRB_MAC_FULL_XOFF_THRESHOLD_NON_PAUSEABLE		90
-
-#define PFC_BRB_MAC_FULL_XON_THRESHOLD_PAUSEABLE			50
-#define PFC_BRB_MAC_FULL_XON_THRESHOLD_NON_PAUSEABLE		250
 
 #define PFC_BRB_FULL_LB_XOFF_THRESHOLD				170
 #define PFC_BRB_FULL_LB_XON_THRESHOLD				250
 
+#define MAXVAL(a, b) (((a) > (b)) ? (a) : (b))
 /***********************************************************/
 /*                         Structs                         */
 /***********************************************************/
@@ -121,8 +129,8 @@
 
 	/* Loaded during init */
 	u8 addr;
-
-	u8 flags;
+	u8 def_md_devad;
+	u16 flags;
 	/* Require HW lock */
 #define FLAGS_HW_LOCK_REQUIRED		(1<<0)
 	/* No Over-Current detection */
@@ -131,11 +139,13 @@
 #define FLAGS_FAN_FAILURE_DET_REQ	(1<<2)
 	/* Initialize first the XGXS and only then the phy itself */
 #define FLAGS_INIT_XGXS_FIRST		(1<<3)
+#define FLAGS_WC_DUAL_MODE		(1<<4)
+#define FLAGS_4_PORT_MODE		(1<<5)
 #define FLAGS_REARM_LATCH_SIGNAL	(1<<6)
 #define FLAGS_SFP_NOT_APPROVED		(1<<7)
+#define FLAGS_MDC_MDIO_WA		(1<<8)
+#define FLAGS_DUMMY_READ		(1<<9)
 
-	u8 def_md_devad;
-	u8 reserved;
 	/* preemphasis values for the rx side */
 	u16 rx_preemphasis[4];
 
@@ -153,6 +163,8 @@
 #define	ETH_PHY_XFP_FIBER   0x2
 #define	ETH_PHY_DA_TWINAX   0x3
 #define	ETH_PHY_BASE_T      0x4
+#define	ETH_PHY_KR          0xf0
+#define	ETH_PHY_CX4         0xf1
 #define	ETH_PHY_NOT_PRESENT 0xff
 
 	/* The address in which version is located*/
@@ -257,11 +269,19 @@
 /* Output parameters */
 struct link_vars {
 	u8 phy_flags;
+#define PHY_XGXS_FLAG			(1<<0)
+#define PHY_SGMII_FLAG			(1<<1)
+#define PHY_PHYSICAL_LINK_FLAG		(1<<2)
+#define PHY_HALF_OPEN_CONN_FLAG		(1<<3)
+#define PHY_OVER_CURRENT_FLAG		(1<<4)
+#define PHY_TX_ERROR_CHECK_FLAG		(1<<5)
 
 	u8 mac_type;
 #define MAC_TYPE_NONE		0
 #define MAC_TYPE_EMAC		1
 #define MAC_TYPE_BMAC		2
+#define MAC_TYPE_UMAC		3
+#define MAC_TYPE_XMAC		4
 
 	u8 phy_link_up; /* internal phy link indication */
 	u8 link_up;
@@ -274,45 +294,52 @@
 
 	/* The same definitions as the shmem parameter */
 	u32 link_status;
+	u8 fault_detected;
+	u8 rsrv1;
+	u16 periodic_flags;
+#define PERIODIC_FLAGS_LINK_EVENT	0x0001
+
+	u32 aeu_int_mask;
 };
 
 /***********************************************************/
 /*                         Functions                       */
 /***********************************************************/
-u8 bnx2x_phy_init(struct link_params *input, struct link_vars *output);
+int bnx2x_phy_init(struct link_params *params, struct link_vars *vars);
 
 /* Reset the link. Should be called when driver or interface goes down
    Before calling phy firmware upgrade, the reset_ext_phy should be set
    to 0 */
-u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
-		  u8 reset_ext_phy);
+int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
+		     u8 reset_ext_phy);
 
 /* bnx2x_link_update should be called upon link interrupt */
-u8 bnx2x_link_update(struct link_params *input, struct link_vars *output);
+int bnx2x_link_update(struct link_params *params, struct link_vars *vars);
 
 /* use the following phy functions to read/write from external_phy
   In order to use it to read/write internal phy registers, use
   DEFAULT_PHY_DEV_ADDR as devad, and (_bank + (_addr & 0xf)) as
   the register */
-u8 bnx2x_phy_read(struct link_params *params, u8 phy_addr,
-		  u8 devad, u16 reg, u16 *ret_val);
+int bnx2x_phy_read(struct link_params *params, u8 phy_addr,
+		   u8 devad, u16 reg, u16 *ret_val);
 
-u8 bnx2x_phy_write(struct link_params *params, u8 phy_addr,
-		   u8 devad, u16 reg, u16 val);
+int bnx2x_phy_write(struct link_params *params, u8 phy_addr,
+		    u8 devad, u16 reg, u16 val);
+
 /* Reads the link_status from the shmem,
    and update the link vars accordingly */
 void bnx2x_link_status_update(struct link_params *input,
 			    struct link_vars *output);
 /* returns string representing the fw_version of the external phy */
-u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
-			      u8 *version, u16 len);
+int bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
+				 u8 *version, u16 len);
 
 /* Set/Unset the led
    Basically, the CLC takes care of the led for the link, but in case one needs
    to set/unset the led unnaturally, set the "mode" to LED_MODE_OPER to
    blink the led, and LED_MODE_OFF to set the led off.*/
-u8 bnx2x_set_led(struct link_params *params, struct link_vars *vars,
-		 u8 mode, u32 speed);
+int bnx2x_set_led(struct link_params *params,
+		  struct link_vars *vars, u8 mode, u32 speed);
 #define LED_MODE_OFF			0
 #define LED_MODE_ON			1
 #define LED_MODE_OPER			2
@@ -324,12 +351,12 @@
 
 /* Get the actual link status. In case it returns 0, link is up,
 	otherwise link is down*/
-u8 bnx2x_test_link(struct link_params *input, struct link_vars *vars,
-		   u8 is_serdes);
+int bnx2x_test_link(struct link_params *params, struct link_vars *vars,
+		    u8 is_serdes);
 
 /* One-time initialization for external phy after power up */
-u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[],
-			 u32 shmem2_base_path[], u32 chip_id);
+int bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[],
+			  u32 shmem2_base_path[], u32 chip_id);
 
 /* Reset the external PHY using GPIO */
 void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port);
@@ -338,9 +365,9 @@
 void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy);
 
 /* Read "byte_cnt" bytes from address "addr" from the SFP+ EEPROM */
-u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
-				struct link_params *params, u16 addr,
-				u8 byte_cnt, u8 *o_buf);
+int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+				 struct link_params *params, u16 addr,
+				 u8 byte_cnt, u8 *o_buf);
 
 void bnx2x_hw_reset_phy(struct link_params *params);
 
@@ -352,11 +379,28 @@
 u32 bnx2x_phy_selection(struct link_params *params);
 
 /* Probe the phys on board, and populate them in "params" */
-u8 bnx2x_phy_probe(struct link_params *params);
+int bnx2x_phy_probe(struct link_params *params);
+
 /* Checks if fan failure detection is required on one of the phys on board */
 u8 bnx2x_fan_failure_det_req(struct bnx2x *bp, u32 shmem_base,
 			     u32 shmem2_base, u8 port);
 
+
+
+/* DCBX structs */
+
+/* Number of maximum COS per chip */
+#define DCBX_E2E3_MAX_NUM_COS		(2)
+#define DCBX_E3B0_MAX_NUM_COS_PORT0	(6)
+#define DCBX_E3B0_MAX_NUM_COS_PORT1	(3)
+#define DCBX_E3B0_MAX_NUM_COS		( \
+			MAXVAL(DCBX_E3B0_MAX_NUM_COS_PORT0, \
+			    DCBX_E3B0_MAX_NUM_COS_PORT1))
+
+#define DCBX_MAX_NUM_COS			( \
+			MAXVAL(DCBX_E3B0_MAX_NUM_COS, \
+			    DCBX_E2E3_MAX_NUM_COS))
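DCBX_MAX_NUM_COS resolves entirely at compile time through the nested MAXVAL: max(max(6, 3), 2) == 6, so the rx_cos_priority_mask array below is sized for the largest chip. Checking the arithmetic standalone:

#include <stdio.h>

#define MAXVAL(a, b) (((a) > (b)) ? (a) : (b))
#define DCBX_E2E3_MAX_NUM_COS		(2)
#define DCBX_E3B0_MAX_NUM_COS_PORT0	(6)
#define DCBX_E3B0_MAX_NUM_COS_PORT1	(3)
#define DCBX_E3B0_MAX_NUM_COS \
	MAXVAL(DCBX_E3B0_MAX_NUM_COS_PORT0, DCBX_E3B0_MAX_NUM_COS_PORT1)
#define DCBX_MAX_NUM_COS \
	MAXVAL(DCBX_E3B0_MAX_NUM_COS, DCBX_E2E3_MAX_NUM_COS)

int main(void)
{
	printf("DCBX_MAX_NUM_COS = %d\n", DCBX_MAX_NUM_COS);	/* 6 */
	return 0;
}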
+
 /* PFC port configuration params */
 struct bnx2x_nig_brb_pfc_port_params {
 	/* NIG */
@@ -364,8 +408,8 @@
 	u32 llfc_out_en;
 	u32 llfc_enable;
 	u32 pkt_priority_to_cos;
-	u32 rx_cos0_priority_mask;
-	u32 rx_cos1_priority_mask;
+	u8 num_of_rx_cos_priority_mask;
+	u32 rx_cos_priority_mask[DCBX_MAX_NUM_COS];
 	u32 llfc_high_priority_classes;
 	u32 llfc_low_priority_classes;
 	/* BRB */
@@ -373,27 +417,74 @@
 	u32 cos1_pauseable;
 };
 
+
+/* ETS port configuration params */
+struct bnx2x_ets_bw_params {
+	u8 bw;
+};
+
+struct bnx2x_ets_sp_params {
+	/**
+	 * Valid values are 0-5; 0 is the highest strict priority.
+	 * No two COS entries may have the same priority.
+	 */
+	u8 pri;
+};
+
+enum bnx2x_cos_state {
+	bnx2x_cos_state_strict = 0,
+	bnx2x_cos_state_bw = 1,
+};
+
+struct bnx2x_ets_cos_params {
+	enum bnx2x_cos_state state;
+	union {
+		struct bnx2x_ets_bw_params bw_params;
+		struct bnx2x_ets_sp_params sp_params;
+	} params;
+};
+
+struct bnx2x_ets_params {
+	u8 num_of_cos; /* Number of valid COS entries*/
+	struct bnx2x_ets_cos_params cos[DCBX_MAX_NUM_COS];
+};
+
 /**
  * Used to update the PFC attributes in EMAC, BMAC, NIG and BRB
  * when link is already up
  */
-void bnx2x_update_pfc(struct link_params *params,
+int bnx2x_update_pfc(struct link_params *params,
 		      struct link_vars *vars,
 		      struct bnx2x_nig_brb_pfc_port_params *pfc_params);
 
 
 /* Used to configure the ETS to disable */
-void bnx2x_ets_disabled(struct link_params *params);
+int bnx2x_ets_disabled(struct link_params *params,
+		       struct link_vars *vars);
 
 /* Used to configure the ETS to BW limited */
 void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw,
 			const u32 cos1_bw);
 
 /* Used to configure the ETS to strict */
-u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos);
+int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos);
 
+
+/*  Configure the COS to ETS according to BW and SP settings.*/
+int bnx2x_ets_e3b0_config(const struct link_params *params,
+			 const struct link_vars *vars,
+			 const struct bnx2x_ets_params *ets_params);
 /* Read pfc statistic*/
 void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
 						 u32 pfc_frames_sent[2],
 						 u32 pfc_frames_received[2]);
+void bnx2x_init_mod_abs_int(struct bnx2x *bp, struct link_vars *vars,
+			    u32 chip_id, u32 shmem_base, u32 shmem2_base,
+			    u8 port);
+
+int bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
+			       struct link_params *params);
+
+void bnx2x_period_func(struct link_params *params, struct link_vars *vars);
+
 #endif /* BNX2X_LINK_H */
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index 4b70311..af57217 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -39,6 +39,7 @@
 #include <linux/mii.h>
 #include <linux/if_vlan.h>
 #include <net/ip.h>
+#include <net/ipv6.h>
 #include <net/tcp.h>
 #include <net/checksum.h>
 #include <net/ip6_checksum.h>
@@ -50,12 +51,12 @@
 #include <linux/io.h>
 #include <linux/stringify.h>
 
-#define BNX2X_MAIN
 #include "bnx2x.h"
 #include "bnx2x_init.h"
 #include "bnx2x_init_ops.h"
 #include "bnx2x_cmn.h"
 #include "bnx2x_dcb.h"
+#include "bnx2x_sp.h"
 
 #include <linux/firmware.h>
 #include "bnx2x_fw_file_hdr.h"
@@ -73,12 +74,14 @@
 #define TX_TIMEOUT		(5*HZ)
 
 static char version[] __devinitdata =
-	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
+	"Broadcom NetXtreme II 5771x/578xx 10/20-Gigabit Ethernet Driver "
 	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
 
 MODULE_AUTHOR("Eliezer Tamir");
 MODULE_DESCRIPTION("Broadcom NetXtreme II "
-		   "BCM57710/57711/57711E/57712/57712E Driver");
+		   "BCM57710/57711/57711E/"
+		   "57712/57712_MF/57800/57800_MF/57810/57810_MF/"
+		   "57840/57840_MF Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_MODULE_VERSION);
 MODULE_FIRMWARE(FW_FILE_NAME_E1);
@@ -99,9 +102,11 @@
 module_param(disable_tpa, int, 0);
 MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
 
+#define INT_MODE_INTx			1
+#define INT_MODE_MSI			2
 static int int_mode;
 module_param(int_mode, int, 0);
-MODULE_PARM_DESC(int_mode, " Force interrupt mode other then MSI-X "
+MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
 				"(1 INT#x; 2 MSI)");
 
 static int dropless_fc;
@@ -120,37 +125,87 @@
 module_param(debug, int, 0);
 MODULE_PARM_DESC(debug, " Default debug msglevel");
 
-static struct workqueue_struct *bnx2x_wq;
 
-#ifdef BCM_CNIC
-static u8 ALL_ENODE_MACS[] = {0x01, 0x10, 0x18, 0x01, 0x00, 0x01};
-#endif
+
+struct workqueue_struct *bnx2x_wq;
 
 enum bnx2x_board_type {
 	BCM57710 = 0,
-	BCM57711 = 1,
-	BCM57711E = 2,
-	BCM57712 = 3,
-	BCM57712E = 4
+	BCM57711,
+	BCM57711E,
+	BCM57712,
+	BCM57712_MF,
+	BCM57800,
+	BCM57800_MF,
+	BCM57810,
+	BCM57810_MF,
+	BCM57840,
+	BCM57840_MF
 };
 
 /* indexed by board_type, above */
 static struct {
 	char *name;
 } board_info[] __devinitdata = {
-	{ "Broadcom NetXtreme II BCM57710 XGb" },
-	{ "Broadcom NetXtreme II BCM57711 XGb" },
-	{ "Broadcom NetXtreme II BCM57711E XGb" },
-	{ "Broadcom NetXtreme II BCM57712 XGb" },
-	{ "Broadcom NetXtreme II BCM57712E XGb" }
+	{ "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" },
+	{ "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" },
+	{ "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" },
+	{ "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet" },
+	{ "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Multi Function" },
+	{ "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet" },
+	{ "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Multi Function" },
+	{ "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" },
+	{ "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" },
+	{ "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" },
+	{ "Broadcom NetXtreme II BCM57840 10/20 Gigabit "
+						"Ethernet Multi Function"}
 };
 
+#ifndef PCI_DEVICE_ID_NX2_57710
+#define PCI_DEVICE_ID_NX2_57710		CHIP_NUM_57710
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57711
+#define PCI_DEVICE_ID_NX2_57711		CHIP_NUM_57711
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57711E
+#define PCI_DEVICE_ID_NX2_57711E	CHIP_NUM_57711E
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57712
+#define PCI_DEVICE_ID_NX2_57712		CHIP_NUM_57712
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57712_MF
+#define PCI_DEVICE_ID_NX2_57712_MF	CHIP_NUM_57712_MF
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57800
+#define PCI_DEVICE_ID_NX2_57800		CHIP_NUM_57800
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57800_MF
+#define PCI_DEVICE_ID_NX2_57800_MF	CHIP_NUM_57800_MF
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57810
+#define PCI_DEVICE_ID_NX2_57810		CHIP_NUM_57810
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57810_MF
+#define PCI_DEVICE_ID_NX2_57810_MF	CHIP_NUM_57810_MF
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57840
+#define PCI_DEVICE_ID_NX2_57840		CHIP_NUM_57840
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57840_MF
+#define PCI_DEVICE_ID_NX2_57840_MF	CHIP_NUM_57840_MF
+#endif
 static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
 	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
 	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
 	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
 	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
-	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712E), BCM57712E },
+	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_MF), BCM57712_MF },
+	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800), BCM57800 },
+	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF },
+	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 },
+	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
+	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840), BCM57840 },
+	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
 	{ 0 }
 };
 
@@ -167,48 +222,6 @@
 	REG_WR(bp,  addr + 4, U64_HI(mapping));
 }
 
-static inline void __storm_memset_fill(struct bnx2x *bp,
-				       u32 addr, size_t size, u32 val)
-{
-	int i;
-	for (i = 0; i < size/4; i++)
-		REG_WR(bp,  addr + (i * 4), val);
-}
-
-static inline void storm_memset_ustats_zero(struct bnx2x *bp,
-					    u8 port, u16 stat_id)
-{
-	size_t size = sizeof(struct ustorm_per_client_stats);
-
-	u32 addr = BAR_USTRORM_INTMEM +
-			USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
-
-	__storm_memset_fill(bp, addr, size, 0);
-}
-
-static inline void storm_memset_tstats_zero(struct bnx2x *bp,
-					    u8 port, u16 stat_id)
-{
-	size_t size = sizeof(struct tstorm_per_client_stats);
-
-	u32 addr = BAR_TSTRORM_INTMEM +
-			TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
-
-	__storm_memset_fill(bp, addr, size, 0);
-}
-
-static inline void storm_memset_xstats_zero(struct bnx2x *bp,
-					    u8 port, u16 stat_id)
-{
-	size_t size = sizeof(struct xstorm_per_client_stats);
-
-	u32 addr = BAR_XSTRORM_INTMEM +
-			XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
-
-	__storm_memset_fill(bp, addr, size, 0);
-}
-
-
 static inline void storm_memset_spq_addr(struct bnx2x *bp,
 					 dma_addr_t mapping, u16 abs_fid)
 {
@@ -218,103 +231,6 @@
 	__storm_memset_dma_mapping(bp, addr, mapping);
 }
 
-static inline void storm_memset_ov(struct bnx2x *bp, u16 ov, u16 abs_fid)
-{
-	REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(abs_fid), ov);
-}
-
-static inline void storm_memset_func_cfg(struct bnx2x *bp,
-				struct tstorm_eth_function_common_config *tcfg,
-				u16 abs_fid)
-{
-	size_t size = sizeof(struct tstorm_eth_function_common_config);
-
-	u32 addr = BAR_TSTRORM_INTMEM +
-			TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
-
-	__storm_memset_struct(bp, addr, size, (u32 *)tcfg);
-}
-
-static inline void storm_memset_xstats_flags(struct bnx2x *bp,
-				struct stats_indication_flags *flags,
-				u16 abs_fid)
-{
-	size_t size = sizeof(struct stats_indication_flags);
-
-	u32 addr = BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(abs_fid);
-
-	__storm_memset_struct(bp, addr, size, (u32 *)flags);
-}
-
-static inline void storm_memset_tstats_flags(struct bnx2x *bp,
-				struct stats_indication_flags *flags,
-				u16 abs_fid)
-{
-	size_t size = sizeof(struct stats_indication_flags);
-
-	u32 addr = BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(abs_fid);
-
-	__storm_memset_struct(bp, addr, size, (u32 *)flags);
-}
-
-static inline void storm_memset_ustats_flags(struct bnx2x *bp,
-				struct stats_indication_flags *flags,
-				u16 abs_fid)
-{
-	size_t size = sizeof(struct stats_indication_flags);
-
-	u32 addr = BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(abs_fid);
-
-	__storm_memset_struct(bp, addr, size, (u32 *)flags);
-}
-
-static inline void storm_memset_cstats_flags(struct bnx2x *bp,
-				struct stats_indication_flags *flags,
-				u16 abs_fid)
-{
-	size_t size = sizeof(struct stats_indication_flags);
-
-	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(abs_fid);
-
-	__storm_memset_struct(bp, addr, size, (u32 *)flags);
-}
-
-static inline void storm_memset_xstats_addr(struct bnx2x *bp,
-					   dma_addr_t mapping, u16 abs_fid)
-{
-	u32 addr = BAR_XSTRORM_INTMEM +
-		XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
-
-	__storm_memset_dma_mapping(bp, addr, mapping);
-}
-
-static inline void storm_memset_tstats_addr(struct bnx2x *bp,
-					   dma_addr_t mapping, u16 abs_fid)
-{
-	u32 addr = BAR_TSTRORM_INTMEM +
-		TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
-
-	__storm_memset_dma_mapping(bp, addr, mapping);
-}
-
-static inline void storm_memset_ustats_addr(struct bnx2x *bp,
-					   dma_addr_t mapping, u16 abs_fid)
-{
-	u32 addr = BAR_USTRORM_INTMEM +
-		USTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
-
-	__storm_memset_dma_mapping(bp, addr, mapping);
-}
-
-static inline void storm_memset_cstats_addr(struct bnx2x *bp,
-					   dma_addr_t mapping, u16 abs_fid)
-{
-	u32 addr = BAR_CSTRORM_INTMEM +
-		CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
-
-	__storm_memset_dma_mapping(bp, addr, mapping);
-}
-
 static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
 					 u16 pf_id)
 {
@@ -359,45 +275,6 @@
 	REG_WR16(bp, addr, eq_prod);
 }
 
-static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
-					     u16 fw_sb_id, u8 sb_index,
-					     u8 ticks)
-{
-
-	int index_offset = CHIP_IS_E2(bp) ?
-		offsetof(struct hc_status_block_data_e2, index_data) :
-		offsetof(struct hc_status_block_data_e1x, index_data);
-	u32 addr = BAR_CSTRORM_INTMEM +
-			CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
-			index_offset +
-			sizeof(struct hc_index_data)*sb_index +
-			offsetof(struct hc_index_data, timeout);
-	REG_WR8(bp, addr, ticks);
-	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
-			  port, fw_sb_id, sb_index, ticks);
-}
-static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
-					     u16 fw_sb_id, u8 sb_index,
-					     u8 disable)
-{
-	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
-	int index_offset = CHIP_IS_E2(bp) ?
-		offsetof(struct hc_status_block_data_e2, index_data) :
-		offsetof(struct hc_status_block_data_e1x, index_data);
-	u32 addr = BAR_CSTRORM_INTMEM +
-			CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
-			index_offset +
-			sizeof(struct hc_index_data)*sb_index +
-			offsetof(struct hc_index_data, flags);
-	u16 flags = REG_RD16(bp, addr);
-	/* clear and set */
-	flags &= ~HC_INDEX_DATA_HC_ENABLED;
-	flags |= enable_flag;
-	REG_WR16(bp, addr, flags);
-	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
-			  port, fw_sb_id, sb_index, disable);
-}
-
 /* used only at init
  * locking is done by mcp
  */
@@ -491,13 +368,6 @@
 
 }
 
-const u32 dmae_reg_go_c[] = {
-	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
-	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
-	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
-	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
-};
-
 /* copy command into DMAE command memory and set DMAE command go */
 void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
 {
@@ -578,7 +448,11 @@
 	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
 	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
 
-	/* lock the dmae channel */
+	/*
+	 * Lock the dmae channel. Disable BHs to prevent a deadlock,
+	 * since this code is called both from syscall context and
+	 * from the ndo_set_rx_mode() flow, which may run in BH context.
+	 */
 	spin_lock_bh(&bp->dmae_lock);
 
 	/* reset completion */
@@ -833,9 +707,9 @@
 	return rc;
 }
 
-static void bnx2x_fw_dump(struct bnx2x *bp)
+void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
 {
-	u32 addr;
+	u32 addr, val;
 	u32 mark, offset;
 	__be32 data[9];
 	int word;
@@ -844,6 +718,14 @@
 		BNX2X_ERR("NO MCP - can not dump\n");
 		return;
 	}
+	netdev_printk(lvl, bp->dev, "bc %d.%d.%d\n",
+		(bp->common.bc_ver & 0xff0000) >> 16,
+		(bp->common.bc_ver & 0xff00) >> 8,
+		(bp->common.bc_ver & 0xff));
+
+	val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER);
+	if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER))
+		printk("%s" "MCP PC at 0x%x\n", lvl, val);
 
 	if (BP_PATH(bp) == 0)
 		trace_shmem_base = bp->common.shmem_base;
@@ -853,9 +735,9 @@
 	mark = REG_RD(bp, addr);
 	mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
 			+ ((mark + 0x3) & ~0x3) - 0x08000000;
-	pr_err("begin fw dump (mark 0x%x)\n", mark);
+	printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);
 
-	pr_err("");
+	printk("%s", lvl);
 	for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
 		for (word = 0; word < 8; word++)
 			data[word] = htonl(REG_RD(bp, offset + 4*word));
@@ -868,7 +750,12 @@
 		data[8] = 0x0;
 		pr_cont("%s", (char *)data);
 	}
-	pr_err("end of fw dump\n");
+	printk("%s" "end of fw dump\n", lvl);
+}
+
+static inline void bnx2x_fw_dump(struct bnx2x *bp)
+{
+	bnx2x_fw_dump_lvl(bp, KERN_ERR);
 }
 
 void bnx2x_panic_dump(struct bnx2x *bp)
@@ -889,9 +776,9 @@
 	/* Indices */
 	/* Common */
 	BNX2X_ERR("def_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)"
-		  "  spq_prod_idx(0x%x)\n",
-		  bp->def_idx, bp->def_att_idx,
-		  bp->attn_state, bp->spq_prod_idx);
+		  "  spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
+		  bp->def_idx, bp->def_att_idx, bp->attn_state,
+		  bp->spq_prod_idx, bp->stats_counter);
 	BNX2X_ERR("DSB: attn bits(0x%x)  ack(0x%x)  id(0x%x)  idx(0x%x)\n",
 		  bp->def_status_blk->atten_status_block.attn_bits,
 		  bp->def_status_blk->atten_status_block.attn_bits_ack,
@@ -908,15 +795,17 @@
 			CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
 			i*sizeof(u32));
 
-	pr_cont("igu_sb_id(0x%x)  igu_seg_id (0x%x) "
+	pr_cont("igu_sb_id(0x%x)  igu_seg_id(0x%x) "
 			 "pf_id(0x%x)  vnic_id(0x%x)  "
-			 "vf_id(0x%x)  vf_valid (0x%x)\n",
+			 "vf_id(0x%x)  vf_valid (0x%x) "
+			 "state(0x%x)\n",
 	       sp_sb_data.igu_sb_id,
 	       sp_sb_data.igu_seg_id,
 	       sp_sb_data.p_func.pf_id,
 	       sp_sb_data.p_func.vnic_id,
 	       sp_sb_data.p_func.vf_id,
-	       sp_sb_data.p_func.vf_valid);
+	       sp_sb_data.p_func.vf_valid,
+	       sp_sb_data.state);
 
 
 	for_each_eth_queue(bp, i) {
@@ -925,13 +814,13 @@
 		struct hc_status_block_data_e2 sb_data_e2;
 		struct hc_status_block_data_e1x sb_data_e1x;
 		struct hc_status_block_sm  *hc_sm_p =
-			CHIP_IS_E2(bp) ?
-			sb_data_e2.common.state_machine :
-			sb_data_e1x.common.state_machine;
+			CHIP_IS_E1x(bp) ?
+			sb_data_e1x.common.state_machine :
+			sb_data_e2.common.state_machine;
 		struct hc_index_data *hc_index_p =
-			CHIP_IS_E2(bp) ?
-			sb_data_e2.index_data :
-			sb_data_e1x.index_data;
+			CHIP_IS_E1x(bp) ?
+			sb_data_e1x.index_data :
+			sb_data_e2.index_data;
 		int data_size;
 		u32 *sb_data_p;
 
@@ -954,8 +843,8 @@
 			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
 			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
 
-		loop = CHIP_IS_E2(bp) ?
-			HC_SB_MAX_INDICES_E2 : HC_SB_MAX_INDICES_E1X;
+		loop = CHIP_IS_E1x(bp) ?
+			HC_SB_MAX_INDICES_E1X : HC_SB_MAX_INDICES_E2;
 
 		/* host sb data */
 
@@ -975,35 +864,39 @@
 			       fp->sb_index_values[j],
 			       (j == loop - 1) ? ")" : " ");
 		/* fw sb data */
-		data_size = CHIP_IS_E2(bp) ?
-			sizeof(struct hc_status_block_data_e2) :
-			sizeof(struct hc_status_block_data_e1x);
+		data_size = CHIP_IS_E1x(bp) ?
+			sizeof(struct hc_status_block_data_e1x) :
+			sizeof(struct hc_status_block_data_e2);
 		data_size /= sizeof(u32);
-		sb_data_p = CHIP_IS_E2(bp) ?
-			(u32 *)&sb_data_e2 :
-			(u32 *)&sb_data_e1x;
+		sb_data_p = CHIP_IS_E1x(bp) ?
+			(u32 *)&sb_data_e1x :
+			(u32 *)&sb_data_e2;
 		/* copy sb data in here */
 		for (j = 0; j < data_size; j++)
 			*(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
 				CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
 				j * sizeof(u32));
 
-		if (CHIP_IS_E2(bp)) {
-			pr_cont("pf_id(0x%x)  vf_id (0x%x)  vf_valid(0x%x) "
-				"vnic_id(0x%x)  same_igu_sb_1b(0x%x)\n",
+		if (!CHIP_IS_E1x(bp)) {
+			pr_cont("pf_id(0x%x)  vf_id(0x%x)  vf_valid(0x%x) "
+				"vnic_id(0x%x)  same_igu_sb_1b(0x%x) "
+				"state(0x%x)\n",
 				sb_data_e2.common.p_func.pf_id,
 				sb_data_e2.common.p_func.vf_id,
 				sb_data_e2.common.p_func.vf_valid,
 				sb_data_e2.common.p_func.vnic_id,
-				sb_data_e2.common.same_igu_sb_1b);
+				sb_data_e2.common.same_igu_sb_1b,
+				sb_data_e2.common.state);
 		} else {
-			pr_cont("pf_id(0x%x)  vf_id (0x%x)  vf_valid(0x%x) "
-				"vnic_id(0x%x)  same_igu_sb_1b(0x%x)\n",
+			pr_cont("pf_id(0x%x)  vf_id(0x%x)  vf_valid(0x%x) "
+				"vnic_id(0x%x)  same_igu_sb_1b(0x%x) "
+				"state(0x%x)\n",
 				sb_data_e1x.common.p_func.pf_id,
 				sb_data_e1x.common.p_func.vf_id,
 				sb_data_e1x.common.p_func.vf_valid,
 				sb_data_e1x.common.p_func.vnic_id,
-				sb_data_e1x.common.same_igu_sb_1b);
+				sb_data_e1x.common.same_igu_sb_1b,
+				sb_data_e1x.common.state);
 		}
 
 		/* SB_SMs data */
@@ -1092,6 +985,373 @@
 	BNX2X_ERR("end crash dump -----------------\n");
 }
 
+/*
+ * FLR Support for E2
+ *
+ * bnx2x_pf_flr_clnup() is called during nic_load in the per function HW
+ * initialization.
+ */
+#define FLR_WAIT_USEC		10000	/* 10 milliseconds */
+#define FLR_WAIT_INTERAVAL	50	/* usec */
+#define FLR_POLL_CNT		(FLR_WAIT_USEC/FLR_WAIT_INTERAVAL) /* 200 */
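These constants give a 10 ms budget sampled every 50 us, i.e. 200 polls. A userspace skeleton of the bounded-poll idiom used by bnx2x_flr_clnup_reg_poll() below (reg_read() is a hypothetical stand-in for REG_RD, and the real loop delays one interval per pass):

#include <stdint.h>
#include <stdio.h>

#define WAIT_USEC	10000
#define WAIT_INTERVAL	50
#define POLL_CNT	(WAIT_USEC / WAIT_INTERVAL)	/* 200 */

static uint32_t reg_read(void) { return 0; }	/* stand-in register read */

int main(void)
{
	uint32_t cnt = POLL_CNT, expected = 0, val;

	while ((val = reg_read()) != expected && cnt--)
		;	/* real code: udelay(WAIT_INTERVAL) each pass */

	printf("val=%u after %u polls\n", (unsigned)val,
	       (unsigned)(POLL_CNT - cnt));
	return 0;
}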
+
+struct pbf_pN_buf_regs {
+	int pN;
+	u32 init_crd;
+	u32 crd;
+	u32 crd_freed;
+};
+
+struct pbf_pN_cmd_regs {
+	int pN;
+	u32 lines_occup;
+	u32 lines_freed;
+};
+
+static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp,
+				     struct pbf_pN_buf_regs *regs,
+				     u32 poll_count)
+{
+	u32 init_crd, crd, crd_start, crd_freed, crd_freed_start;
+	u32 cur_cnt = poll_count;
+
+	crd_freed = crd_freed_start = REG_RD(bp, regs->crd_freed);
+	crd = crd_start = REG_RD(bp, regs->crd);
+	init_crd = REG_RD(bp, regs->init_crd);
+
+	DP(BNX2X_MSG_SP, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
+	DP(BNX2X_MSG_SP, "CREDIT[%d]      : s:%x\n", regs->pN, crd);
+	DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);
+
+	while ((crd != init_crd) && ((u32)SUB_S32(crd_freed, crd_freed_start) <
+	       (init_crd - crd_start))) {
+		if (cur_cnt--) {
+			udelay(FLR_WAIT_INTERAVAL);
+			crd = REG_RD(bp, regs->crd);
+			crd_freed = REG_RD(bp, regs->crd_freed);
+		} else {
+			DP(BNX2X_MSG_SP, "PBF tx buffer[%d] timed out\n",
+			   regs->pN);
+			DP(BNX2X_MSG_SP, "CREDIT[%d]      : c:%x\n",
+			   regs->pN, crd);
+			DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: c:%x\n",
+			   regs->pN, crd_freed);
+			break;
+		}
+	}
+	DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF tx buffer[%d]\n",
+	   poll_count-cur_cnt, FLR_WAIT_INTERAVAL, regs->pN);
+}
+
+static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp,
+				     struct pbf_pN_cmd_regs *regs,
+				     u32 poll_count)
+{
+	u32 occup, to_free, freed, freed_start;
+	u32 cur_cnt = poll_count;
+
+	occup = to_free = REG_RD(bp, regs->lines_occup);
+	freed = freed_start = REG_RD(bp, regs->lines_freed);
+
+	DP(BNX2X_MSG_SP, "OCCUPANCY[%d]   : s:%x\n", regs->pN, occup);
+	DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
+
+	while (occup && ((u32)SUB_S32(freed, freed_start) < to_free)) {
+		if (cur_cnt--) {
+			udelay(FLR_WAIT_INTERAVAL);
+			occup = REG_RD(bp, regs->lines_occup);
+			freed = REG_RD(bp, regs->lines_freed);
+		} else {
+			DP(BNX2X_MSG_SP, "PBF cmd queue[%d] timed out\n",
+			   regs->pN);
+			DP(BNX2X_MSG_SP, "OCCUPANCY[%d]   : s:%x\n",
+			   regs->pN, occup);
+			DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n",
+			   regs->pN, freed);
+			break;
+		}
+	}
+	DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF cmd queue[%d]\n",
+	   poll_count-cur_cnt, FLR_WAIT_INTERAVAL, regs->pN);
+}
+
+static inline u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
+				     u32 expected, u32 poll_count)
+{
+	u32 cur_cnt = poll_count;
+	u32 val;
+
+	while ((val = REG_RD(bp, reg)) != expected && cur_cnt--)
+		udelay(FLR_WAIT_INTERAVAL);
+
+	return val;
+}
+
+static inline int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
+						  char *msg, u32 poll_cnt)
+{
+	u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt);
+	if (val != 0) {
+		BNX2X_ERR("%s usage count=%d\n", msg, val);
+		return 1;
+	}
+	return 0;
+}
+
+static u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
+{
+	/* adjust polling timeout */
+	if (CHIP_REV_IS_EMUL(bp))
+		return FLR_POLL_CNT * 2000;
+
+	if (CHIP_REV_IS_FPGA(bp))
+		return FLR_POLL_CNT * 120;
+
+	return FLR_POLL_CNT;
+}
+
+static void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
+{
+	struct pbf_pN_cmd_regs cmd_regs[] = {
+		{0, (CHIP_IS_E3B0(bp)) ?
+			PBF_REG_TQ_OCCUPANCY_Q0 :
+			PBF_REG_P0_TQ_OCCUPANCY,
+		    (CHIP_IS_E3B0(bp)) ?
+			PBF_REG_TQ_LINES_FREED_CNT_Q0 :
+			PBF_REG_P0_TQ_LINES_FREED_CNT},
+		{1, (CHIP_IS_E3B0(bp)) ?
+			PBF_REG_TQ_OCCUPANCY_Q1 :
+			PBF_REG_P1_TQ_OCCUPANCY,
+		    (CHIP_IS_E3B0(bp)) ?
+			PBF_REG_TQ_LINES_FREED_CNT_Q1 :
+			PBF_REG_P1_TQ_LINES_FREED_CNT},
+		{4, (CHIP_IS_E3B0(bp)) ?
+			PBF_REG_TQ_OCCUPANCY_LB_Q :
+			PBF_REG_P4_TQ_OCCUPANCY,
+		    (CHIP_IS_E3B0(bp)) ?
+			PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
+			PBF_REG_P4_TQ_LINES_FREED_CNT}
+	};
+
+	struct pbf_pN_buf_regs buf_regs[] = {
+		{0, (CHIP_IS_E3B0(bp)) ?
+			PBF_REG_INIT_CRD_Q0 :
+			PBF_REG_P0_INIT_CRD ,
+		    (CHIP_IS_E3B0(bp)) ?
+			PBF_REG_CREDIT_Q0 :
+			PBF_REG_P0_CREDIT,
+		    (CHIP_IS_E3B0(bp)) ?
+			PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
+			PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
+		{1, (CHIP_IS_E3B0(bp)) ?
+			PBF_REG_INIT_CRD_Q1 :
+			PBF_REG_P1_INIT_CRD,
+		    (CHIP_IS_E3B0(bp)) ?
+			PBF_REG_CREDIT_Q1 :
+			PBF_REG_P1_CREDIT,
+		    (CHIP_IS_E3B0(bp)) ?
+			PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
+			PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
+		{4, (CHIP_IS_E3B0(bp)) ?
+			PBF_REG_INIT_CRD_LB_Q :
+			PBF_REG_P4_INIT_CRD,
+		    (CHIP_IS_E3B0(bp)) ?
+			PBF_REG_CREDIT_LB_Q :
+			PBF_REG_P4_CREDIT,
+		    (CHIP_IS_E3B0(bp)) ?
+			PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
+			PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
+	};
+
+	int i;
+
+	/* Verify the command queues are flushed P0, P1, P4 */
+	for (i = 0; i < ARRAY_SIZE(cmd_regs); i++)
+		bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count);
+
+	/* Verify the transmission buffers are flushed P0, P1, P4 */
+	for (i = 0; i < ARRAY_SIZE(buf_regs); i++)
+		bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count);
+}
+
+#define OP_GEN_PARAM(param) \
+	(((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)
+
+#define OP_GEN_TYPE(type) \
+	(((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)
+
+#define OP_GEN_AGG_VECT(index) \
+	(((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
+
+
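+/* Ask the FW, through the XSDM operation generator, to perform the final
+ * cleanup for this function and poll the CSTORM completion word until the
+ * FW acknowledges. Returns non-zero on a stale completion or a timeout.
+ */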
+static inline int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func,
+					 u32 poll_cnt)
+{
+	struct sdm_op_gen op_gen = {0};
+
+	u32 comp_addr = BAR_CSTRORM_INTMEM +
+			CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func);
+	int ret = 0;
+
+	if (REG_RD(bp, comp_addr)) {
+		BNX2X_ERR("Cleanup complete is not 0\n");
+		return 1;
+	}
+
+	op_gen.command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
+	op_gen.command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
+	op_gen.command |= OP_GEN_AGG_VECT(clnup_func);
+	op_gen.command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
+
+	DP(BNX2X_MSG_SP, "FW Final cleanup\n");
+	REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen.command);
+
+	if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) {
+		BNX2X_ERR("FW final cleanup did not succeed\n");
+		ret = 1;
+	}
+	/* Zero completion for nxt FLR */
+	REG_WR(bp, comp_addr, 0);
+
+	return ret;
+}
+
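+/* Check the Transactions Pending bit in the PCIe Device Status register:
+ * non-zero means the device still has non-posted requests outstanding.
+ */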
+static inline u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
+{
+	int pos;
+	u16 status;
+
+	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+	if (!pos)
+		return false;
+
+	pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status);
+	return status & PCI_EXP_DEVSTA_TRPND;
+}
+
+/* PF FLR specific routines */
+static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt)
+{
+
+	/* wait for CFC PF usage-counter to zero (includes all the VFs) */
+	if (bnx2x_flr_clnup_poll_hw_counter(bp,
+			CFC_REG_NUM_LCIDS_INSIDE_PF,
+			"CFC PF usage counter timed out",
+			poll_cnt))
+		return 1;
+
+	/* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
+	if (bnx2x_flr_clnup_poll_hw_counter(bp,
+			DORQ_REG_PF_USAGE_CNT,
+			"DQ PF usage counter timed out",
+			poll_cnt))
+		return 1;
+
+	/* Wait for QM PF usage-counter to zero (until DQ cleanup) */
+	if (bnx2x_flr_clnup_poll_hw_counter(bp,
+			QM_REG_PF_USG_CNT_0 + 4*BP_FUNC(bp),
+			"QM PF usage counter timed out",
+			poll_cnt))
+		return 1;
+
+	/* Wait for Timer PF usage-counters to zero (until DQ cleanup) */
+	if (bnx2x_flr_clnup_poll_hw_counter(bp,
+			TM_REG_LIN0_VNIC_UC + 4*BP_PORT(bp),
+			"Timers VNIC usage counter timed out",
+			poll_cnt))
+		return 1;
+	if (bnx2x_flr_clnup_poll_hw_counter(bp,
+			TM_REG_LIN0_NUM_SCANS + 4*BP_PORT(bp),
+			"Timers NUM_SCANS usage counter timed out",
+			poll_cnt))
+		return 1;
+
+	/* Wait for the DMAE PF usage counter to zero */
+	if (bnx2x_flr_clnup_poll_hw_counter(bp,
+			dmae_reg_go_c[INIT_DMAE_C(bp)],
+			"DMAE dommand register timed out",
+			poll_cnt))
+		return 1;
+
+	return 0;
+}
+
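+/* Dump, at debug level, the HW enable/disable bits that matter after an
+ * FLR - handy when chasing an incomplete cleanup.
+ */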
+static void bnx2x_hw_enable_status(struct bnx2x *bp)
+{
+	u32 val;
+
+	val = REG_RD(bp, CFC_REG_WEAK_ENABLE_PF);
+	DP(BNX2X_MSG_SP, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);
+
+	val = REG_RD(bp, PBF_REG_DISABLE_PF);
+	DP(BNX2X_MSG_SP, "PBF_REG_DISABLE_PF is 0x%x\n", val);
+
+	val = REG_RD(bp, IGU_REG_PCI_PF_MSI_EN);
+	DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);
+
+	val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_EN);
+	DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);
+
+	val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
+	DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);
+
+	val = REG_RD(bp, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
+	DP(BNX2X_MSG_SP, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);
+
+	val = REG_RD(bp, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
+	DP(BNX2X_MSG_SP, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);
+
+	val = REG_RD(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
+	DP(BNX2X_MSG_SP, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n",
+	   val);
+}
+
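+/* Top-level PF FLR cleanup flow: re-enable target reads, wait for the HW
+ * usage counters to drop to zero, run the FW final cleanup, verify that the
+ * TX path is flushed and no PCIe transactions are pending, and finally
+ * re-enable master access.
+ */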
+static int bnx2x_pf_flr_clnup(struct bnx2x *bp)
+{
+	u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);
+
+	DP(BNX2X_MSG_SP, "Cleanup after FLR PF[%d]\n", BP_ABS_FUNC(bp));
+
+	/* Re-enable PF target read access */
+	REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
+
+	/* Poll HW usage counters */
+	if (bnx2x_poll_hw_usage_counters(bp, poll_cnt))
+		return -EBUSY;
+
+	/* Zero the igu 'trailing edge' and 'leading edge' */
+
+	/* Send the FW cleanup command */
+	if (bnx2x_send_final_clnup(bp, (u8)BP_FUNC(bp), poll_cnt))
+		return -EBUSY;
+
+	/* ATC cleanup */
+
+	/* Verify TX hw is flushed */
+	bnx2x_tx_hw_flushed(bp, poll_cnt);
+
+	/* Wait 100ms (not adjusted according to platform) */
+	msleep(100);
+
+	/* Verify no pending pci transactions */
+	if (bnx2x_is_pcie_pending(bp->pdev))
+		BNX2X_ERR("PCIE Transactions still pending\n");
+
+	/* Debug */
+	bnx2x_hw_enable_status(bp);
+
+	/*
+	 * Master enable - Due to WB DMAE writes performed before this
+	 * register is re-initialized as part of the regular function init
+	 */
+	REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
+
+	return 0;
+}
+
 static void bnx2x_hc_int_enable(struct bnx2x *bp)
 {
 	int port = BP_PORT(bp);
@@ -1285,10 +1545,6 @@
 	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
 	int i, offset;
 
-	/* disable interrupt handling */
-	atomic_inc(&bp->intr_sem);
-	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
-
 	if (disable_hw)
 		/* prevent the HW from sending interrupts */
 		bnx2x_int_disable(bp);
@@ -1301,12 +1557,13 @@
 		offset++;
 #endif
 		for_each_eth_queue(bp, i)
-			synchronize_irq(bp->msix_table[i + offset].vector);
+			synchronize_irq(bp->msix_table[offset++].vector);
 	} else
 		synchronize_irq(bp->pdev->irq);
 
 	/* make sure sp_task is not running */
 	cancel_delayed_work(&bp->sp_task);
+	cancel_delayed_work(&bp->period_task);
 	flush_workqueue(bnx2x_wq);
 }
 
@@ -1350,53 +1607,114 @@
 	return false;
 }
 
+/**
+ * bnx2x_get_leader_lock_resource - get the recovery leader resource id
+ *
+ * @bp:	driver handle
+ *
+ * Returns the recovery leader resource id according to the engine this
+ * function belongs to. Currently only 2 engines are supported.
+ */
+static inline int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
+{
+	if (BP_PATH(bp))
+		return HW_LOCK_RESOURCE_RECOVERY_LEADER_1;
+	else
+		return HW_LOCK_RESOURCE_RECOVERY_LEADER_0;
+}
+
+/**
+ * bnx2x_trylock_leader_lock - try to acquire a leader lock.
+ *
+ * @bp: driver handle
+ *
+ * Tries to acquire a leader lock for the current engine.
+ */
+static inline bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
+{
+	return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
+}
+
 #ifdef BCM_CNIC
-static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
+static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err);
 #endif
 
-void bnx2x_sp_event(struct bnx2x_fastpath *fp,
-			   union eth_rx_cqe *rr_cqe)
+void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
 {
 	struct bnx2x *bp = fp->bp;
 	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
 	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
+	enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX;
+	struct bnx2x_queue_sp_obj *q_obj = &fp->q_obj;
 
 	DP(BNX2X_MSG_SP,
 	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
 	   fp->index, cid, command, bp->state,
 	   rr_cqe->ramrod_cqe.ramrod_type);
 
-	switch (command | fp->state) {
-	case (RAMROD_CMD_ID_ETH_CLIENT_SETUP | BNX2X_FP_STATE_OPENING):
+	switch (command) {
+	case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
+		DP(NETIF_MSG_IFUP, "got UPDATE ramrod. CID %d\n", cid);
+		drv_cmd = BNX2X_Q_CMD_UPDATE;
+		break;
+	case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
 		DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n", cid);
-		fp->state = BNX2X_FP_STATE_OPEN;
+		drv_cmd = BNX2X_Q_CMD_SETUP;
 		break;
 
-	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
+	case (RAMROD_CMD_ID_ETH_HALT):
 		DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n", cid);
-		fp->state = BNX2X_FP_STATE_HALTED;
+		drv_cmd = BNX2X_Q_CMD_HALT;
 		break;
 
-	case (RAMROD_CMD_ID_ETH_TERMINATE | BNX2X_FP_STATE_TERMINATING):
+	case (RAMROD_CMD_ID_ETH_TERMINATE):
 		DP(NETIF_MSG_IFDOWN, "got MULTI[%d] terminate ramrod\n", cid);
-		fp->state = BNX2X_FP_STATE_TERMINATED;
+		drv_cmd = BNX2X_Q_CMD_TERMINATE;
+		break;
+
+	case (RAMROD_CMD_ID_ETH_EMPTY):
+		DP(NETIF_MSG_IFDOWN, "got MULTI[%d] empty ramrod\n", cid);
+		drv_cmd = BNX2X_Q_CMD_EMPTY;
 		break;
 
 	default:
-		BNX2X_ERR("unexpected MC reply (%d)  "
-			  "fp[%d] state is %x\n",
-			  command, fp->index, fp->state);
-		break;
+		BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n",
+			  command, fp->index);
+		return;
 	}
 
+	if ((drv_cmd != BNX2X_Q_CMD_MAX) &&
+	    q_obj->complete_cmd(bp, q_obj, drv_cmd))
+		/* q_obj->complete_cmd() failure means that this was
+		 * an unexpected completion.
+		 *
+		 * In this case we don't want to increase the bp->spq_left
+		 * because apparently we haven't sent this command in the
+		 * first place.
+		 */
+#ifdef BNX2X_STOP_ON_ERROR
+		bnx2x_panic();
+#else
+		return;
+#endif
+
 	smp_mb__before_atomic_inc();
 	atomic_inc(&bp->cq_spq_left);
-	/* push the change in fp->state and towards the memory */
-	smp_wmb();
+	/* push the change in bp->cq_spq_left towards the memory */
+	smp_mb__after_atomic_inc();
 
 	return;
 }
 
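+/* Publish new RX BD/CQE/SGE producer values to the USTORM memory of this
+ * fastpath queue.
+ */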
+void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+			u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod)
+{
+	u32 start = BAR_USTRORM_INTMEM + fp->ustorm_rx_prods_offset;
+
+	bnx2x_update_rx_prod_gen(bp, fp, bd_prod, rx_comp_prod, rx_sge_prod,
+				 start);
+}
+
 irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
 {
 	struct bnx2x *bp = netdev_priv(dev_instance);
@@ -1411,12 +1729,6 @@
 	}
 	DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
 
-	/* Return here if interrupt is disabled */
-	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
-		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
-		return IRQ_HANDLED;
-	}
-
 #ifdef BNX2X_STOP_ON_ERROR
 	if (unlikely(bp->panic))
 		return IRQ_HANDLED;
@@ -1427,7 +1739,7 @@
 
 		mask = 0x2 << (fp->index + CNIC_CONTEXT_USE);
 		if (status & mask) {
-			/* Handle Rx and Tx according to SB id */
+			/* Handle Rx or Tx according to SB id */
 			prefetch(fp->rx_cons_sb);
 			prefetch(fp->tx_cons_sb);
 			prefetch(&fp->sb_running_index[SM_RX_ID]);
@@ -1441,11 +1753,13 @@
 	if (status & (mask | 0x1)) {
 		struct cnic_ops *c_ops = NULL;
 
-		rcu_read_lock();
-		c_ops = rcu_dereference(bp->cnic_ops);
-		if (c_ops)
-			c_ops->cnic_handler(bp->cnic_data, NULL);
-		rcu_read_unlock();
+		if (likely(bp->state == BNX2X_STATE_OPEN)) {
+			rcu_read_lock();
+			c_ops = rcu_dereference(bp->cnic_ops);
+			if (c_ops)
+				c_ops->cnic_handler(bp->cnic_data, NULL);
+			rcu_read_unlock();
+		}
 
 		status &= ~mask;
 	}
@@ -1466,9 +1780,6 @@
 	return IRQ_HANDLED;
 }
 
-/* end of fast path */
-
-
 /* Link */
 
 /*
@@ -1520,6 +1831,11 @@
 	return -EAGAIN;
 }
 
+int bnx2x_release_leader_lock(struct bnx2x *bp)
+{
+	return bnx2x_release_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
+}
+
 int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
 {
 	u32 lock_status;
@@ -1640,6 +1956,53 @@
 	return 0;
 }
 
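+/**
+ * bnx2x_set_mult_gpio - program several GPIO pins to the same mode
+ *
+ * @bp:		driver handle
+ * @pins:	bitmask of the GPIO pins to program
+ * @mode:	output low, output high or input hi-Z
+ *
+ * Unlike the single-pin helpers, any port swapping must be handled by the
+ * caller.
+ */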
+int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode)
+{
+	u32 gpio_reg = 0;
+	int rc = 0;
+
+	/* Any port swapping should be handled by caller. */
+
+	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
+	/* read GPIO and mask except the float bits */
+	gpio_reg = REG_RD(bp, MISC_REG_GPIO);
+	gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
+	gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
+	gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);
+
+	switch (mode) {
+	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
+		DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output low\n", pins);
+		/* set CLR */
+		gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);
+		break;
+
+	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
+		DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output high\n", pins);
+		/* set SET */
+		gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);
+		break;
+
+	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
+		DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> input\n", pins);
+		/* set FLOAT */
+		gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);
+		break;
+
+	default:
+		BNX2X_ERR("Invalid GPIO mode assignment %d\n", mode);
+		rc = -EINVAL;
+		break;
+	}
+
+	if (rc == 0)
+		REG_WR(bp, MISC_REG_GPIO, gpio_reg);
+
+	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
+
+	return rc;
+}
+
 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
 {
 	/* The GPIO should be swapped if swap register is set and active */
@@ -1732,45 +2095,6 @@
 	return 0;
 }
 
-int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
-{
-	u32 sel_phy_idx = 0;
-	if (bp->link_vars.link_up) {
-		sel_phy_idx = EXT_PHY1;
-		/* In case link is SERDES, check if the EXT_PHY2 is the one */
-		if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
-		    (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
-			sel_phy_idx = EXT_PHY2;
-	} else {
-
-		switch (bnx2x_phy_selection(&bp->link_params)) {
-		case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
-		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
-		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
-		       sel_phy_idx = EXT_PHY1;
-		       break;
-		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
-		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
-		       sel_phy_idx = EXT_PHY2;
-		       break;
-		}
-	}
-	/*
-	* The selected actived PHY is always after swapping (in case PHY
-	* swapping is enabled). So when swapping is enabled, we need to reverse
-	* the configuration
-	*/
-
-	if (bp->link_params.multi_phy_config &
-	    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
-		if (sel_phy_idx == EXT_PHY1)
-			sel_phy_idx = EXT_PHY2;
-		else if (sel_phy_idx == EXT_PHY2)
-			sel_phy_idx = EXT_PHY1;
-	}
-	return LINK_CONFIG_IDX(sel_phy_idx);
-}
-
 void bnx2x_calc_fc_adv(struct bnx2x *bp)
 {
 	u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
@@ -1827,7 +2151,8 @@
 		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
 			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
 			bnx2x_link_report(bp);
-		}
+		} else
+			queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
 		bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
 		return rc;
 	}
@@ -1941,8 +2266,12 @@
 		bp->vn_weight_sum += vn_min_rate;
 	}
 
-	/* ... only if all min rates are zeros - disable fairness */
-	if (all_zero) {
+	/* if ETS or all min rates are zeros - disable fairness */
+	if (BNX2X_IS_ETS_ENABLED(bp)) {
+		bp->cmng.flags.cmng_enables &=
+					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
+		DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n");
+	} else if (all_zero) {
 		bp->cmng.flags.cmng_enables &=
 					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
 		DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
@@ -2143,11 +2472,11 @@
 			       pause_enabled);
 		}
 
-		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
+		if (bp->link_vars.mac_type != MAC_TYPE_EMAC) {
 			struct host_port_stats *pstats;
 
 			pstats = bnx2x_sp(bp, port_stats);
-			/* reset old bmac stats */
+			/* reset old mac stats */
 			memset(&(pstats->mac_stx[0]), 0,
 			       sizeof(struct mac_stx));
 		}
@@ -2197,12 +2526,23 @@
 	bp->port.pmf = 1;
 	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
 
+	/*
+	 * We need the mb() to ensure the ordering between the writing to
+	 * bp->port.pmf here and reading it from the bnx2x_periodic_task().
+	 */
+	smp_mb();
+
+	/* queue a periodic task */
+	queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
+
+	bnx2x_dcbx_pmf_update(bp);
+
 	/* enable nig attention */
 	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
 	if (bp->common.int_block == INT_BLOCK_HC) {
 		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
 		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
-	} else if (CHIP_IS_E2(bp)) {
+	} else if (!CHIP_IS_E1x(bp)) {
 		REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
 		REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
 	}
@@ -2232,7 +2572,8 @@
 	SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
 	SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
 
-	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
+	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB param 0x%08x\n",
+			(command | seq), param);
 
 	do {
 		/* let the FW do it's magic ... */
@@ -2263,141 +2604,25 @@
 static u8 stat_counter_valid(struct bnx2x *bp, struct bnx2x_fastpath *fp)
 {
 #ifdef BCM_CNIC
-	if (IS_FCOE_FP(fp) && IS_MF(bp))
+	/* Statistics are not supported for CNIC Clients at the moment */
+	if (IS_FCOE_FP(fp))
 		return false;
 #endif
 	return true;
 }
 
-/* must be called under rtnl_lock */
-static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
+void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
 {
-	u32 mask = (1 << cl_id);
+	if (CHIP_IS_E1x(bp)) {
+		struct tstorm_eth_function_common_config tcfg = {0};
 
-	/* initial seeting is BNX2X_ACCEPT_NONE */
-	u8 drop_all_ucast = 1, drop_all_bcast = 1, drop_all_mcast = 1;
-	u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
-	u8 unmatched_unicast = 0;
-
-	if (filters & BNX2X_ACCEPT_UNMATCHED_UCAST)
-		unmatched_unicast = 1;
-
-	if (filters & BNX2X_PROMISCUOUS_MODE) {
-		/* promiscious - accept all, drop none */
-		drop_all_ucast = drop_all_bcast = drop_all_mcast = 0;
-		accp_all_ucast = accp_all_bcast = accp_all_mcast = 1;
-		if (IS_MF_SI(bp)) {
-			/*
-			 * SI mode defines to accept in promiscuos mode
-			 * only unmatched packets
-			 */
-			unmatched_unicast = 1;
-			accp_all_ucast = 0;
-		}
+		storm_memset_func_cfg(bp, &tcfg, p->func_id);
 	}
-	if (filters & BNX2X_ACCEPT_UNICAST) {
-		/* accept matched ucast */
-		drop_all_ucast = 0;
-	}
-	if (filters & BNX2X_ACCEPT_MULTICAST)
-		/* accept matched mcast */
-		drop_all_mcast = 0;
-
-	if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
-		/* accept all mcast */
-		drop_all_ucast = 0;
-		accp_all_ucast = 1;
-	}
-	if (filters & BNX2X_ACCEPT_ALL_MULTICAST) {
-		/* accept all mcast */
-		drop_all_mcast = 0;
-		accp_all_mcast = 1;
-	}
-	if (filters & BNX2X_ACCEPT_BROADCAST) {
-		/* accept (all) bcast */
-		drop_all_bcast = 0;
-		accp_all_bcast = 1;
-	}
-
-	bp->mac_filters.ucast_drop_all = drop_all_ucast ?
-		bp->mac_filters.ucast_drop_all | mask :
-		bp->mac_filters.ucast_drop_all & ~mask;
-
-	bp->mac_filters.mcast_drop_all = drop_all_mcast ?
-		bp->mac_filters.mcast_drop_all | mask :
-		bp->mac_filters.mcast_drop_all & ~mask;
-
-	bp->mac_filters.bcast_drop_all = drop_all_bcast ?
-		bp->mac_filters.bcast_drop_all | mask :
-		bp->mac_filters.bcast_drop_all & ~mask;
-
-	bp->mac_filters.ucast_accept_all = accp_all_ucast ?
-		bp->mac_filters.ucast_accept_all | mask :
-		bp->mac_filters.ucast_accept_all & ~mask;
-
-	bp->mac_filters.mcast_accept_all = accp_all_mcast ?
-		bp->mac_filters.mcast_accept_all | mask :
-		bp->mac_filters.mcast_accept_all & ~mask;
-
-	bp->mac_filters.bcast_accept_all = accp_all_bcast ?
-		bp->mac_filters.bcast_accept_all | mask :
-		bp->mac_filters.bcast_accept_all & ~mask;
-
-	bp->mac_filters.unmatched_unicast = unmatched_unicast ?
-		bp->mac_filters.unmatched_unicast | mask :
-		bp->mac_filters.unmatched_unicast & ~mask;
-}
-
-static void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
-{
-	struct tstorm_eth_function_common_config tcfg = {0};
-	u16 rss_flgs;
-
-	/* tpa */
-	if (p->func_flgs & FUNC_FLG_TPA)
-		tcfg.config_flags |=
-		TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
-
-	/* set rss flags */
-	rss_flgs = (p->rss->mode <<
-		TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT);
-
-	if (p->rss->cap & RSS_IPV4_CAP)
-		rss_flgs |= RSS_IPV4_CAP_MASK;
-	if (p->rss->cap & RSS_IPV4_TCP_CAP)
-		rss_flgs |= RSS_IPV4_TCP_CAP_MASK;
-	if (p->rss->cap & RSS_IPV6_CAP)
-		rss_flgs |= RSS_IPV6_CAP_MASK;
-	if (p->rss->cap & RSS_IPV6_TCP_CAP)
-		rss_flgs |= RSS_IPV6_TCP_CAP_MASK;
-
-	tcfg.config_flags |= rss_flgs;
-	tcfg.rss_result_mask = p->rss->result_mask;
-
-	storm_memset_func_cfg(bp, &tcfg, p->func_id);
 
 	/* Enable the function in the FW */
 	storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
 	storm_memset_func_en(bp, p->func_id, 1);
 
-	/* statistics */
-	if (p->func_flgs & FUNC_FLG_STATS) {
-		struct stats_indication_flags stats_flags = {0};
-		stats_flags.collect_eth = 1;
-
-		storm_memset_xstats_flags(bp, &stats_flags, p->func_id);
-		storm_memset_xstats_addr(bp, p->fw_stat_map, p->func_id);
-
-		storm_memset_tstats_flags(bp, &stats_flags, p->func_id);
-		storm_memset_tstats_addr(bp, p->fw_stat_map, p->func_id);
-
-		storm_memset_ustats_flags(bp, &stats_flags, p->func_id);
-		storm_memset_ustats_addr(bp, p->fw_stat_map, p->func_id);
-
-		storm_memset_cstats_flags(bp, &stats_flags, p->func_id);
-		storm_memset_cstats_addr(bp, p->fw_stat_map, p->func_id);
-	}
-
 	/* spq */
 	if (p->func_flgs & FUNC_FLG_SPQ) {
 		storm_memset_spq_addr(bp, p->spq_map, p->func_id);
@@ -2406,39 +2631,62 @@
 	}
 }
 
-static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp,
-				     struct bnx2x_fastpath *fp)
+static inline unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
+					      struct bnx2x_fastpath *fp,
+					      bool leading)
 {
-	u16 flags = 0;
+	unsigned long flags = 0;
 
-	/* calculate queue flags */
-	flags |= QUEUE_FLG_CACHE_ALIGN;
-	flags |= QUEUE_FLG_HC;
-	flags |= IS_MF_SD(bp) ? QUEUE_FLG_OV : 0;
+	/* PF driver will always initialize the Queue to an ACTIVE state */
+	__set_bit(BNX2X_Q_FLG_ACTIVE, &flags);
 
-	flags |= QUEUE_FLG_VLAN;
-	DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
+	/* calculate other queue flags */
+	if (IS_MF_SD(bp))
+		__set_bit(BNX2X_Q_FLG_OV, &flags);
+
+	if (IS_FCOE_FP(fp))
+		__set_bit(BNX2X_Q_FLG_FCOE, &flags);
 
 	if (!fp->disable_tpa)
-		flags |= QUEUE_FLG_TPA;
+		__set_bit(BNX2X_Q_FLG_TPA, &flags);
 
-	flags = stat_counter_valid(bp, fp) ?
-			(flags | QUEUE_FLG_STATS) : (flags & ~QUEUE_FLG_STATS);
+	if (stat_counter_valid(bp, fp)) {
+		__set_bit(BNX2X_Q_FLG_STATS, &flags);
+		__set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
+	}
+
+	if (leading) {
+		__set_bit(BNX2X_Q_FLG_LEADING_RSS, &flags);
+		__set_bit(BNX2X_Q_FLG_MCAST, &flags);
+	}
+
+	/* Always set HW VLAN stripping */
+	__set_bit(BNX2X_Q_FLG_VLAN, &flags);
 
 	return flags;
 }
 
-static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
-	struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
-	struct bnx2x_rxq_init_params *rxq_init)
+static void bnx2x_pf_q_prep_general(struct bnx2x *bp,
+	struct bnx2x_fastpath *fp, struct bnx2x_general_setup_params *gen_init)
 {
-	u16 max_sge = 0;
+	gen_init->stat_id = bnx2x_stats_id(fp);
+	gen_init->spcl_id = fp->cl_id;
+
+	/* Always use mini-jumbo MTU for FCoE L2 ring */
+	if (IS_FCOE_FP(fp))
+		gen_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
+	else
+		gen_init->mtu = bp->dev->mtu;
+}
+
+static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
+	struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
+	struct bnx2x_rxq_setup_params *rxq_init)
+{
+	u8 max_sge = 0;
 	u16 sge_sz = 0;
 	u16 tpa_agg_size = 0;
 
-	/* calculate queue flags */
-	u16 flags = bnx2x_get_cl_flags(bp, fp);
-
 	if (!fp->disable_tpa) {
 		pause->sge_th_hi = 250;
 		pause->sge_th_lo = 150;
@@ -2459,33 +2707,37 @@
 		pause->bd_th_lo = 250;
 		pause->rcq_th_hi = 350;
 		pause->rcq_th_lo = 250;
-		pause->sge_th_hi = 0;
-		pause->sge_th_lo = 0;
+
 		pause->pri_map = 1;
 	}
 
 	/* rxq setup */
-	rxq_init->flags = flags;
-	rxq_init->cxt = &bp->context.vcxt[fp->cid].eth;
 	rxq_init->dscr_map = fp->rx_desc_mapping;
 	rxq_init->sge_map = fp->rx_sge_mapping;
 	rxq_init->rcq_map = fp->rx_comp_mapping;
 	rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
 
-	/* Always use mini-jumbo MTU for FCoE L2 ring */
-	if (IS_FCOE_FP(fp))
-		rxq_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
-	else
-		rxq_init->mtu = bp->dev->mtu;
+	/* This should be the maximum number of data bytes that may be
+	 * placed on the BD (not including padding).
+	 */
+	rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN -
+		IP_HEADER_ALIGNMENT_PADDING;
 
-	rxq_init->buf_sz = fp->rx_buf_size;
 	rxq_init->cl_qzone_id = fp->cl_qzone_id;
-	rxq_init->cl_id = fp->cl_id;
-	rxq_init->spcl_id = fp->cl_id;
-	rxq_init->stat_id = fp->cl_id;
 	rxq_init->tpa_agg_sz = tpa_agg_size;
 	rxq_init->sge_buf_sz = sge_sz;
 	rxq_init->max_sges_pkt = max_sge;
+	rxq_init->rss_engine_id = BP_FUNC(bp);
+
+	/* Maximum number of simultaneous TPA aggregations for this Queue.
+	 *
+	 * For PF Clients it should be the maximum available number.
+	 * VF driver(s) may want to set it to a smaller value.
+	 */
+	rxq_init->max_tpa_queues =
+		(CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
+		ETH_MAX_AGGREGATION_QUEUES_E1H_E2);
+
 	rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
 	rxq_init->fw_sb_id = fp->fw_sb_id;
 
@@ -2493,46 +2745,35 @@
 		rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
 	else
 		rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX;
-
-	rxq_init->cid = HW_CID(bp, fp->cid);
-
-	rxq_init->hc_rate = bp->rx_ticks ? (1000000 / bp->rx_ticks) : 0;
 }
 
-static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp,
-	struct bnx2x_fastpath *fp, struct bnx2x_txq_init_params *txq_init)
+static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
+	struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init)
 {
-	u16 flags = bnx2x_get_cl_flags(bp, fp);
-
-	txq_init->flags = flags;
-	txq_init->cxt = &bp->context.vcxt[fp->cid].eth;
 	txq_init->dscr_map = fp->tx_desc_mapping;
-	txq_init->stat_id = fp->cl_id;
-	txq_init->cid = HW_CID(bp, fp->cid);
 	txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX;
 	txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
 	txq_init->fw_sb_id = fp->fw_sb_id;
 
+	/*
+	 * set the tss leading client id for TX classification ==
+	 * leading RSS client id
+	 */
+	txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id);
+
 	if (IS_FCOE_FP(fp)) {
 		txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
 		txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
 	}
-
-	txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
 }
 
 static void bnx2x_pf_init(struct bnx2x *bp)
 {
 	struct bnx2x_func_init_params func_init = {0};
-	struct bnx2x_rss_params rss = {0};
 	struct event_ring_data eq_data = { {0} };
 	u16 flags;
 
-	/* pf specific setups */
-	if (!CHIP_IS_E1(bp))
-		storm_memset_ov(bp, bp->mf_ov, BP_FUNC(bp));
-
-	if (CHIP_IS_E2(bp)) {
+	if (!CHIP_IS_E1x(bp)) {
 		/* reset IGU PF statistics: MSIX + ATTN */
 		/* PF */
 		REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
@@ -2550,27 +2791,14 @@
 	/* function setup flags */
 	flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
 
-	if (CHIP_IS_E1x(bp))
-		flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
-	else
-		flags |= FUNC_FLG_TPA;
-
-	/* function setup */
-
-	/**
-	 * Although RSS is meaningless when there is a single HW queue we
-	 * still need it enabled in order to have HW Rx hash generated.
+	/* This flag is relevant for E1x only.
+	 * E2 doesn't have a TPA configuration in a function level.
 	 */
-	rss.cap = (RSS_IPV4_CAP | RSS_IPV4_TCP_CAP |
-		   RSS_IPV6_CAP | RSS_IPV6_TCP_CAP);
-	rss.mode = bp->multi_mode;
-	rss.result_mask = MULTI_MASK;
-	func_init.rss = &rss;
+	flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
 
 	func_init.func_flgs = flags;
 	func_init.pf_id = BP_FUNC(bp);
 	func_init.func_id = BP_FUNC(bp);
-	func_init.fw_stat_map = bnx2x_sp_mapping(bp, fw_stats);
 	func_init.spq_map = bp->spq_mapping;
 	func_init.spq_prod = bp->spq_prod_idx;
 
@@ -2579,11 +2807,11 @@
 	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
 
 	/*
-	Congestion management values depend on the link rate
-	There is no active link so initial link rate is set to 10 Gbps.
-	When the link comes up The congestion management values are
-	re-calculated according to the actual link rate.
-	*/
+	 * Congestion management values depend on the link rate.
+	 * There is no active link so the initial link rate is set to 10 Gbps.
+	 * When the link comes up the congestion management values are
+	 * re-calculated according to the actual link rate.
+	 */
 	bp->link_vars.line_speed = SPEED_10000;
 	bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
 
@@ -2591,10 +2819,6 @@
 	if (bp->port.pmf)
 		storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
 
-	/* no rx until link is up */
-	bp->rx_mode = BNX2X_RX_MODE_NONE;
-	bnx2x_set_storm_rx_mode(bp);
-
 	/* init Event Queue */
 	eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
 	eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
@@ -2609,11 +2833,9 @@
 {
 	int port = BP_PORT(bp);
 
-	netif_tx_disable(bp->dev);
+	bnx2x_tx_disable(bp);
 
 	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
-
-	netif_carrier_off(bp->dev);
 }
 
 static void bnx2x_e1h_enable(struct bnx2x *bp)
@@ -2716,12 +2938,47 @@
 	mmiowb();
 }
 
-/* the slow path queue is odd since completions arrive on the fastpath ring */
+/**
+ * bnx2x_is_contextless_ramrod - check if the given command completes on the EQ
+ *
+ * @cmd:	command to check
+ * @cmd_type:	command type
+ */
+static inline bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type)
+{
+	if ((cmd_type == NONE_CONNECTION_TYPE) ||
+	    (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
+	    (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
+	    (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
+	    (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
+	    (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE))
+		return true;
+	else
+		return false;
+}
+
+/**
+ * bnx2x_sp_post - place a single command on an SP ring
+ *
+ * @bp:		driver handle
+ * @command:	command to place (e.g. SETUP, FILTER_RULES, etc.)
+ * @cid:	SW CID the command is related to
+ * @data_hi:	command private data address (high 32 bits)
+ * @data_lo:	command private data address (low 32 bits)
+ * @cmd_type:	command type (e.g. NONE, ETH)
+ *
+ * SP data is handled as if it's always an address pair, thus data fields are
+ * not swapped to little endian in upper functions. Instead this function swaps
+ * data as if it's two u32 fields.
+ */
 int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
-		  u32 data_hi, u32 data_lo, int common)
+		  u32 data_hi, u32 data_lo, int cmd_type)
 {
 	struct eth_spe *spe;
 	u16 type;
+	bool common = bnx2x_is_contextless_ramrod(command, cmd_type);
 
 #ifdef BNX2X_STOP_ON_ERROR
 	if (unlikely(bp->panic))
@@ -2751,17 +3008,7 @@
 			cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
 				    HW_CID(bp, cid));
 
-	if (common)
-		/* Common ramrods:
-		 *	FUNC_START, FUNC_STOP, CFC_DEL, STATS, SET_MAC
-		 *	TRAFFIC_STOP, TRAFFIC_START
-		 */
-		type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
-			& SPE_HDR_CONN_TYPE;
-	else
-		/* ETH ramrods: SETUP, HALT */
-		type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
-			& SPE_HDR_CONN_TYPE;
+	type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
 
 	type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
 		 SPE_HDR_FUNCTION_ID);
@@ -2773,7 +3020,8 @@
 
 	/* stats ramrod has its own slot on the spq */
 	if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY) {
-		/* It's ok if the actual decrement is issued towards the memory
+		/*
+		 * It's ok if the actual decrement is issued towards the memory
 		 * somewhere between the spin_lock and spin_unlock. Thus no
+		 * more explicit memory barrier is needed.
 		 */
@@ -2892,9 +3140,15 @@
 
 			/* save nig interrupt mask */
 			nig_mask = REG_RD(bp, nig_int_mask_addr);
-			REG_WR(bp, nig_int_mask_addr, 0);
 
-			bnx2x_link_attn(bp);
+			/* If nig_mask is not set, no need to call the update
+			 * function.
+			 */
+			if (nig_mask) {
+				REG_WR(bp, nig_int_mask_addr, 0);
+
+				bnx2x_link_attn(bp);
+			}
 
 			/* handle unicore attn? */
 		}
@@ -2999,8 +3253,7 @@
 		bnx2x_fan_failure(bp);
 	}
 
-	if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
-		    AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
+	if ((attn & bp->link_vars.aeu_int_mask) && bp->port.pmf) {
 		bnx2x_acquire_phy_lock(bp);
 		bnx2x_handle_module_detect_int(&bp->link_params);
 		bnx2x_release_phy_lock(bp);
@@ -3063,13 +3316,13 @@
 	}
 
 	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
-
 		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
-		BNX2X_ERR("PXP hw attention 0x%x\n", val);
+		BNX2X_ERR("PXP hw attention-0 0x%x\n", val);
 		/* RQ_USDMDP_FIFO_OVERFLOW */
 		if (val & 0x18000)
 			BNX2X_ERR("FATAL error from PXP\n");
-		if (CHIP_IS_E2(bp)) {
+
+		if (!CHIP_IS_E1x(bp)) {
 			val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
 			BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
 		}
@@ -3117,17 +3370,27 @@
 			if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
 				bnx2x_pmf_update(bp);
 
-			/* Always call it here: bnx2x_link_report() will
-			 * prevent the link indication duplication.
-			 */
-			bnx2x__link_status_update(bp);
-
 			if (bp->port.pmf &&
 			    (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
 				bp->dcbx_enabled > 0)
 				/* start dcbx state machine */
 				bnx2x_dcbx_set_params(bp,
 					BNX2X_DCBX_STATE_NEG_RECEIVED);
+			if (bp->link_vars.periodic_flags &
+			    PERIODIC_FLAGS_LINK_EVENT) {
+				/*  sync with link */
+				bnx2x_acquire_phy_lock(bp);
+				bp->link_vars.periodic_flags &=
+					~PERIODIC_FLAGS_LINK_EVENT;
+				bnx2x_release_phy_lock(bp);
+				if (IS_MF(bp))
+					bnx2x_link_sync_notify(bp);
+				bnx2x_link_report(bp);
+			}
+			/* Always call it here: bnx2x_link_report() will
+			 * prevent the link indication duplication.
+			 */
+			bnx2x__link_status_update(bp);
 		} else if (attn & BNX2X_MC_ASSERT_BITS) {
 
 			BNX2X_ERR("MC assert!\n");
@@ -3163,72 +3426,185 @@
 	}
 }
 
-#define BNX2X_MISC_GEN_REG      MISC_REG_GENERIC_POR_1
-#define LOAD_COUNTER_BITS	16 /* Number of bits for load counter */
-#define LOAD_COUNTER_MASK	(((u32)0x1 << LOAD_COUNTER_BITS) - 1)
-#define RESET_DONE_FLAG_MASK	(~LOAD_COUNTER_MASK)
-#define RESET_DONE_FLAG_SHIFT	LOAD_COUNTER_BITS
+/*
+ * Bit map:
+ * 0-7   - Engine0 load counter.
+ * 8-15  - Engine1 load counter.
+ * 16    - Engine0 RESET_IN_PROGRESS bit.
+ * 17    - Engine1 RESET_IN_PROGRESS bit.
+ * 18    - Engine0 ONE_IS_LOADED. Set when there is at least one active
+ *         function on the engine.
+ * 19    - Engine1 ONE_IS_LOADED.
+ * 20    - Chip reset flow bit. When set, a non-leader must wait for both
+ *         engines' leaders to complete (check both RESET_IN_PROGRESS bits,
+ *         not just the one belonging to its engine).
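+ *
+ * For example, a (hypothetical) register value of 0x00050201 decodes as:
+ * engine0 load counter 1, engine1 load counter 2, engine0 RESET_IN_PROGRESS
+ * set and engine0 ONE_IS_LOADED set.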
+ *
+ */
+#define BNX2X_RECOVERY_GLOB_REG		MISC_REG_GENERIC_POR_1
+
+#define BNX2X_PATH0_LOAD_CNT_MASK	0x000000ff
+#define BNX2X_PATH0_LOAD_CNT_SHIFT	0
+#define BNX2X_PATH1_LOAD_CNT_MASK	0x0000ff00
+#define BNX2X_PATH1_LOAD_CNT_SHIFT	8
+#define BNX2X_PATH0_RST_IN_PROG_BIT	0x00010000
+#define BNX2X_PATH1_RST_IN_PROG_BIT	0x00020000
+#define BNX2X_GLOBAL_RESET_BIT		0x00040000
 
 /*
+ * Set the GLOBAL_RESET bit.
+ *
+ * Should be run under rtnl lock
+ */
+void bnx2x_set_reset_global(struct bnx2x *bp)
+{
+	u32 val	= REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+
+	REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT);
+	barrier();
+	mmiowb();
+}
+
+/*
+ * Clear the GLOBAL_RESET bit.
+ *
+ * Should be run under rtnl lock
+ */
+static inline void bnx2x_clear_reset_global(struct bnx2x *bp)
+{
+	u32 val	= REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+
+	REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT));
+	barrier();
+	mmiowb();
+}
+
+/*
+ * Checks the GLOBAL_RESET bit.
+ *
  * should be run under rtnl lock
  */
+static inline bool bnx2x_reset_is_global(struct bnx2x *bp)
+{
+	u32 val	= REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+
+	DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
+	return (val & BNX2X_GLOBAL_RESET_BIT) ? true : false;
+}
+
+/*
+ * Clear RESET_IN_PROGRESS bit for the current engine.
+ *
+ * Should be run under rtnl lock
+ */
 static inline void bnx2x_set_reset_done(struct bnx2x *bp)
 {
-	u32 val	= REG_RD(bp, BNX2X_MISC_GEN_REG);
-	val &= ~(1 << RESET_DONE_FLAG_SHIFT);
-	REG_WR(bp, BNX2X_MISC_GEN_REG, val);
+	u32 val	= REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+	u32 bit = BP_PATH(bp) ?
+		BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
+
+	/* Clear the bit */
+	val &= ~bit;
+	REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
 	barrier();
 	mmiowb();
 }
 
 /*
+ * Set RESET_IN_PROGRESS for the current engine.
+ *
  * should be run under rtnl lock
  */
-static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
+void bnx2x_set_reset_in_progress(struct bnx2x *bp)
 {
-	u32 val	= REG_RD(bp, BNX2X_MISC_GEN_REG);
-	val |= (1 << 16);
-	REG_WR(bp, BNX2X_MISC_GEN_REG, val);
+	u32 val	= REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+	u32 bit = BP_PATH(bp) ?
+		BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
+
+	/* Set the bit */
+	val |= bit;
+	REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
 	barrier();
 	mmiowb();
 }
 
 /*
+ * Checks the RESET_IN_PROGRESS bit for the given engine.
  * should be run under rtnl lock
  */
-bool bnx2x_reset_is_done(struct bnx2x *bp)
+bool bnx2x_reset_is_done(struct bnx2x *bp, int engine)
 {
-	u32 val	= REG_RD(bp, BNX2X_MISC_GEN_REG);
-	DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
-	return (val & RESET_DONE_FLAG_MASK) ? false : true;
+	u32 val	= REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+	u32 bit = engine ?
+		BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
+
+	/* return false if bit is set */
+	return (val & bit) ? false : true;
 }
 
 /*
+ * Increment the load counter for the current engine.
+ *
  * should be run under rtnl lock
  */
-inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
+void bnx2x_inc_load_cnt(struct bnx2x *bp)
 {
-	u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
+	u32 val1, val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+	u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
+			     BNX2X_PATH0_LOAD_CNT_MASK;
+	u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
+			     BNX2X_PATH0_LOAD_CNT_SHIFT;
 
 	DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
 
-	val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
-	REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
+	/* get the current counter value */
+	val1 = (val & mask) >> shift;
+
+	/* increment... */
+	val1++;
+
+	/* clear the old value */
+	val &= ~mask;
+
+	/* set the new one */
+	val |= ((val1 << shift) & mask);
+
+	REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
 	barrier();
 	mmiowb();
 }
 
-/*
- * should be run under rtnl lock
+/**
+ * bnx2x_dec_load_cnt - decrement the load counter
+ *
+ * @bp:		driver handle
+ *
+ * Should be run under rtnl lock.
+ * Decrements the load counter for the current engine. Returns
+ * the new counter value.
  */
 u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
 {
-	u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
+	u32 val1, val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+	u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
+			     BNX2X_PATH0_LOAD_CNT_MASK;
+	u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
+			     BNX2X_PATH0_LOAD_CNT_SHIFT;
 
 	DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
 
-	val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
-	REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
+	/* get the current counter value */
+	val1 = (val & mask) >> shift;
+
+	/* decrement... */
+	val1--;
+
+	/* clear the old value */
+	val &= ~mask;
+
+	/* set the new one */
+	val |= ((val1 << shift) & mask);
+
+	REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
 	barrier();
 	mmiowb();
 
@@ -3236,17 +3612,39 @@
 }
 
 /*
+ * Read the load counter for the current engine.
+ *
  * should be run under rtnl lock
  */
-static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
+static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp, int engine)
 {
-	return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
+	u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK :
+			     BNX2X_PATH0_LOAD_CNT_MASK);
+	u32 shift = (engine ? BNX2X_PATH1_LOAD_CNT_SHIFT :
+			     BNX2X_PATH0_LOAD_CNT_SHIFT);
+	u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+
+	DP(NETIF_MSG_HW, "GLOB_REG=0x%08x\n", val);
+
+	val = (val & mask) >> shift;
+
+	DP(NETIF_MSG_HW, "load_cnt for engine %d = %d\n", engine, val);
+
+	return val;
 }
 
+/*
+ * Reset the load counter for the current engine.
+ *
+ * should be run under rtnl lock
+ */
 static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
 {
-	u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
-	REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
+	u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+	u32 mask = (BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
+			     BNX2X_PATH0_LOAD_CNT_MASK);
+
+	REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~mask));
 }
 
 static inline void _print_next_block(int idx, const char *blk)
@@ -3256,7 +3654,8 @@
 	pr_cont("%s", blk);
 }
 
-static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
+static inline int bnx2x_check_blocks_with_parity0(u32 sig, int par_num,
+						  bool print)
 {
 	int i = 0;
 	u32 cur_bit = 0;
@@ -3265,19 +3664,33 @@
 		if (sig & cur_bit) {
 			switch (cur_bit) {
 			case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
-				_print_next_block(par_num++, "BRB");
+				if (print)
+					_print_next_block(par_num++, "BRB");
 				break;
 			case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
-				_print_next_block(par_num++, "PARSER");
+				if (print)
+					_print_next_block(par_num++, "PARSER");
 				break;
 			case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
-				_print_next_block(par_num++, "TSDM");
+				if (print)
+					_print_next_block(par_num++, "TSDM");
 				break;
 			case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
-				_print_next_block(par_num++, "SEARCHER");
+				if (print)
+					_print_next_block(par_num++,
+							  "SEARCHER");
+				break;
+			case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
+				if (print)
+					_print_next_block(par_num++, "TCM");
 				break;
 			case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
-				_print_next_block(par_num++, "TSEMI");
+				if (print)
+					_print_next_block(par_num++, "TSEMI");
+				break;
+			case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
+				if (print)
+					_print_next_block(par_num++, "XPB");
 				break;
 			}
 
@@ -3289,7 +3702,8 @@
 	return par_num;
 }
 
-static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
+static inline int bnx2x_check_blocks_with_parity1(u32 sig, int par_num,
+						  bool *global, bool print)
 {
 	int i = 0;
 	u32 cur_bit = 0;
@@ -3297,38 +3711,64 @@
 		cur_bit = ((u32)0x1 << i);
 		if (sig & cur_bit) {
 			switch (cur_bit) {
-			case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
-				_print_next_block(par_num++, "PBCLIENT");
+			case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
+				if (print)
+					_print_next_block(par_num++, "PBF");
 				break;
 			case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
-				_print_next_block(par_num++, "QM");
+				if (print)
+					_print_next_block(par_num++, "QM");
+				break;
+			case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
+				if (print)
+					_print_next_block(par_num++, "TM");
 				break;
 			case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
-				_print_next_block(par_num++, "XSDM");
+				if (print)
+					_print_next_block(par_num++, "XSDM");
+				break;
+			case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
+				if (print)
+					_print_next_block(par_num++, "XCM");
 				break;
 			case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
-				_print_next_block(par_num++, "XSEMI");
+				if (print)
+					_print_next_block(par_num++, "XSEMI");
 				break;
 			case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
-				_print_next_block(par_num++, "DOORBELLQ");
+				if (print)
+					_print_next_block(par_num++,
+							  "DOORBELLQ");
+				break;
+			case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
+				if (print)
+					_print_next_block(par_num++, "NIG");
 				break;
 			case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
-				_print_next_block(par_num++, "VAUX PCI CORE");
+				if (print)
+					_print_next_block(par_num++,
+							  "VAUX PCI CORE");
+				*global = true;
 				break;
 			case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
-				_print_next_block(par_num++, "DEBUG");
+				if (print)
+					_print_next_block(par_num++, "DEBUG");
 				break;
 			case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
-				_print_next_block(par_num++, "USDM");
+				if (print)
+					_print_next_block(par_num++, "USDM");
 				break;
 			case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
-				_print_next_block(par_num++, "USEMI");
+				if (print)
+					_print_next_block(par_num++, "USEMI");
 				break;
 			case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
-				_print_next_block(par_num++, "UPB");
+				if (print)
+					_print_next_block(par_num++, "UPB");
 				break;
 			case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
-				_print_next_block(par_num++, "CSDM");
+				if (print)
+					_print_next_block(par_num++, "CSDM");
 				break;
 			}
 
@@ -3340,7 +3780,8 @@
 	return par_num;
 }
 
-static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
+static inline int bnx2x_check_blocks_with_parity2(u32 sig, int par_num,
+						  bool print)
 {
 	int i = 0;
 	u32 cur_bit = 0;
@@ -3349,26 +3790,37 @@
 		if (sig & cur_bit) {
 			switch (cur_bit) {
 			case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
-				_print_next_block(par_num++, "CSEMI");
+				if (print)
+					_print_next_block(par_num++, "CSEMI");
 				break;
 			case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
-				_print_next_block(par_num++, "PXP");
+				if (print)
+					_print_next_block(par_num++, "PXP");
 				break;
 			case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
-				_print_next_block(par_num++,
+				if (print)
+					_print_next_block(par_num++,
 					"PXPPCICLOCKCLIENT");
 				break;
 			case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
-				_print_next_block(par_num++, "CFC");
+				if (print)
+					_print_next_block(par_num++, "CFC");
 				break;
 			case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
-				_print_next_block(par_num++, "CDU");
+				if (print)
+					_print_next_block(par_num++, "CDU");
+				break;
+			case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
+				if (print)
+					_print_next_block(par_num++, "DMAE");
 				break;
 			case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
-				_print_next_block(par_num++, "IGU");
+				if (print)
+					_print_next_block(par_num++, "IGU");
 				break;
 			case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
-				_print_next_block(par_num++, "MISC");
+				if (print)
+					_print_next_block(par_num++, "MISC");
 				break;
 			}
 
@@ -3380,7 +3832,8 @@
 	return par_num;
 }
 
-static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
+static inline int bnx2x_check_blocks_with_parity3(u32 sig, int par_num,
+						  bool *global, bool print)
 {
 	int i = 0;
 	u32 cur_bit = 0;
@@ -3389,16 +3842,27 @@
 		if (sig & cur_bit) {
 			switch (cur_bit) {
 			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
-				_print_next_block(par_num++, "MCP ROM");
+				if (print)
+					_print_next_block(par_num++, "MCP ROM");
+				*global = true;
 				break;
 			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
-				_print_next_block(par_num++, "MCP UMP RX");
+				if (print)
+					_print_next_block(par_num++,
+							  "MCP UMP RX");
+				*global = true;
 				break;
 			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
-				_print_next_block(par_num++, "MCP UMP TX");
+				if (print)
+					_print_next_block(par_num++,
+							  "MCP UMP TX");
+				*global = true;
 				break;
 			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
-				_print_next_block(par_num++, "MCP SCPAD");
+				if (print)
+					_print_next_block(par_num++,
+							  "MCP SCPAD");
+				*global = true;
 				break;
 			}
 
@@ -3410,8 +3874,8 @@
 	return par_num;
 }
 
-static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
-				     u32 sig2, u32 sig3)
+static inline bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
+				     u32 sig0, u32 sig1, u32 sig2, u32 sig3)
 {
 	if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
 	    (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
@@ -3423,23 +3887,32 @@
 			  sig1 & HW_PRTY_ASSERT_SET_1,
 			  sig2 & HW_PRTY_ASSERT_SET_2,
 			  sig3 & HW_PRTY_ASSERT_SET_3);
-		printk(KERN_ERR"%s: Parity errors detected in blocks: ",
-		       bp->dev->name);
-		par_num = bnx2x_print_blocks_with_parity0(
-			sig0 & HW_PRTY_ASSERT_SET_0, par_num);
-		par_num = bnx2x_print_blocks_with_parity1(
-			sig1 & HW_PRTY_ASSERT_SET_1, par_num);
-		par_num = bnx2x_print_blocks_with_parity2(
-			sig2 & HW_PRTY_ASSERT_SET_2, par_num);
-		par_num = bnx2x_print_blocks_with_parity3(
-			sig3 & HW_PRTY_ASSERT_SET_3, par_num);
-		printk("\n");
+		if (print)
+			netdev_err(bp->dev,
+				   "Parity errors detected in blocks: ");
+		par_num = bnx2x_check_blocks_with_parity0(
+			sig0 & HW_PRTY_ASSERT_SET_0, par_num, print);
+		par_num = bnx2x_check_blocks_with_parity1(
+			sig1 & HW_PRTY_ASSERT_SET_1, par_num, global, print);
+		par_num = bnx2x_check_blocks_with_parity2(
+			sig2 & HW_PRTY_ASSERT_SET_2, par_num, print);
+		par_num = bnx2x_check_blocks_with_parity3(
+			sig3 & HW_PRTY_ASSERT_SET_3, par_num, global, print);
+		if (print)
+			pr_cont("\n");
 		return true;
 	} else
 		return false;
 }
 
-bool bnx2x_chk_parity_attn(struct bnx2x *bp)
+/**
+ * bnx2x_chk_parity_attn - checks for parity attentions.
+ *
+ * @bp:		driver handle
+ * @global:	true if there was a global attention
+ * @print:	show parity attention in syslog
+ */
+bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print)
 {
 	struct attn_route attn;
 	int port = BP_PORT(bp);
@@ -3457,8 +3930,8 @@
 		MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
 			     port*4);
 
-	return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
-					attn.sig[3]);
+	return bnx2x_parity_attn(bp, global, print, attn.sig[0], attn.sig[1],
+				 attn.sig[2], attn.sig[3]);
 }
 
 
@@ -3537,21 +4010,25 @@
 	u32 reg_addr;
 	u32 val;
 	u32 aeu_mask;
+	bool global = false;
 
 	/* need to take HW lock because MCP or other port might also
 	   try to handle this event */
 	bnx2x_acquire_alr(bp);
 
-	if (CHIP_PARITY_ENABLED(bp) && bnx2x_chk_parity_attn(bp)) {
+	if (bnx2x_chk_parity_attn(bp, &global, true)) {
+#ifndef BNX2X_STOP_ON_ERROR
 		bp->recovery_state = BNX2X_RECOVERY_INIT;
-		bnx2x_set_reset_in_progress(bp);
 		schedule_delayed_work(&bp->reset_task, 0);
 		/* Disable HW interrupts */
 		bnx2x_int_disable(bp);
-		bnx2x_release_alr(bp);
 		/* In case of parity errors don't handle attentions so that
 		 * other function would "see" parity errors.
 		 */
+#else
+		bnx2x_panic();
+#endif
+		bnx2x_release_alr(bp);
 		return;
 	}
 
@@ -3559,7 +4036,7 @@
 	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
 	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
 	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
-	if (CHIP_IS_E2(bp))
+	if (!CHIP_IS_E1x(bp))
 		attn.sig[4] =
 		      REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
 	else
@@ -3655,6 +4132,15 @@
 		bnx2x_attn_int_deasserted(bp, deasserted);
 }
 
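+/* Acknowledge a status block through the IGU command space: compute the
+ * per-SB ack address and delegate to the generic helper.
+ */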
+void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
+		      u16 index, u8 op, u8 update)
+{
+	u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
+
+	bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update,
+			     igu_addr);
+}
+
 static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
 {
 	/* No memory barriers */
@@ -3666,6 +4152,8 @@
 static int  bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
 				      union event_ring_elem *elem)
 {
+	u8 err = elem->message.error;
+
 	if (!bp->cnic_eth_dev.starting_cid  ||
 	    (cid < bp->cnic_eth_dev.starting_cid &&
 	    cid != bp->cnic_eth_dev.iscsi_l2_cid))
@@ -3673,16 +4161,122 @@
 
 	DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
 
-	if (unlikely(elem->message.data.cfc_del_event.error)) {
+	if (unlikely(err)) {
 		BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
 			  cid);
 		bnx2x_panic_dump(bp);
 	}
-	bnx2x_cnic_cfc_comp(bp, cid);
+	bnx2x_cnic_cfc_comp(bp, cid, err);
 	return 0;
 }
 #endif
 
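+/* A multicast ramrod has completed: clear the object's pending state and,
+ * if more mcast commands were queued in the meantime, send the next one.
+ */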
+static inline void bnx2x_handle_mcast_eqe(struct bnx2x *bp)
+{
+	struct bnx2x_mcast_ramrod_params rparam;
+	int rc;
+
+	memset(&rparam, 0, sizeof(rparam));
+
+	rparam.mcast_obj = &bp->mcast_obj;
+
+	netif_addr_lock_bh(bp->dev);
+
+	/* Clear pending state for the last command */
+	bp->mcast_obj.raw.clear_pending(&bp->mcast_obj.raw);
+
+	/* If there are pending mcast commands - send them */
+	if (bp->mcast_obj.check_pending(&bp->mcast_obj)) {
+		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
+		if (rc < 0)
+			BNX2X_ERR("Failed to send pending mcast commands: %d\n",
+				  rc);
+	}
+
+	netif_addr_unlock_bh(bp->dev);
+}
+
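+/* A classification (MAC rules) ramrod has completed on the EQ: find the
+ * vlan_mac object that owns the CID encoded in the echo field and let it
+ * complete the command, continuing with any queued follow-up commands.
+ */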
+static inline void bnx2x_handle_classification_eqe(struct bnx2x *bp,
+						   union event_ring_elem *elem)
+{
+	unsigned long ramrod_flags = 0;
+	int rc = 0;
+	u32 cid = elem->message.data.eth_event.echo & BNX2X_SWCID_MASK;
+	struct bnx2x_vlan_mac_obj *vlan_mac_obj;
+
+	/* Always push next commands out, don't wait here */
+	__set_bit(RAMROD_CONT, &ramrod_flags);
+
+	switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
+	case BNX2X_FILTER_MAC_PENDING:
+#ifdef BCM_CNIC
+		if (cid == BNX2X_ISCSI_ETH_CID)
+			vlan_mac_obj = &bp->iscsi_l2_mac_obj;
+		else
+#endif
+			vlan_mac_obj = &bp->fp[cid].mac_obj;
+
+		break;
+		vlan_mac_obj = &bp->fp[cid].mac_obj;
+
+	case BNX2X_FILTER_MCAST_PENDING:
+		/* This is only relevant for 57710 where multicast MACs are
+		 * configured as unicast MACs using the same ramrod.
+		 */
+		bnx2x_handle_mcast_eqe(bp);
+		return;
+	default:
+		BNX2X_ERR("Unsupported classification command: %d\n",
+			  elem->message.data.eth_event.echo);
+		return;
+	}
+
+	rc = vlan_mac_obj->complete(bp, vlan_mac_obj, elem, &ramrod_flags);
+
+	if (rc < 0)
+		BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
+	else if (rc > 0)
+		DP(BNX2X_MSG_SP, "Scheduled next pending commands...\n");
+
+}
+
+#ifdef BCM_CNIC
+static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start);
+#endif
+
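+/* An rx_mode (filter rules) ramrod has completed: clear the pending bit and
+ * re-issue any rx_mode configuration that was requested while the previous
+ * one was still in flight.
+ */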
+static inline void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
+{
+	netif_addr_lock_bh(bp->dev);
+
+	clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
+
+	/* Send rx_mode command again if was requested */
+	if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state))
+		bnx2x_set_storm_rx_mode(bp);
+#ifdef BCM_CNIC
+	else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED,
+				    &bp->sp_state))
+		bnx2x_set_iscsi_eth_rx_mode(bp, true);
+	else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED,
+				    &bp->sp_state))
+		bnx2x_set_iscsi_eth_rx_mode(bp, false);
+#endif
+
+	netif_addr_unlock_bh(bp->dev);
+}
+
+static inline struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
+	struct bnx2x *bp, u32 cid)
+{
+#ifdef BCM_CNIC
+	if (cid == BNX2X_FCOE_ETH_CID)
+		return &bnx2x_fcoe(bp, q_obj);
+	else
+#endif
+		return &bnx2x_fp(bp, cid, q_obj);
+}
+
 static void bnx2x_eq_int(struct bnx2x *bp)
 {
 	u16 hw_cons, sw_cons, sw_prod;
@@ -3690,6 +4284,9 @@
 	u32 cid;
 	u8 opcode;
 	int spqe_cnt = 0;
+	struct bnx2x_queue_sp_obj *q_obj;
+	struct bnx2x_func_sp_obj *f_obj = &bp->func_obj;
+	struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw;
 
 	hw_cons = le16_to_cpu(*bp->eq_cons_sb);
 
@@ -3724,7 +4321,8 @@
 		/* handle eq element */
 		switch (opcode) {
 		case EVENT_RING_OPCODE_STAT_QUERY:
-			DP(NETIF_MSG_TIMER, "got statistics comp event\n");
+			DP(NETIF_MSG_TIMER, "got statistics comp event %d\n",
+			   bp->stats_comp++);
 			/* nothing to do with stats comp */
 			continue;
 
@@ -3739,12 +4337,13 @@
 #ifdef BCM_CNIC
 			if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
 				goto next_spqe;
-			if (cid == BNX2X_FCOE_ETH_CID)
-				bnx2x_fcoe(bp, state) = BNX2X_FP_STATE_CLOSED;
-			else
 #endif
-				bnx2x_fp(bp, cid, state) =
-						BNX2X_FP_STATE_CLOSED;
+			q_obj = bnx2x_cid_to_q_obj(bp, cid);
+
+			if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL))
+				break;
 			goto next_spqe;
 
@@ -3752,42 +4351,75 @@
 			DP(NETIF_MSG_IFUP, "got STOP TRAFFIC\n");
 			bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
 			goto next_spqe;
+
 		case EVENT_RING_OPCODE_START_TRAFFIC:
 			DP(NETIF_MSG_IFUP, "got START TRAFFIC\n");
 			bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
 			goto next_spqe;
+		case EVENT_RING_OPCODE_FUNCTION_START:
+			DP(NETIF_MSG_IFUP, "got FUNC_START ramrod\n");
+			if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_START))
+				break;
+
+			goto next_spqe;
+
+		case EVENT_RING_OPCODE_FUNCTION_STOP:
+			DP(NETIF_MSG_IFDOWN, "got FUNC_STOP ramrod\n");
+			if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_STOP))
+				break;
+
+			goto next_spqe;
 		}
 
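+		/* Dispatch on (opcode | bp->state): state values occupy the
+		 * high bits while EQ opcodes are small integers, so OR-ing
+		 * them forms a distinct case label, e.g. MULTICAST_RULES
+		 * completions are handled only in the OPEN, DIAG or
+		 * CLOSING_WAIT4_HALT states.
+		 */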
 		switch (opcode | bp->state) {
-		case (EVENT_RING_OPCODE_FUNCTION_START |
+		case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
+		      BNX2X_STATE_OPEN):
+		case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
 		      BNX2X_STATE_OPENING_WAIT4_PORT):
-			DP(NETIF_MSG_IFUP, "got setup ramrod\n");
-			bp->state = BNX2X_STATE_FUNC_STARTED;
-			break;
-
-		case (EVENT_RING_OPCODE_FUNCTION_STOP |
-		      BNX2X_STATE_CLOSING_WAIT4_HALT):
-			DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
-			bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
+			cid = elem->message.data.eth_event.echo &
+				BNX2X_SWCID_MASK;
+			DP(NETIF_MSG_IFUP, "got RSS_UPDATE ramrod. CID %d\n",
+			   cid);
+			rss_raw->clear_pending(rss_raw);
 			break;
 
 		case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
 		case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
-			DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
-			if (elem->message.data.set_mac_event.echo)
-				bp->set_mac_pending = 0;
-			break;
-
 		case (EVENT_RING_OPCODE_SET_MAC |
 		      BNX2X_STATE_CLOSING_WAIT4_HALT):
-			DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
-			if (elem->message.data.set_mac_event.echo)
-				bp->set_mac_pending = 0;
+		case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
+		      BNX2X_STATE_OPEN):
+		case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
+		      BNX2X_STATE_DIAG):
+		case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
+		      BNX2X_STATE_CLOSING_WAIT4_HALT):
+			DP(NETIF_MSG_IFUP, "got (un)set mac ramrod\n");
+			bnx2x_handle_classification_eqe(bp, elem);
+			break;
+
+		case (EVENT_RING_OPCODE_MULTICAST_RULES |
+		      BNX2X_STATE_OPEN):
+		case (EVENT_RING_OPCODE_MULTICAST_RULES |
+		      BNX2X_STATE_DIAG):
+		case (EVENT_RING_OPCODE_MULTICAST_RULES |
+		      BNX2X_STATE_CLOSING_WAIT4_HALT):
+			DP(NETIF_MSG_IFUP, "got mcast ramrod\n");
+			bnx2x_handle_mcast_eqe(bp);
+			break;
+
+		case (EVENT_RING_OPCODE_FILTERS_RULES |
+		      BNX2X_STATE_OPEN):
+		case (EVENT_RING_OPCODE_FILTERS_RULES |
+		      BNX2X_STATE_DIAG):
+		case (EVENT_RING_OPCODE_FILTERS_RULES |
+		      BNX2X_STATE_CLOSING_WAIT4_HALT):
+			DP(NETIF_MSG_IFUP, "got rx_mode ramrod\n");
+			bnx2x_handle_rx_mode_eqe(bp);
 			break;
 		default:
 			/* unknown event log error and continue */
-			BNX2X_ERR("Unknown EQ event %d\n",
-				  elem->message.opcode);
+			BNX2X_ERR("Unknown EQ event %d, bp->state 0x%x\n",
+				  elem->message.opcode, bp->state);
 		}
 next_spqe:
 		spqe_cnt++;
@@ -3810,12 +4442,6 @@
 	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
 	u16 status;
 
-	/* Return here if interrupt is disabled */
-	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
-		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
-		return;
-	}
-
 	status = bnx2x_update_dsb_idx(bp);
 /*	if (status == 0)				     */
 /*		BNX2X_ERR("spurious slowpath interrupt!\n"); */
@@ -3859,12 +4485,6 @@
 	struct net_device *dev = dev_instance;
 	struct bnx2x *bp = netdev_priv(dev);
 
-	/* Return here if interrupt is disabled */
-	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
-		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
-		return IRQ_HANDLED;
-	}
-
 	bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
 		     IGU_INT_DISABLE, 0);
 
@@ -3891,6 +4511,14 @@
 
 /* end of slow path */
 
+
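+/* Driver heartbeat: bnx2x_timer() advances bp->fw_drv_pulse_wr_seq
+ * (masked with DRV_PULSE_SEQ_MASK) and publishes it to the MCP mailbox
+ * here; the MCP reports its own sequence in mcp_pulse_mb so each side
+ * can detect a stalled peer.
+ */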
+void bnx2x_drv_pulse(struct bnx2x *bp)
+{
+	SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
+		 bp->fw_drv_pulse_wr_seq);
+}
+
 static void bnx2x_timer(unsigned long data)
 {
 	struct bnx2x *bp = (struct bnx2x *) data;
@@ -3898,9 +4526,6 @@
 	if (!netif_running(bp->dev))
 		return;
 
-	if (atomic_read(&bp->intr_sem) != 0)
-		goto timer_restart;
-
 	if (poll) {
 		struct bnx2x_fastpath *fp = &bp->fp[0];
 
@@ -3917,7 +4542,7 @@
 		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
 		/* TBD - add SYSTEM_TIME */
 		drv_pulse = bp->fw_drv_pulse_wr_seq;
-		SHMEM_WR(bp, func_mb[mb_idx].drv_pulse_mb, drv_pulse);
+		bnx2x_drv_pulse(bp);
 
 		mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
 			     MCP_PULSE_SEQ_MASK);
@@ -3935,7 +4560,6 @@
 	if (bp->state == BNX2X_STATE_OPEN)
 		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
 
-timer_restart:
 	mod_timer(&bp->timer, jiffies + bp->current_interval);
 }
 
@@ -3981,18 +4605,16 @@
 	struct hc_status_block_data_e1x sb_data_e1x;
 
 	/* disable the function first */
-	if (CHIP_IS_E2(bp)) {
+	if (!CHIP_IS_E1x(bp)) {
 		memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
-		sb_data_e2.common.p_func.pf_id = HC_FUNCTION_DISABLED;
-		sb_data_e2.common.p_func.vf_id = HC_FUNCTION_DISABLED;
+		sb_data_e2.common.state = SB_DISABLED;
 		sb_data_e2.common.p_func.vf_valid = false;
 		sb_data_p = (u32 *)&sb_data_e2;
 		data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
 	} else {
 		memset(&sb_data_e1x, 0,
 		       sizeof(struct hc_status_block_data_e1x));
-		sb_data_e1x.common.p_func.pf_id = HC_FUNCTION_DISABLED;
-		sb_data_e1x.common.p_func.vf_id = HC_FUNCTION_DISABLED;
+		sb_data_e1x.common.state = SB_DISABLED;
 		sb_data_e1x.common.p_func.vf_valid = false;
 		sb_data_p = (u32 *)&sb_data_e1x;
 		data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
@@ -4026,8 +4648,7 @@
 	struct hc_sp_status_block_data sp_sb_data;
 	memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
 
-	sp_sb_data.p_func.pf_id = HC_FUNCTION_DISABLED;
-	sp_sb_data.p_func.vf_id = HC_FUNCTION_DISABLED;
+	sp_sb_data.state = SB_DISABLED;
 	sp_sb_data.p_func.vf_valid = false;
 
 	bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
@@ -4070,8 +4691,9 @@
 
 	bnx2x_zero_fp_sb(bp, fw_sb_id);
 
-	if (CHIP_IS_E2(bp)) {
+	if (!CHIP_IS_E1x(bp)) {
 		memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
+		sb_data_e2.common.state = SB_ENABLED;
 		sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
 		sb_data_e2.common.p_func.vf_id = vfid;
 		sb_data_e2.common.p_func.vf_valid = vf_valid;
@@ -4085,6 +4707,7 @@
 	} else {
 		memset(&sb_data_e1x, 0,
 		       sizeof(struct hc_status_block_data_e1x));
+		sb_data_e1x.common.state = SB_ENABLED;
 		sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
 		sb_data_e1x.common.p_func.vf_id = 0xff;
 		sb_data_e1x.common.p_func.vf_valid = false;
@@ -4108,19 +4731,7 @@
 	bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
 }
 
-static void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u16 fw_sb_id,
-					u8 sb_index, u8 disable, u16 usec)
-{
-	int port = BP_PORT(bp);
-	u8 ticks = usec / BNX2X_BTR;
-
-	storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
-
-	disable = disable ? 1 : (usec ? 0 : 1);
-	storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
-}
-
-static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u16 fw_sb_id,
+static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u8 fw_sb_id,
 				     u16 tx_usec, u16 rx_usec)
 {
 	bnx2x_update_coalesce_sb_index(bp, fw_sb_id, U_SB_ETH_RX_CQ_INDEX,
@@ -4167,7 +4778,7 @@
 			bp->attn_group[index].sig[sindex] =
 			   REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
 
-		if (CHIP_IS_E2(bp))
+		if (!CHIP_IS_E1x(bp))
 			/*
 			 * enable5 is separate from the rest of the registers,
 			 * and therefore the address skip is 4
@@ -4185,7 +4796,7 @@
 
 		REG_WR(bp, reg_offset, U64_LO(section));
 		REG_WR(bp, reg_offset + 4, U64_HI(section));
-	} else if (CHIP_IS_E2(bp)) {
+	} else if (!CHIP_IS_E1x(bp)) {
 		REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
 		REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
 	}
@@ -4195,6 +4806,7 @@
 
 	bnx2x_zero_sp_sb(bp);
 
+	sp_sb_data.state		= SB_ENABLED;
 	sp_sb_data.host_sb_addr.lo	= U64_LO(section);
 	sp_sb_data.host_sb_addr.hi	= U64_HI(section);
 	sp_sb_data.igu_sb_id		= igu_sp_sb_index;
@@ -4205,9 +4817,6 @@
 
 	bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
 
-	bp->stats_pending = 0;
-	bp->set_mac_pending = 0;
-
 	bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
 }
 
@@ -4253,146 +4862,129 @@
 		min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
 }
 
-void bnx2x_push_indir_table(struct bnx2x *bp)
-{
-	int func = BP_FUNC(bp);
-	int i;
 
-	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
+/* called with netif_addr_lock_bh() */
+void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
+			 unsigned long rx_mode_flags,
+			 unsigned long rx_accept_flags,
+			 unsigned long tx_accept_flags,
+			 unsigned long ramrod_flags)
+{
+	struct bnx2x_rx_mode_ramrod_params ramrod_param;
+	int rc;
+
+	memset(&ramrod_param, 0, sizeof(ramrod_param));
+
+	/* Prepare ramrod parameters */
+	ramrod_param.cid = 0;
+	ramrod_param.cl_id = cl_id;
+	ramrod_param.rx_mode_obj = &bp->rx_mode_obj;
+	ramrod_param.func_id = BP_FUNC(bp);
+
+	ramrod_param.pstate = &bp->sp_state;
+	ramrod_param.state = BNX2X_FILTER_RX_MODE_PENDING;
+
+	ramrod_param.rdata = bnx2x_sp(bp, rx_mode_rdata);
+	ramrod_param.rdata_mapping = bnx2x_sp_mapping(bp, rx_mode_rdata);
+
+	set_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
+
+	ramrod_param.ramrod_flags = ramrod_flags;
+	ramrod_param.rx_mode_flags = rx_mode_flags;
+
+	ramrod_param.rx_accept_flags = rx_accept_flags;
+	ramrod_param.tx_accept_flags = tx_accept_flags;
+
+	rc = bnx2x_config_rx_mode(bp, &ramrod_param);
+	if (rc < 0) {
+		BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode);
 		return;
-
-	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
-		REG_WR8(bp, BAR_TSTRORM_INTMEM +
-			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
-			bp->fp->cl_id + bp->rx_indir_table[i]);
+	}
 }
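+
+/* Usage sketch (hypothetical flag set, for illustration only):
+ *
+ *	unsigned long rx_accept = 0, tx_accept = 0, ramrod = 0;
+ *
+ *	__set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept);
+ *	__set_bit(RAMROD_RX, &ramrod);
+ *	bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, 0, rx_accept, tx_accept,
+ *			    ramrod);
+ *
+ * bnx2x_set_storm_rx_mode() below is the in-tree caller; it builds the
+ * full accept sets for the current bp->rx_mode.
+ */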
 
-static void bnx2x_init_ind_table(struct bnx2x *bp)
-{
-	int i;
-
-	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
-		bp->rx_indir_table[i] = i % BNX2X_NUM_ETH_QUEUES(bp);
-
-	bnx2x_push_indir_table(bp);
-}
-
+/* called with netif_addr_lock_bh() */
 void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
 {
-	int mode = bp->rx_mode;
-	int port = BP_PORT(bp);
-	u16 cl_id;
-	u32 def_q_filters = 0;
+	unsigned long rx_mode_flags = 0, ramrod_flags = 0;
+	unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
 
-	/* All but management unicast packets should pass to the host as well */
-	u32 llh_mask =
-		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
-		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
-		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
-		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
-
-	switch (mode) {
-	case BNX2X_RX_MODE_NONE: /* no Rx */
-		def_q_filters = BNX2X_ACCEPT_NONE;
 #ifdef BCM_CNIC
-		if (!NO_FCOE(bp)) {
-			cl_id = bnx2x_fcoe(bp, cl_id);
-			bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
-		}
-#endif
-		break;
+	if (!NO_FCOE(bp))
+		/* Configure rx_mode of FCoE Queue */
+		__set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags);
+#endif
+
+	switch (bp->rx_mode) {
+	case BNX2X_RX_MODE_NONE:
+		/*
+		 * 'drop all' supersedes any accept flags that may have been
+		 * passed to the function.
+		 */
+		break;
 	case BNX2X_RX_MODE_NORMAL:
-		def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
-				BNX2X_ACCEPT_MULTICAST;
-#ifdef BCM_CNIC
-		if (!NO_FCOE(bp)) {
-			cl_id = bnx2x_fcoe(bp, cl_id);
-			bnx2x_rxq_set_mac_filters(bp, cl_id,
-						  BNX2X_ACCEPT_UNICAST |
-						  BNX2X_ACCEPT_MULTICAST);
-		}
-#endif
-		break;
+		__set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
+		__set_bit(BNX2X_ACCEPT_MULTICAST, &rx_accept_flags);
+		__set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);
 
+		/* internal switching mode */
+		__set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
+		__set_bit(BNX2X_ACCEPT_MULTICAST, &tx_accept_flags);
+		__set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
+
+		break;
 	case BNX2X_RX_MODE_ALLMULTI:
-		def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
-				BNX2X_ACCEPT_ALL_MULTICAST;
-#ifdef BCM_CNIC
-		/*
-		 *  Prevent duplication of multicast packets by configuring FCoE
-		 *  L2 Client to receive only matched unicast frames.
-		 */
-		if (!NO_FCOE(bp)) {
-			cl_id = bnx2x_fcoe(bp, cl_id);
-			bnx2x_rxq_set_mac_filters(bp, cl_id,
-						  BNX2X_ACCEPT_UNICAST);
-		}
-#endif
-		break;
+		__set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
+		__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &rx_accept_flags);
+		__set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);
 
+		/* internal switching mode */
+		__set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
+		__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &tx_accept_flags);
+		__set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
+
+		break;
 	case BNX2X_RX_MODE_PROMISC:
-		def_q_filters |= BNX2X_PROMISCUOUS_MODE;
-#ifdef BCM_CNIC
-		/*
-		 *  Prevent packets duplication by configuring DROP_ALL for FCoE
-		 *  L2 Client.
+		/* According to the definition of SI mode, an iface in promisc mode
+		 * should receive matched and unmatched (in resolution of port)
+		 * unicast packets.
 		 */
-		if (!NO_FCOE(bp)) {
-			cl_id = bnx2x_fcoe(bp, cl_id);
-			bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
-		}
-#endif
-		/* pass management unicast packets as well */
-		llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
-		break;
+		__set_bit(BNX2X_ACCEPT_UNMATCHED, &rx_accept_flags);
+		__set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
+		__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &rx_accept_flags);
+		__set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);
 
-	default:
-		BNX2X_ERR("BAD rx mode (%d)\n", mode);
+		/* internal switching mode */
+		__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &tx_accept_flags);
+		__set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
+
+		if (IS_MF_SI(bp))
+			__set_bit(BNX2X_ACCEPT_ALL_UNICAST, &tx_accept_flags);
+		else
+			__set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
+
 		break;
+	default:
+		BNX2X_ERR("Unknown rx_mode: %d\n", bp->rx_mode);
+		return;
 	}
 
-	cl_id = BP_L_ID(bp);
-	bnx2x_rxq_set_mac_filters(bp, cl_id, def_q_filters);
+	if (bp->rx_mode != BNX2X_RX_MODE_NONE) {
+		__set_bit(BNX2X_ACCEPT_ANY_VLAN, &rx_accept_flags);
+		__set_bit(BNX2X_ACCEPT_ANY_VLAN, &tx_accept_flags);
+	}
 
-	REG_WR(bp,
-	       (port ? NIG_REG_LLH1_BRB1_DRV_MASK :
-		       NIG_REG_LLH0_BRB1_DRV_MASK), llh_mask);
+	__set_bit(RAMROD_RX, &ramrod_flags);
+	__set_bit(RAMROD_TX, &ramrod_flags);
 
-	DP(NETIF_MSG_IFUP, "rx mode %d\n"
-		"drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n"
-		"accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n"
-		"unmatched_ucast 0x%x\n", mode,
-		bp->mac_filters.ucast_drop_all,
-		bp->mac_filters.mcast_drop_all,
-		bp->mac_filters.bcast_drop_all,
-		bp->mac_filters.ucast_accept_all,
-		bp->mac_filters.mcast_accept_all,
-		bp->mac_filters.bcast_accept_all,
-		bp->mac_filters.unmatched_unicast
-	);
-
-	storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
+	bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags, rx_accept_flags,
+			    tx_accept_flags, ramrod_flags);
 }
 
 static void bnx2x_init_internal_common(struct bnx2x *bp)
 {
 	int i;
 
-	if (!CHIP_IS_E1(bp)) {
-
-		/* xstorm needs to know whether to add  ovlan to packets or not,
-		 * in switch-independent we'll write 0 to here... */
-		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
-			bp->mf_mode);
-		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
-			bp->mf_mode);
-		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
-			bp->mf_mode);
-		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
-			bp->mf_mode);
-	}
-
 	if (IS_MF_SI(bp))
 		/*
 		 * In switch independent mode, the TSTORM needs to accept
@@ -4401,25 +4993,22 @@
 		 */
 		REG_WR8(bp, BAR_TSTRORM_INTMEM +
 			    TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);
+	else if (!CHIP_IS_E1(bp)) /* 57710 doesn't support MF */
+		REG_WR8(bp, BAR_TSTRORM_INTMEM +
+			    TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 0);
 
 	/* Zero this manually as its initialization is
 	   currently missing in the initTool */
 	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
 		REG_WR(bp, BAR_USTRORM_INTMEM +
 		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
-	if (CHIP_IS_E2(bp)) {
+	if (!CHIP_IS_E1x(bp)) {
 		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
 			CHIP_INT_MODE_IS_BC(bp) ?
 			HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
 	}
 }
 
-static void bnx2x_init_internal_port(struct bnx2x *bp)
-{
-	/* port */
-	bnx2x_dcb_init_intmem_pfc(bp);
-}
-
 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
 {
 	switch (load_code) {
@@ -4429,7 +5018,7 @@
 		/* no break */
 
 	case FW_MSG_CODE_DRV_LOAD_PORT:
-		bnx2x_init_internal_port(bp);
+		/* nothing to do */
 		/* no break */
 
 	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
@@ -4443,31 +5032,57 @@
 	}
 }
 
-static void bnx2x_init_fp_sb(struct bnx2x *bp, int fp_idx)
+static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp)
+{
+	return fp->bp->igu_base_sb + fp->index + CNIC_CONTEXT_USE;
+}
+
+static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp)
+{
+	return fp->bp->base_fw_ndsb + fp->index + CNIC_CONTEXT_USE;
+}
+
+static inline u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp)
+{
+	if (CHIP_IS_E1x(fp->bp))
+		return BP_L_ID(fp->bp) + fp->index;
+	else	/* We want Client ID to be the same as IGU SB ID for 57712 */
+		return bnx2x_fp_igu_sb_id(fp);
+}
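+
+/* Example with illustrative numbers: assuming CNIC_CONTEXT_USE == 1,
+ * igu_base_sb == 8 and base_fw_ndsb == 8, queue 0 gets igu_sb_id ==
+ * fw_sb_id == 9; its cl_id is then BP_L_ID(bp) + 0 on E1x, or 9 (the
+ * IGU SB ID itself) on 57712.
+ */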
+
+static void bnx2x_init_fp(struct bnx2x *bp, int fp_idx)
 {
 	struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
-
-	fp->state = BNX2X_FP_STATE_CLOSED;
+	unsigned long q_type = 0;
 
 	fp->cid = fp_idx;
-	fp->cl_id = BP_L_ID(bp) + fp_idx;
-	fp->fw_sb_id = bp->base_fw_ndsb + fp->cl_id + CNIC_CONTEXT_USE;
-	fp->igu_sb_id = bp->igu_base_sb + fp_idx + CNIC_CONTEXT_USE;
+	fp->cl_id = bnx2x_fp_cl_id(fp);
+	fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp);
+	fp->igu_sb_id = bnx2x_fp_igu_sb_id(fp);
 	/* qZone id equals to FW (per path) client id */
-	fp->cl_qzone_id  = fp->cl_id +
-			   BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 :
-				ETH_MAX_RX_CLIENTS_E1H);
+	fp->cl_qzone_id  = bnx2x_fp_qzone_id(fp);
+
 	/* init shortcut */
-	fp->ustorm_rx_prods_offset = CHIP_IS_E2(bp) ?
-			    USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id) :
-			    USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
+	fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp);
 	/* Setup SB indices */
 	fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
 	fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
 
+	/* Configure Queue State object */
+	__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
+	__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
+	bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, fp->cid, BP_FUNC(bp),
+		bnx2x_sp(bp, q_rdata), bnx2x_sp_mapping(bp, q_rdata),
+			      q_type);
+
+	/* Configure classification DBs: Always enable Tx switching */
+	bnx2x_init_vlan_mac_fp_objs(fp, BNX2X_OBJ_TYPE_RX_TX);
+
 	DP(NETIF_MSG_IFUP, "queue[%d]:  bnx2x_init_sb(%p,%p)  "
 				   "cl_id %d  fw_sb %d  igu_sb %d\n",
-		   fp_idx, bp, fp->status_blk.e1x_sb, fp->cl_id, fp->fw_sb_id,
+		   fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
 		   fp->igu_sb_id);
 	bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
 		      fp->fw_sb_id, fp->igu_sb_id);
@@ -4480,17 +5095,21 @@
 	int i;
 
 	for_each_eth_queue(bp, i)
-		bnx2x_init_fp_sb(bp, i);
+		bnx2x_init_fp(bp, i);
 #ifdef BCM_CNIC
 	if (!NO_FCOE(bp))
 		bnx2x_init_fcoe_fp(bp);
 
 	bnx2x_init_sb(bp, bp->cnic_sb_mapping,
 		      BNX2X_VF_ID_INVALID, false,
-		      CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));
+		      bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp));
 
 #endif
 
+	/* Initialize MOD_ABS interrupts */
+	bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
+			       bp->common.shmem_base, bp->common.shmem2_base,
+			       BP_PORT(bp));
 	/* ensure status block indices were read */
 	rmb();
 
@@ -4502,12 +5121,8 @@
 	bnx2x_init_eq_ring(bp);
 	bnx2x_init_internal(bp, load_code);
 	bnx2x_pf_init(bp);
-	bnx2x_init_ind_table(bp);
 	bnx2x_stats_init(bp);
 
-	/* At this point, we are ready for interrupts */
-	atomic_set(&bp->intr_sem, 0);
-
 	/* flush all before enabling interrupts */
 	mb();
 	mmiowb();
@@ -4711,8 +5326,8 @@
 	msleep(50);
 	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
 	msleep(50);
-	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
-	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
+	bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
+	bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
 
 	DP(NETIF_MSG_HW, "part2\n");
 
@@ -4776,8 +5391,8 @@
 	msleep(50);
 	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
 	msleep(50);
-	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
-	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
+	bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
+	bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
 #ifndef BCM_CNIC
 	/* set NIC mode */
 	REG_WR(bp, PRS_REG_NIC_MODE, 1);
@@ -4797,7 +5412,7 @@
 static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
 {
 	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
-	if (CHIP_IS_E2(bp))
+	if (!CHIP_IS_E1x(bp))
 		REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
 	else
 		REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
@@ -4831,7 +5446,7 @@
 
 	if (CHIP_REV_IS_FPGA(bp))
 		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
-	else if (CHIP_IS_E2(bp))
+	else if (!CHIP_IS_E1x(bp))
 		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0,
 			   (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF
 				| PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT
@@ -4844,7 +5459,11 @@
 	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
 	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
 /*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
-/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
+
+	if (!CHIP_IS_E1x(bp))
+		/* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */
+		REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);
+
 	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
 	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
 /*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
@@ -4853,10 +5472,24 @@
 
 static void bnx2x_reset_common(struct bnx2x *bp)
 {
+	u32 val = 0x1400;
+
 	/* reset_common */
 	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
 	       0xd3ffff7f);
-	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
+
+	if (CHIP_IS_E3(bp)) {
+		val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
+		val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
+	}
+
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, val);
+}
+
+static void bnx2x_setup_dmae(struct bnx2x *bp)
+{
+	bp->dmae_ready = 0;
+	spin_lock_init(&bp->dmae_lock);
 }
 
 static void bnx2x_init_pxp(struct bnx2x *bp)
@@ -4973,7 +5606,7 @@
 	DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num);
 }
 
-static void bnx2x_pf_disable(struct bnx2x *bp)
+void bnx2x_pf_disable(struct bnx2x *bp)
 {
 	u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
 	val &= ~IGU_PF_CONF_FUNC_EN;
@@ -4983,22 +5616,48 @@
 	REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
 }
 
-static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
+static inline void bnx2x__common_init_phy(struct bnx2x *bp)
 {
-	u32 val, i;
+	u32 shmem_base[2], shmem2_base[2];
+	shmem_base[0] =  bp->common.shmem_base;
+	shmem2_base[0] = bp->common.shmem2_base;
+	if (!CHIP_IS_E1x(bp)) {
+		shmem_base[1] =
+			SHMEM2_RD(bp, other_shmem_base_addr);
+		shmem2_base[1] =
+			SHMEM2_RD(bp, other_shmem2_base_addr);
+	}
+	bnx2x_acquire_phy_lock(bp);
+	bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
+			      bp->common.chip_id);
+	bnx2x_release_phy_lock(bp);
+}
+
+/**
+ * bnx2x_init_hw_common - initialize the HW at the COMMON phase.
+ *
+ * @bp:		driver handle
+ */
+static int bnx2x_init_hw_common(struct bnx2x *bp)
+{
+	u32 val;
 
 	DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_ABS_FUNC(bp));
 
 	bnx2x_reset_common(bp);
 	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
-	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
 
-	bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
-	if (!CHIP_IS_E1(bp))
-		REG_WR(bp, MISC_REG_E1HMF_MODE, IS_MF(bp));
+	val = 0xfffc;
+	if (CHIP_IS_E3(bp)) {
+		val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
+		val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
+	}
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val);
 
-	if (CHIP_IS_E2(bp)) {
-		u8 fid;
+	bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON);
+
+	if (!CHIP_IS_E1x(bp)) {
+		u8 abs_func_id;
 
 		/**
 		 * In 4-port or 2-port mode we need to turn off master-enable
@@ -5007,29 +5666,30 @@
 		 * for all functions on the given path, this means 0,2,4,6 for
 		 * path 0 and 1,3,5,7 for path 1
 		 */
-		for (fid = BP_PATH(bp); fid  < E2_FUNC_MAX*2; fid += 2) {
-			if (fid == BP_ABS_FUNC(bp)) {
+		for (abs_func_id = BP_PATH(bp);
+		     abs_func_id < E2_FUNC_MAX*2; abs_func_id += 2) {
+			if (abs_func_id == BP_ABS_FUNC(bp)) {
 				REG_WR(bp,
 				    PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
 				    1);
 				continue;
 			}
 
-			bnx2x_pretend_func(bp, fid);
+			bnx2x_pretend_func(bp, abs_func_id);
 			/* clear pf enable */
 			bnx2x_pf_disable(bp);
 			bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
 		}
 	}
 
-	bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
+	bnx2x_init_block(bp, BLOCK_PXP, PHASE_COMMON);
 	if (CHIP_IS_E1(bp)) {
 		/* enable HW interrupt from PXP on USDM overflow
 		   bit 16 on INT_MASK_0 */
 		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
 	}
 
-	bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
+	bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON);
 	bnx2x_init_pxp(bp);
 
 #ifdef __BIG_ENDIAN
@@ -5072,7 +5732,69 @@
 	 * This needs to be done by the first PF that is loaded in a path
 	 * (i.e. common phase)
 	 */
-	if (CHIP_IS_E2(bp)) {
+	if (!CHIP_IS_E1x(bp)) {
+/* In E2 there is a bug in the timers block that can cause function 6 / 7
+ * (i.e. vnic3) to start even if it is marked as "scan-off".
+ * This occurs when a different function (func2,3) is being marked
+ * as "scan-off". Real-life scenario for example: if a driver is being
+ * load-unloaded while func6,7 are down. This will cause the timer to access
+ * the ilt, translate to a logical address and send a request to read/write.
+ * Since the ilt for the function that is down is not valid, this will cause
+ * a translation error which is unrecoverable.
+ * The Workaround is intended to make sure that when this happens nothing fatal
+ * will occur. The workaround:
+ *	1.  First PF driver which loads on a path will:
+ *		a.  After taking the chip out of reset, by using pretend,
+ *		    it will write "0" to the following registers of
+ *		    the other vnics.
+ *		    REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
+ *		    REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0);
+ *		    REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0);
+ *		    And for itself it will write '1' to
+ *		    PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable
+ *		    dmae-operations (writing to pram for example.)
+ *		    note: can be done for only function 6,7 but cleaner this
+ *			  way.
+ *		b.  Write zero+valid to the entire ILT.
+ *		c.  Init the first_timers_ilt_entry, last_timers_ilt_entry of
+ *		    VNIC3 (of that port). The range allocated will be the
+ *		    entire ILT. This is needed to prevent ILT range errors.
+ *	2.  Any PF driver load flow:
+ *		a.  ILT update with the physical addresses of the allocated
+ *		    logical pages.
+ *		b.  Wait 20msec. - note that this timeout is needed to make
+ *		    sure there are no requests in one of the PXP internal
+ *		    queues with "old" ILT addresses.
+ *		c.  PF enable in the PGLC.
+ *		d.  Clear the was_error of the PF in the PGLC. (could have
+ *		    occurred while the driver was down)
+ *		e.  PF enable in the CFC (WEAK + STRONG)
+ *		f.  Timers scan enable
+ *	3.  PF driver unload flow:
+ *		a.  Clear the Timers scan_en.
+ *		b.  Polling for scan_on=0 for that PF.
+ *		c.  Clear the PF enable bit in the PXP.
+ *		d.  Clear the PF enable in the CFC (WEAK + STRONG)
+ *		e.  Write zero+valid to all ILT entries (The valid bit must
+ *		    stay set)
+ *		f.  If this is VNIC 3 of a port then also init
+ *		    first_timers_ilt_entry to zero and last_timers_ilt_entry
+ *		    to the last entry in the ILT.
+ *
+ *	Notes:
+ *	Currently the PF error in the PGLC is non-recoverable.
+ *	In the future there will be a recovery routine for this error.
+ *	Currently attention is masked.
+ *	Having an MCP lock on the load/unload process does not guarantee that
+ *	there is no Timer disable during Func6/7 enable. This is because the
+ *	Timers scan is currently being cleared by the MCP on FLR.
+ *	Step 2.d can be done only for PF6/7 and the driver can also check if
+ *	there is error before clearing it. But the flow above is simpler and
+ *	more general.
+ *	All ILT entries are written by zero+valid and not just PF6/7
+ *	ILT entries since in the future the ILT entries allocation for
+ *	PF-s might be dynamic.
+ */
 		struct ilt_client_info ilt_cli;
 		struct bnx2x_ilt ilt;
 		memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
@@ -5086,7 +5808,7 @@
 		/* Step 1: set zeroes to all ilt page entries with valid bit on
 		 * Step 2: set the timers first/last ilt entry to point
 		 * to the entire range to prevent ILT range error for 3rd/4th
 		 * vnic	(this code assumes existence of the vnic)
 		 *
 		 * both steps performed by call to bnx2x_ilt_client_init_op()
 		 * with dummy TM client
@@ -5107,12 +5829,12 @@
 	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
 	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
 
-	if (CHIP_IS_E2(bp)) {
+	if (!CHIP_IS_E1x(bp)) {
 		int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
 				(CHIP_REV_IS_FPGA(bp) ? 400 : 0);
-		bnx2x_init_block(bp, PGLUE_B_BLOCK, COMMON_STAGE);
+		bnx2x_init_block(bp, BLOCK_PGLUE_B, PHASE_COMMON);
 
-		bnx2x_init_block(bp, ATC_BLOCK, COMMON_STAGE);
+		bnx2x_init_block(bp, BLOCK_ATC, PHASE_COMMON);
 
 		/* let the HW do its magic ... */
 		do {
@@ -5126,26 +5848,27 @@
 		}
 	}
 
-	bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
+	bnx2x_init_block(bp, BLOCK_DMAE, PHASE_COMMON);
 
 	/* clean the DMAE memory */
 	bp->dmae_ready = 1;
-	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
+	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8, 1);
 
-	bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
-	bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
-	bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
-	bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
+	bnx2x_init_block(bp, BLOCK_TCM, PHASE_COMMON);
+
+	bnx2x_init_block(bp, BLOCK_UCM, PHASE_COMMON);
+
+	bnx2x_init_block(bp, BLOCK_CCM, PHASE_COMMON);
+
+	bnx2x_init_block(bp, BLOCK_XCM, PHASE_COMMON);
 
 	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
 	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
 	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
 	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
 
-	bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
+	bnx2x_init_block(bp, BLOCK_QM, PHASE_COMMON);
 
-	if (CHIP_MODE_IS_4_PORT(bp))
-		bnx2x_init_block(bp, QM_4PORT_BLOCK, COMMON_STAGE);
 
 	/* QM queues pointers table */
 	bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
@@ -5155,57 +5878,51 @@
 	REG_WR(bp, QM_REG_SOFT_RESET, 0);
 
 #ifdef BCM_CNIC
-	bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
+	bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON);
 #endif
 
-	bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
+	bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON);
 	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
-
-	if (!CHIP_REV_IS_SLOW(bp)) {
+	if (!CHIP_REV_IS_SLOW(bp))
 		/* enable hw interrupt from doorbell Q */
 		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
-	}
 
-	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
-	if (CHIP_MODE_IS_4_PORT(bp)) {
-		REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD, 248);
-		REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD, 328);
-	}
+	bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
 
-	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
+	bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
 	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
-#ifndef BCM_CNIC
-	/* set NIC mode */
-	REG_WR(bp, PRS_REG_NIC_MODE, 1);
-#endif
-	if (!CHIP_IS_E1(bp))
-		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF_SD(bp));
 
-	if (CHIP_IS_E2(bp)) {
-		/* Bit-map indicating which L2 hdrs may appear after the
-		   basic Ethernet header */
-		int has_ovlan = IS_MF_SD(bp);
-		REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
-		REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
+	if (!CHIP_IS_E1(bp))
+		REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan);
+
+	if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp))
+		/* Bit-map indicating which L2 hdrs may appear
+		 * after the basic Ethernet header
+		 */
+		REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC,
+		       bp->path_has_ovlan ? 7 : 6);
+
+	bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON);
+	bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON);
+	bnx2x_init_block(bp, BLOCK_USDM, PHASE_COMMON);
+	bnx2x_init_block(bp, BLOCK_XSDM, PHASE_COMMON);
+
+	if (!CHIP_IS_E1x(bp)) {
+		/* reset VFC memories */
+		REG_WR(bp, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
+			   VFC_MEMORIES_RST_REG_CAM_RST |
+			   VFC_MEMORIES_RST_REG_RAM_RST);
+		REG_WR(bp, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
+			   VFC_MEMORIES_RST_REG_CAM_RST |
+			   VFC_MEMORIES_RST_REG_RAM_RST);
+
+		msleep(20);
 	}
 
-	bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
-	bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
-	bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
-	bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
-
-	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
-	bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
-	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
-	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
-
-	bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
-	bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
-	bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
-	bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
-
-	if (CHIP_MODE_IS_4_PORT(bp))
-		bnx2x_init_block(bp, XSEM_4PORT_BLOCK, COMMON_STAGE);
+	bnx2x_init_block(bp, BLOCK_TSEM, PHASE_COMMON);
+	bnx2x_init_block(bp, BLOCK_USEM, PHASE_COMMON);
+	bnx2x_init_block(bp, BLOCK_CSEM, PHASE_COMMON);
+	bnx2x_init_block(bp, BLOCK_XSEM, PHASE_COMMON);
 
 	/* sync semi rtc */
 	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
@@ -5213,21 +5930,18 @@
 	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
 	       0x80000000);
 
-	bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
-	bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
-	bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
+	bnx2x_init_block(bp, BLOCK_UPB, PHASE_COMMON);
+	bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON);
+	bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON);
 
-	if (CHIP_IS_E2(bp)) {
-		int has_ovlan = IS_MF_SD(bp);
-		REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
-		REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
-	}
+	if (!CHIP_IS_E1x(bp))
+		REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC,
+		       bp->path_has_ovlan ? 7 : 6);
 
 	REG_WR(bp, SRC_REG_SOFT_RST, 1);
-	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
-		REG_WR(bp, i, random32());
 
-	bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
+	bnx2x_init_block(bp, BLOCK_SRC, PHASE_COMMON);
+
 #ifdef BCM_CNIC
 	REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
 	REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
@@ -5248,11 +5962,11 @@
 					  "of cdu_context(%ld)\n",
 			 (long)sizeof(union cdu_context));
 
-	bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
+	bnx2x_init_block(bp, BLOCK_CDU, PHASE_COMMON);
 	val = (4 << 24) + (0 << 12) + 1024;
 	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
 
-	bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
+	bnx2x_init_block(bp, BLOCK_CFC, PHASE_COMMON);
 	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
 	/* enable context validation interrupt from CFC */
 	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
@@ -5260,20 +5974,19 @@
 	/* set the thresholds to prevent CFC/CDU race */
 	REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
 
-	bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
+	bnx2x_init_block(bp, BLOCK_HC, PHASE_COMMON);
 
-	if (CHIP_IS_E2(bp) && BP_NOMCP(bp))
+	if (!CHIP_IS_E1x(bp) && BP_NOMCP(bp))
 		REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
 
-	bnx2x_init_block(bp, IGU_BLOCK, COMMON_STAGE);
-	bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
+	bnx2x_init_block(bp, BLOCK_IGU, PHASE_COMMON);
+	bnx2x_init_block(bp, BLOCK_MISC_AEU, PHASE_COMMON);
 
-	bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
 	/* Reset PCIE errors for debug */
 	REG_WR(bp, 0x2814, 0xffffffff);
 	REG_WR(bp, 0x3820, 0xffffffff);
 
-	if (CHIP_IS_E2(bp)) {
+	if (!CHIP_IS_E1x(bp)) {
 		REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
 			   (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
 				PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
@@ -5287,21 +6000,15 @@
 				PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
 	}
 
-	bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
-	bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
-	bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
-	bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
-
-	bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
+	bnx2x_init_block(bp, BLOCK_NIG, PHASE_COMMON);
 	if (!CHIP_IS_E1(bp)) {
-		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
+		/* in E3 this is done in the per-port section */
+		if (!CHIP_IS_E3(bp))
+			REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
+	}
+	if (CHIP_IS_E1H(bp))
+		/* not applicable for E2 (and above ...) */
 		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));
-	}
-	if (CHIP_IS_E2(bp)) {
-		/* Bit-map indicating which L2 hdrs may appear after the
-		   basic Ethernet header */
-		REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC, (IS_MF_SD(bp) ? 7 : 6));
-	}
 
 	if (CHIP_REV_IS_SLOW(bp))
 		msleep(200);
@@ -5343,127 +6050,136 @@
 	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
 
 	bnx2x_enable_blocks_attention(bp);
-	if (CHIP_PARITY_ENABLED(bp))
-		bnx2x_enable_blocks_parity(bp);
+	bnx2x_enable_blocks_parity(bp);
 
 	if (!BP_NOMCP(bp)) {
-		/* In E2 2-PORT mode, same ext phy is used for the two paths */
-		if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
-		    CHIP_IS_E1x(bp)) {
-			u32 shmem_base[2], shmem2_base[2];
-			shmem_base[0] =  bp->common.shmem_base;
-			shmem2_base[0] = bp->common.shmem2_base;
-			if (CHIP_IS_E2(bp)) {
-				shmem_base[1] =
-					SHMEM2_RD(bp, other_shmem_base_addr);
-				shmem2_base[1] =
-					SHMEM2_RD(bp, other_shmem2_base_addr);
-			}
-			bnx2x_acquire_phy_lock(bp);
-			bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
-					      bp->common.chip_id);
-			bnx2x_release_phy_lock(bp);
-		}
+		if (CHIP_IS_E1x(bp))
+			bnx2x__common_init_phy(bp);
 	} else
 		BNX2X_ERR("Bootcode is missing - can not initialize link\n");
 
 	return 0;
 }
 
+/**
+ * bnx2x_init_hw_common_chip - init HW at the COMMON_CHIP phase.
+ *
+ * @bp:		driver handle
+ */
+static int bnx2x_init_hw_common_chip(struct bnx2x *bp)
+{
+	int rc = bnx2x_init_hw_common(bp);
+
+	if (rc)
+		return rc;
+
+	/* In E2 2-PORT mode, same ext phy is used for the two paths */
+	if (!BP_NOMCP(bp))
+		bnx2x__common_init_phy(bp);
+
+	return 0;
+}
+
 static int bnx2x_init_hw_port(struct bnx2x *bp)
 {
 	int port = BP_PORT(bp);
-	int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
+	int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
 	u32 low, high;
 	u32 val;
 
+	bnx2x__link_reset(bp);
+
 	DP(BNX2X_MSG_MCP, "starting port init  port %d\n", port);
 
 	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
 
-	bnx2x_init_block(bp, PXP_BLOCK, init_stage);
-	bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
+	bnx2x_init_block(bp, BLOCK_MISC, init_phase);
+	bnx2x_init_block(bp, BLOCK_PXP, init_phase);
+	bnx2x_init_block(bp, BLOCK_PXP2, init_phase);
 
 	/* Timers bug workaround: disables the pf_master bit in pglue at
 	 * common phase, we need to enable it here before any dmae access are
 	 * attempted. Therefore we manually added the enable-master to the
 	 * port phase (it also happens in the function phase)
 	 */
-	if (CHIP_IS_E2(bp))
+	if (!CHIP_IS_E1x(bp))
 		REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
 
-	bnx2x_init_block(bp, TCM_BLOCK, init_stage);
-	bnx2x_init_block(bp, UCM_BLOCK, init_stage);
-	bnx2x_init_block(bp, CCM_BLOCK, init_stage);
-	bnx2x_init_block(bp, XCM_BLOCK, init_stage);
+	bnx2x_init_block(bp, BLOCK_ATC, init_phase);
+	bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
+	bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
+	bnx2x_init_block(bp, BLOCK_QM, init_phase);
+
+	bnx2x_init_block(bp, BLOCK_TCM, init_phase);
+	bnx2x_init_block(bp, BLOCK_UCM, init_phase);
+	bnx2x_init_block(bp, BLOCK_CCM, init_phase);
+	bnx2x_init_block(bp, BLOCK_XCM, init_phase);
 
 	/* QM cid (connection) count */
 	bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
 
 #ifdef BCM_CNIC
-	bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
+	bnx2x_init_block(bp, BLOCK_TM, init_phase);
 	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
 	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
 #endif
 
-	bnx2x_init_block(bp, DQ_BLOCK, init_stage);
-
-	if (CHIP_MODE_IS_4_PORT(bp))
-		bnx2x_init_block(bp, QM_4PORT_BLOCK, init_stage);
+	bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
 
 	if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
-		bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
-		if (CHIP_REV_IS_SLOW(bp) && CHIP_IS_E1(bp)) {
-			/* no pause for emulation and FPGA */
-			low = 0;
-			high = 513;
-		} else {
-			if (IS_MF(bp))
-				low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
-			else if (bp->dev->mtu > 4096) {
-				if (bp->flags & ONE_PORT_FLAG)
-					low = 160;
-				else {
-					val = bp->dev->mtu;
-					/* (24*1024 + val*4)/256 */
-					low = 96 + (val/64) +
-							((val % 64) ? 1 : 0);
-				}
-			} else
-				low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
-			high = low + 56;	/* 14*1024/256 */
-		}
+		bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
+
+		if (IS_MF(bp))
+			low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
+		else if (bp->dev->mtu > 4096) {
+			if (bp->flags & ONE_PORT_FLAG)
+				low = 160;
+			else {
+				val = bp->dev->mtu;
+				/* (24*1024 + val*4)/256 */
+				low = 96 + (val/64) +
+						((val % 64) ? 1 : 0);
+			}
+		} else
+			low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
+		high = low + 56;	/* 14*1024/256 */
 		REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
 		REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
 	}
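+	/* Worked example (illustrative): with mtu == 9000 in single-
+	 * function mode and ONE_PORT_FLAG clear, low = 96 + 9000/64 + 1 =
+	 * 237, i.e. roughly (24*1024 + 9000*4)/256, and high = 237 + 56 =
+	 * 293.
+	 */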
 
-	if (CHIP_MODE_IS_4_PORT(bp)) {
-		REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 + port*8, 248);
-		REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 + port*8, 328);
-		REG_WR(bp, (BP_PORT(bp) ? BRB1_REG_MAC_GUARANTIED_1 :
-					  BRB1_REG_MAC_GUARANTIED_0), 40);
-	}
-
-	bnx2x_init_block(bp, PRS_BLOCK, init_stage);
-
-	bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
-	bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
-	bnx2x_init_block(bp, USDM_BLOCK, init_stage);
-	bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
-
-	bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
-	bnx2x_init_block(bp, USEM_BLOCK, init_stage);
-	bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
-	bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
 	if (CHIP_MODE_IS_4_PORT(bp))
-		bnx2x_init_block(bp, XSEM_4PORT_BLOCK, init_stage);
+		REG_WR(bp, (BP_PORT(bp) ?
+			    BRB1_REG_MAC_GUARANTIED_1 :
+			    BRB1_REG_MAC_GUARANTIED_0), 40);
 
-	bnx2x_init_block(bp, UPB_BLOCK, init_stage);
-	bnx2x_init_block(bp, XPB_BLOCK, init_stage);
 
-	bnx2x_init_block(bp, PBF_BLOCK, init_stage);
+	bnx2x_init_block(bp, BLOCK_PRS, init_phase);
+	if (CHIP_IS_E3B0(bp))
+		/* Ovlan exists only if we are in multi-function +
+		 * switch-dependent mode; in switch-independent mode
+		 * there are no ovlan headers
+		 */
+		REG_WR(bp, BP_PORT(bp) ?
+		       PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
+		       PRS_REG_HDRS_AFTER_BASIC_PORT_0,
+		       (bp->path_has_ovlan ? 7 : 6));
 
-	if (!CHIP_IS_E2(bp)) {
+	bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
+	bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
+	bnx2x_init_block(bp, BLOCK_USDM, init_phase);
+	bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
+
+	bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
+	bnx2x_init_block(bp, BLOCK_USEM, init_phase);
+	bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
+	bnx2x_init_block(bp, BLOCK_XSEM, init_phase);
+
+	bnx2x_init_block(bp, BLOCK_UPB, init_phase);
+	bnx2x_init_block(bp, BLOCK_XPB, init_phase);
+
+	bnx2x_init_block(bp, BLOCK_PBF, init_phase);
+
+	if (CHIP_IS_E1x(bp)) {
 		/* configure PBF to work without PAUSE mtu 9000 */
 		REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
 
@@ -5479,20 +6195,20 @@
 	}
 
 #ifdef BCM_CNIC
-	bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
+	bnx2x_init_block(bp, BLOCK_SRC, init_phase);
 #endif
-	bnx2x_init_block(bp, CDU_BLOCK, init_stage);
-	bnx2x_init_block(bp, CFC_BLOCK, init_stage);
+	bnx2x_init_block(bp, BLOCK_CDU, init_phase);
+	bnx2x_init_block(bp, BLOCK_CFC, init_phase);
 
 	if (CHIP_IS_E1(bp)) {
 		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
 		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
 	}
-	bnx2x_init_block(bp, HC_BLOCK, init_stage);
+	bnx2x_init_block(bp, BLOCK_HC, init_phase);
 
-	bnx2x_init_block(bp, IGU_BLOCK, init_stage);
+	bnx2x_init_block(bp, BLOCK_IGU, init_phase);
 
-	bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
+	bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
 	/* init aeu_mask_attn_func_0/1:
 	 *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
 	 *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
@@ -5502,22 +6218,31 @@
 	val |= CHIP_IS_E1(bp) ? 0 : 0x10;
 	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
 
-	bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
-	bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
-	bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
-	bnx2x_init_block(bp, DBU_BLOCK, init_stage);
-	bnx2x_init_block(bp, DBG_BLOCK, init_stage);
+	bnx2x_init_block(bp, BLOCK_NIG, init_phase);
 
-	bnx2x_init_block(bp, NIG_BLOCK, init_stage);
+	if (!CHIP_IS_E1x(bp)) {
+		/* Bit-map indicating which L2 hdrs may appear after the
+		 * basic Ethernet header
+		 */
+		REG_WR(bp, BP_PORT(bp) ?
+			   NIG_REG_P1_HDRS_AFTER_BASIC :
+			   NIG_REG_P0_HDRS_AFTER_BASIC,
+			   IS_MF_SD(bp) ? 7 : 6);
 
-	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
+		if (CHIP_IS_E3(bp))
+			REG_WR(bp, BP_PORT(bp) ?
+				   NIG_REG_LLH1_MF_MODE :
+				   NIG_REG_LLH_MF_MODE, IS_MF(bp));
+	}
+	if (!CHIP_IS_E3(bp))
+		REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
 
 	if (!CHIP_IS_E1(bp)) {
 		/* 0x2 disable mf_ov, 0x1 enable */
 		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
 		       (IS_MF_SD(bp) ? 0x1 : 0x2));
 
-		if (CHIP_IS_E2(bp)) {
+		if (!CHIP_IS_E1x(bp)) {
 			val = 0;
 			switch (bp->mf_mode) {
 			case MULTI_FUNCTION_SD:
@@ -5538,17 +6263,16 @@
 		}
 	}
 
-	bnx2x_init_block(bp, MCP_BLOCK, init_stage);
-	bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
-	if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
-				      bp->common.shmem2_base, port)) {
+
+	/* If SPIO5 is set to generate interrupts, enable it for this port */
+	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
+	if (val & (1 << MISC_REGISTERS_SPIO_5)) {
 		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
 				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
 		val = REG_RD(bp, reg_addr);
 		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
 		REG_WR(bp, reg_addr, val);
 	}
-	bnx2x__link_reset(bp);
 
 	return 0;
 }
@@ -5567,7 +6291,7 @@
 
 static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
 {
-	bnx2x_igu_clear_sb_gen(bp, idu_sb_id, true /*PF*/);
+	bnx2x_igu_clear_sb_gen(bp, BP_FUNC(bp), idu_sb_id, true /*PF*/);
 }
 
 static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
@@ -5581,6 +6305,7 @@
 {
 	int port = BP_PORT(bp);
 	int func = BP_FUNC(bp);
+	int init_phase = PHASE_PF0 + func;
 	struct bnx2x_ilt *ilt = BP_ILT(bp);
 	u16 cdu_ilt_start;
 	u32 addr, val;
@@ -5589,6 +6314,10 @@
 
 	DP(BNX2X_MSG_MCP, "starting func init  func %d\n", func);
 
+	/* FLR cleanup */
+	if (!CHIP_IS_E1x(bp))
+		bnx2x_pf_flr_clnup(bp);
+
 	/* set MSI reconfigure capability */
 	if (bp->common.int_block == INT_BLOCK_HC) {
 		addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
@@ -5597,6 +6326,9 @@
 		REG_WR(bp, addr, val);
 	}
 
+	bnx2x_init_block(bp, BLOCK_PXP, init_phase);
+	bnx2x_init_block(bp, BLOCK_PXP2, init_phase);
+
 	ilt = BP_ILT(bp);
 	cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
 
@@ -5622,7 +6354,7 @@
 	REG_WR(bp, PRS_REG_NIC_MODE, 1);
 #endif  /* BCM_CNIC */
 
-	if (CHIP_IS_E2(bp)) {
+	if (!CHIP_IS_E1x(bp)) {
 		u32 pf_conf = IGU_PF_CONF_FUNC_EN;
 
 		/* Turn on a single ISR mode in IGU if driver is going to use
@@ -5649,58 +6381,55 @@
 
 	bp->dmae_ready = 1;
 
-	bnx2x_init_block(bp, PGLUE_B_BLOCK, FUNC0_STAGE + func);
+	bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
 
-	if (CHIP_IS_E2(bp))
+	if (!CHIP_IS_E1x(bp))
 		REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
 
-	bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
-	bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
-	bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
-	bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
-	bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
-	bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
-	bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
-	bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
-	bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
+	bnx2x_init_block(bp, BLOCK_ATC, init_phase);
+	bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
+	bnx2x_init_block(bp, BLOCK_NIG, init_phase);
+	bnx2x_init_block(bp, BLOCK_SRC, init_phase);
+	bnx2x_init_block(bp, BLOCK_MISC, init_phase);
+	bnx2x_init_block(bp, BLOCK_TCM, init_phase);
+	bnx2x_init_block(bp, BLOCK_UCM, init_phase);
+	bnx2x_init_block(bp, BLOCK_CCM, init_phase);
+	bnx2x_init_block(bp, BLOCK_XCM, init_phase);
+	bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
+	bnx2x_init_block(bp, BLOCK_USEM, init_phase);
+	bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
+	bnx2x_init_block(bp, BLOCK_XSEM, init_phase);
 
-	if (CHIP_IS_E2(bp)) {
-		REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_PATH_ID_OFFSET,
-								BP_PATH(bp));
-		REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_PATH_ID_OFFSET,
-								BP_PATH(bp));
-	}
-
-	if (CHIP_MODE_IS_4_PORT(bp))
-		bnx2x_init_block(bp, XSEM_4PORT_BLOCK, FUNC0_STAGE + func);
-
-	if (CHIP_IS_E2(bp))
+	if (!CHIP_IS_E1x(bp))
 		REG_WR(bp, QM_REG_PF_EN, 1);
 
-	bnx2x_init_block(bp, QM_BLOCK, FUNC0_STAGE + func);
+	if (!CHIP_IS_E1x(bp)) {
+		REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
+		REG_WR(bp, USEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
+		REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
+		REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
+	}
+	bnx2x_init_block(bp, BLOCK_QM, init_phase);
 
-	if (CHIP_MODE_IS_4_PORT(bp))
-		bnx2x_init_block(bp, QM_4PORT_BLOCK, FUNC0_STAGE + func);
-
-	bnx2x_init_block(bp, TIMERS_BLOCK, FUNC0_STAGE + func);
-	bnx2x_init_block(bp, DQ_BLOCK, FUNC0_STAGE + func);
-	bnx2x_init_block(bp, BRB1_BLOCK, FUNC0_STAGE + func);
-	bnx2x_init_block(bp, PRS_BLOCK, FUNC0_STAGE + func);
-	bnx2x_init_block(bp, TSDM_BLOCK, FUNC0_STAGE + func);
-	bnx2x_init_block(bp, CSDM_BLOCK, FUNC0_STAGE + func);
-	bnx2x_init_block(bp, USDM_BLOCK, FUNC0_STAGE + func);
-	bnx2x_init_block(bp, XSDM_BLOCK, FUNC0_STAGE + func);
-	bnx2x_init_block(bp, UPB_BLOCK, FUNC0_STAGE + func);
-	bnx2x_init_block(bp, XPB_BLOCK, FUNC0_STAGE + func);
-	bnx2x_init_block(bp, PBF_BLOCK, FUNC0_STAGE + func);
-	if (CHIP_IS_E2(bp))
+	bnx2x_init_block(bp, BLOCK_TM, init_phase);
+	bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
+	bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
+	bnx2x_init_block(bp, BLOCK_PRS, init_phase);
+	bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
+	bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
+	bnx2x_init_block(bp, BLOCK_USDM, init_phase);
+	bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
+	bnx2x_init_block(bp, BLOCK_UPB, init_phase);
+	bnx2x_init_block(bp, BLOCK_XPB, init_phase);
+	bnx2x_init_block(bp, BLOCK_PBF, init_phase);
+	if (!CHIP_IS_E1x(bp))
 		REG_WR(bp, PBF_REG_DISABLE_PF, 0);
 
-	bnx2x_init_block(bp, CDU_BLOCK, FUNC0_STAGE + func);
+	bnx2x_init_block(bp, BLOCK_CDU, init_phase);
 
-	bnx2x_init_block(bp, CFC_BLOCK, FUNC0_STAGE + func);
+	bnx2x_init_block(bp, BLOCK_CFC, init_phase);
 
-	if (CHIP_IS_E2(bp))
+	if (!CHIP_IS_E1x(bp))
 		REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
 
 	if (IS_MF(bp)) {
@@ -5708,7 +6437,7 @@
 		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
 	}
 
-	bnx2x_init_block(bp, MISC_AEU_BLOCK, FUNC0_STAGE + func);
+	bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
 
 	/* HC init per function */
 	if (bp->common.int_block == INT_BLOCK_HC) {
@@ -5718,21 +6447,21 @@
 			REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
 			REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
 		}
-		bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
+		bnx2x_init_block(bp, BLOCK_HC, init_phase);
 
 	} else {
 		int num_segs, sb_idx, prod_offset;
 
 		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
 
-		if (CHIP_IS_E2(bp)) {
+		if (!CHIP_IS_E1x(bp)) {
 			REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
 			REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
 		}
 
-		bnx2x_init_block(bp, IGU_BLOCK, FUNC0_STAGE + func);
+		bnx2x_init_block(bp, BLOCK_IGU, init_phase);
 
-		if (CHIP_IS_E2(bp)) {
+		if (!CHIP_IS_E1x(bp)) {
 			int dsb_idx = 0;
 			/**
 			 * Producer memory:
@@ -5827,13 +6556,6 @@
 	REG_WR(bp, 0x2114, 0xffffffff);
 	REG_WR(bp, 0x2120, 0xffffffff);
 
-	bnx2x_init_block(bp, EMAC0_BLOCK, FUNC0_STAGE + func);
-	bnx2x_init_block(bp, EMAC1_BLOCK, FUNC0_STAGE + func);
-	bnx2x_init_block(bp, DBU_BLOCK, FUNC0_STAGE + func);
-	bnx2x_init_block(bp, DBG_BLOCK, FUNC0_STAGE + func);
-	bnx2x_init_block(bp, MCP_BLOCK, FUNC0_STAGE + func);
-	bnx2x_init_block(bp, DMAE_BLOCK, FUNC0_STAGE + func);
-
 	if (CHIP_IS_E1x(bp)) {
 		main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
 		main_mem_base = HC_REG_MAIN_MEMORY +
@@ -5859,65 +6581,26 @@
 		REG_RD(bp, main_mem_prty_clr);
 	}
 
+#ifdef BNX2X_STOP_ON_ERROR
+	/* Enable STORMs SP logging */
+	REG_WR8(bp, BAR_USTRORM_INTMEM +
+	       USTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
+	REG_WR8(bp, BAR_TSTRORM_INTMEM +
+	       TSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
+	REG_WR8(bp, BAR_CSTRORM_INTMEM +
+	       CSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
+	REG_WR8(bp, BAR_XSTRORM_INTMEM +
+	       XSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
+#endif
+
 	bnx2x_phy_probe(&bp->link_params);
 
 	return 0;
 }
 
-int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
-{
-	int rc = 0;
-
-	DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
-	   BP_ABS_FUNC(bp), load_code);
-
-	bp->dmae_ready = 0;
-	spin_lock_init(&bp->dmae_lock);
-
-	switch (load_code) {
-	case FW_MSG_CODE_DRV_LOAD_COMMON:
-	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
-		rc = bnx2x_init_hw_common(bp, load_code);
-		if (rc)
-			goto init_hw_err;
-		/* no break */
-
-	case FW_MSG_CODE_DRV_LOAD_PORT:
-		rc = bnx2x_init_hw_port(bp);
-		if (rc)
-			goto init_hw_err;
-		/* no break */
-
-	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
-		rc = bnx2x_init_hw_func(bp);
-		if (rc)
-			goto init_hw_err;
-		break;
-
-	default:
-		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
-		break;
-	}
-
-	if (!BP_NOMCP(bp)) {
-		int mb_idx = BP_FW_MB_IDX(bp);
-
-		bp->fw_drv_pulse_wr_seq =
-				(SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) &
-				 DRV_PULSE_SEQ_MASK);
-		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
-	}
-
-init_hw_err:
-	bnx2x_gunzip_end(bp);
-
-	return rc;
-}
 
 void bnx2x_free_mem(struct bnx2x *bp)
 {
-	bnx2x_gunzip_end(bp);
-
 	/* fastpath */
 	bnx2x_free_fp_mem(bp);
 	/* end of fastpath */
@@ -5925,6 +6608,9 @@
 	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
 		       sizeof(struct host_sp_status_block));
 
+	BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
+		       bp->fw_stats_data_sz + bp->fw_stats_req_sz);
+
 	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
 		       sizeof(struct bnx2x_slowpath));
 
@@ -5936,7 +6622,7 @@
 	BNX2X_FREE(bp->ilt->lines);
 
 #ifdef BCM_CNIC
-	if (CHIP_IS_E2(bp))
+	if (!CHIP_IS_E1x(bp))
 		BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
 			       sizeof(struct host_hc_status_block_e2));
 	else
@@ -5950,18 +6636,67 @@
 
 	BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
 		       BCM_PAGE_SIZE * NUM_EQ_PAGES);
+}
 
-	BNX2X_FREE(bp->rx_indir_table);
+static inline int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
+{
+	int num_groups;
+
+	/* number of eth_queues */
+	u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp);
+
+	/* Total number of FW statistics requests =
+	 * 1 for port stats + 1 for PF stats + num_eth_queues */
+	bp->fw_stats_num = 2 + num_queue_stats;
+
+	/* Request is built from stats_query_header and an array of
+	 * stats_query_cmd_group each of which contains
+	 * STATS_QUERY_CMD_COUNT rules. The real number or requests is
+	 * configured in the stats_query_header.
+	 */
+	num_groups = (2 + num_queue_stats) / STATS_QUERY_CMD_COUNT +
+		(((2 + num_queue_stats) % STATS_QUERY_CMD_COUNT) ? 1 : 0);
+
+	bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
+			num_groups * sizeof(struct stats_query_cmd_group);
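+
+	/* Worked example (illustrative): if STATS_QUERY_CMD_COUNT were 16
+	 * and the device ran 8 eth queues, fw_stats_num = 2 + 8 = 10 and
+	 * num_groups = 10/16 + (10 % 16 ? 1 : 0) = 1, i.e. one header
+	 * plus a single cmd_group in the request.
+	 */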
+
+	/* Data for statistics requests + stats_counter
+	 *
+	 * stats_counter holds per-STORM counters that are incremented
+	 * when STORM has finished with the current request.
+	 */
+	bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
+		sizeof(struct per_pf_stats) +
+		sizeof(struct per_queue_stats) * num_queue_stats +
+		sizeof(struct stats_counter);
+
+	BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
+			bp->fw_stats_data_sz + bp->fw_stats_req_sz);
+
+	/* Set shortcuts */
+	bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
+	bp->fw_stats_req_mapping = bp->fw_stats_mapping;
+
+	bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
+		((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
+
+	bp->fw_stats_data_mapping = bp->fw_stats_mapping +
+				   bp->fw_stats_req_sz;
+	return 0;
+
+alloc_mem_err:
+	BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
+		       bp->fw_stats_data_sz + bp->fw_stats_req_sz);
+	return -ENOMEM;
 }
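
The group math above is a plain ceiling division over the total request count (port + PF + one per ETH queue). A standalone sketch, assuming a hypothetical STATS_QUERY_CMD_COUNT of 16 (the real value comes from the firmware HSI headers):

	/* Equivalent round-up form of the '/' plus '%' computation above. */
	static int num_stats_groups(int num_queue_stats, int cmds_per_group)
	{
		int num_requests = 2 + num_queue_stats; /* port + PF + queues */

		return (num_requests + cmds_per_group - 1) / cmds_per_group;
	}

	/* e.g. 8 queues -> 10 requests -> 1 group; 31 queues -> 33 -> 3 groups */

Note also that the request and the reply data share a single DMA allocation: bp->fw_stats_req sits at offset 0 and bp->fw_stats_data at offset bp->fw_stats_req_sz, which is why both the error path here and bnx2x_free_mem() release fw_stats_data_sz + fw_stats_req_sz in one BNX2X_PCI_FREE() call.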
 
 
 int bnx2x_alloc_mem(struct bnx2x *bp)
 {
-	if (bnx2x_gunzip_init(bp))
-		return -ENOMEM;
-
 #ifdef BCM_CNIC
-	if (CHIP_IS_E2(bp))
+	if (!CHIP_IS_E1x(bp))
+		/* size = the status block + ramrod buffers */
 		BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
 				sizeof(struct host_hc_status_block_e2));
 	else
@@ -5979,6 +6714,10 @@
 	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
 			sizeof(struct bnx2x_slowpath));
 
+	/* Allocate memory for FW statistics */
+	if (bnx2x_alloc_fw_stats_mem(bp))
+		goto alloc_mem_err;
+
 	bp->context.size = sizeof(union cdu_context) * bp->l2_cid_count;
 
 	BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
@@ -5996,8 +6735,6 @@
 	BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
 			BCM_PAGE_SIZE * NUM_EQ_PAGES);
 
-	BNX2X_ALLOC(bp->rx_indir_table, sizeof(bp->rx_indir_table[0]) *
-		    TSTORM_INDIRECTION_TABLE_SIZE);
 
 	/* fastpath */
 	/* need to be done at the end, since it's self adjusting to amount
@@ -6015,631 +6752,77 @@
 /*
  * Init service functions
  */
-static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
-			     int *state_p, int flags);
 
-int bnx2x_func_start(struct bnx2x *bp)
+int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
+		      struct bnx2x_vlan_mac_obj *obj, bool set,
+		      int mac_type, unsigned long *ramrod_flags)
 {
-	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1);
+	int rc;
+	struct bnx2x_vlan_mac_ramrod_params ramrod_param;
 
-	/* Wait for completion */
-	return bnx2x_wait_ramrod(bp, BNX2X_STATE_FUNC_STARTED, 0, &(bp->state),
-				 WAIT_RAMROD_COMMON);
-}
+	memset(&ramrod_param, 0, sizeof(ramrod_param));
 
-static int bnx2x_func_stop(struct bnx2x *bp)
-{
-	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1);
+	/* Fill general parameters */
+	ramrod_param.vlan_mac_obj = obj;
+	ramrod_param.ramrod_flags = *ramrod_flags;
 
-	/* Wait for completion */
-	return bnx2x_wait_ramrod(bp, BNX2X_STATE_CLOSING_WAIT4_UNLOAD,
-				      0, &(bp->state), WAIT_RAMROD_COMMON);
-}
+	/* Fill a user request section if needed */
+	if (!test_bit(RAMROD_CONT, ramrod_flags)) {
+		memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);
 
-/**
- * bnx2x_set_mac_addr_gen - set a MAC in a CAM for a few L2 Clients for E1x chips
- *
- * @bp:		driver handle
- * @set:	set or clear an entry (1 or 0)
- * @mac:	pointer to a buffer containing a MAC
- * @cl_bit_vec:	bit vector of clients to register a MAC for
- * @cam_offset:	offset in a CAM to use
- * @is_bcast:	is the set MAC a broadcast address (for E1 only)
- */
-static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, const u8 *mac,
-				   u32 cl_bit_vec, u8 cam_offset,
-				   u8 is_bcast)
-{
-	struct mac_configuration_cmd *config =
-		(struct mac_configuration_cmd *)bnx2x_sp(bp, mac_config);
-	int ramrod_flags = WAIT_RAMROD_COMMON;
+		__set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);
 
-	bp->set_mac_pending = 1;
-
-	config->hdr.length = 1;
-	config->hdr.offset = cam_offset;
-	config->hdr.client_id = 0xff;
-	/* Mark the single MAC configuration ramrod as opposed to a
-	 * UC/MC list configuration).
-	 */
-	config->hdr.echo = 1;
-
-	/* primary MAC */
-	config->config_table[0].msb_mac_addr =
-					swab16(*(u16 *)&mac[0]);
-	config->config_table[0].middle_mac_addr =
-					swab16(*(u16 *)&mac[2]);
-	config->config_table[0].lsb_mac_addr =
-					swab16(*(u16 *)&mac[4]);
-	config->config_table[0].clients_bit_vector =
-					cpu_to_le32(cl_bit_vec);
-	config->config_table[0].vlan_id = 0;
-	config->config_table[0].pf_id = BP_FUNC(bp);
-	if (set)
-		SET_FLAG(config->config_table[0].flags,
-			MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
-			T_ETH_MAC_COMMAND_SET);
-	else
-		SET_FLAG(config->config_table[0].flags,
-			MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
-			T_ETH_MAC_COMMAND_INVALIDATE);
-
-	if (is_bcast)
-		SET_FLAG(config->config_table[0].flags,
-			MAC_CONFIGURATION_ENTRY_BROADCAST, 1);
-
-	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  PF_ID %d  CLID mask %d\n",
-	   (set ? "setting" : "clearing"),
-	   config->config_table[0].msb_mac_addr,
-	   config->config_table[0].middle_mac_addr,
-	   config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);
-
-	mb();
-
-	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
-		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
-		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);
-
-	/* Wait for a completion */
-	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
-}
-
-static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
-			     int *state_p, int flags)
-{
-	/* can take a while if any port is running */
-	int cnt = 5000;
-	u8 poll = flags & WAIT_RAMROD_POLL;
-	u8 common = flags & WAIT_RAMROD_COMMON;
-
-	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
-	   poll ? "polling" : "waiting", state, idx);
-
-	might_sleep();
-	while (cnt--) {
-		if (poll) {
-			if (common)
-				bnx2x_eq_int(bp);
-			else {
-				bnx2x_rx_int(bp->fp, 10);
-				/* if index is different from 0
-				 * the reply for some commands will
-				 * be on the non default queue
-				 */
-				if (idx)
-					bnx2x_rx_int(&bp->fp[idx], 10);
-			}
-		}
-
-		mb(); /* state is changed by bnx2x_sp_event() */
-		if (*state_p == state) {
-#ifdef BNX2X_STOP_ON_ERROR
-			DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
-#endif
-			return 0;
-		}
-
-		msleep(1);
-
-		if (bp->panic)
-			return -EIO;
+		/* Set the command: ADD or DEL */
+		if (set)
+			ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
+		else
+			ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL;
 	}
 
-	/* timeout! */
-	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
-		  poll ? "polling" : "waiting", state, idx);
-#ifdef BNX2X_STOP_ON_ERROR
-	bnx2x_panic();
-#endif
-
-	return -EBUSY;
-}
-
-static u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
-{
-	if (CHIP_IS_E1H(bp))
-		return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
-	else if (CHIP_MODE_IS_4_PORT(bp))
-		return E2_FUNC_MAX * rel_offset + BP_FUNC(bp);
-	else
-		return E2_FUNC_MAX * rel_offset + BP_VN(bp);
-}
-
-/**
- *  LLH CAM line allocations: currently only iSCSI and ETH macs are
- *  relevant. In addition, current implementation is tuned for a
- *  single ETH MAC.
- */
-enum {
-	LLH_CAM_ISCSI_ETH_LINE = 0,
-	LLH_CAM_ETH_LINE,
-	LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE
-};
-
-static void bnx2x_set_mac_in_nig(struct bnx2x *bp,
-			  int set,
-			  unsigned char *dev_addr,
-			  int index)
-{
-	u32 wb_data[2];
-	u32 mem_offset, ena_offset, mem_index;
-	/**
-	 * indexes mapping:
-	 * 0..7 - goes to MEM
-	 * 8..15 - goes to MEM2
-	 */
-
-	if (!IS_MF_SI(bp) || index > LLH_CAM_MAX_PF_LINE)
-		return;
-
-	/* calculate memory start offset according to the mapping
-	 * and index in the memory */
-	if (index < NIG_LLH_FUNC_MEM_MAX_OFFSET) {
-		mem_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
-					   NIG_REG_LLH0_FUNC_MEM;
-		ena_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
-					   NIG_REG_LLH0_FUNC_MEM_ENABLE;
-		mem_index = index;
-	} else {
-		mem_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2 :
-					   NIG_REG_P0_LLH_FUNC_MEM2;
-		ena_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2_ENABLE :
-					   NIG_REG_P0_LLH_FUNC_MEM2_ENABLE;
-		mem_index = index - NIG_LLH_FUNC_MEM_MAX_OFFSET;
-	}
-
-	if (set) {
-		/* LLH_FUNC_MEM is a u64 WB register */
-		mem_offset += 8*mem_index;
-
-		wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
-			      (dev_addr[4] <<  8) |  dev_addr[5]);
-		wb_data[1] = ((dev_addr[0] <<  8) |  dev_addr[1]);
-
-		REG_WR_DMAE(bp, mem_offset, wb_data, 2);
-	}
-
-	/* enable/disable the entry */
-	REG_WR(bp, ena_offset + 4*mem_index, set);
-
-}
-
-void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
-{
-	u8 cam_offset = (CHIP_IS_E1(bp) ? (BP_PORT(bp) ? 32 : 0) :
-			 bnx2x_e1h_cam_offset(bp, CAM_ETH_LINE));
-
-	/* networking  MAC */
-	bnx2x_set_mac_addr_gen(bp, set, bp->dev->dev_addr,
-			       (1 << bp->fp->cl_id), cam_offset , 0);
-
-	bnx2x_set_mac_in_nig(bp, set, bp->dev->dev_addr, LLH_CAM_ETH_LINE);
-
-	if (CHIP_IS_E1(bp)) {
-		/* broadcast MAC */
-		static const u8 bcast[ETH_ALEN] = {
-			0xff, 0xff, 0xff, 0xff, 0xff, 0xff
-		};
-		bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
-	}
-}
-
-static inline u8 bnx2x_e1_cam_mc_offset(struct bnx2x *bp)
-{
-	return CHIP_REV_IS_SLOW(bp) ?
-		(BNX2X_MAX_EMUL_MULTI * (1 + BP_PORT(bp))) :
-		(BNX2X_MAX_MULTICAST * (1 + BP_PORT(bp)));
-}
-
-/* set mc list, do not wait as wait implies sleep and
- * set_rx_mode can be invoked from non-sleepable context.
- *
- * Instead we use the same ramrod data buffer each time we need
- * to configure a list of addresses, and use the fact that the
- * list of MACs is changed in an incremental way and that the
- * function is called under the netif_addr_lock. A temporary
- * inconsistent CAM configuration (possible in case of a very fast
- * sequence of add/del/add on the host side) will shortly be
- * restored by the handler of the last ramrod.
- */
-static int bnx2x_set_e1_mc_list(struct bnx2x *bp)
-{
-	int i = 0, old;
-	struct net_device *dev = bp->dev;
-	u8 offset = bnx2x_e1_cam_mc_offset(bp);
-	struct netdev_hw_addr *ha;
-	struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
-	dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
-
-	if (netdev_mc_count(dev) > BNX2X_MAX_MULTICAST)
-		return -EINVAL;
-
-	netdev_for_each_mc_addr(ha, dev) {
-		/* copy mac */
-		config_cmd->config_table[i].msb_mac_addr =
-			swab16(*(u16 *)&bnx2x_mc_addr(ha)[0]);
-		config_cmd->config_table[i].middle_mac_addr =
-			swab16(*(u16 *)&bnx2x_mc_addr(ha)[2]);
-		config_cmd->config_table[i].lsb_mac_addr =
-			swab16(*(u16 *)&bnx2x_mc_addr(ha)[4]);
-
-		config_cmd->config_table[i].vlan_id = 0;
-		config_cmd->config_table[i].pf_id = BP_FUNC(bp);
-		config_cmd->config_table[i].clients_bit_vector =
-			cpu_to_le32(1 << BP_L_ID(bp));
-
-		SET_FLAG(config_cmd->config_table[i].flags,
-			MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
-			T_ETH_MAC_COMMAND_SET);
-
-		DP(NETIF_MSG_IFUP,
-		   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
-		   config_cmd->config_table[i].msb_mac_addr,
-		   config_cmd->config_table[i].middle_mac_addr,
-		   config_cmd->config_table[i].lsb_mac_addr);
-		i++;
-	}
-	old = config_cmd->hdr.length;
-	if (old > i) {
-		for (; i < old; i++) {
-			if (CAM_IS_INVALID(config_cmd->
-					   config_table[i])) {
-				/* already invalidated */
-				break;
-			}
-			/* invalidate */
-			SET_FLAG(config_cmd->config_table[i].flags,
-				MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
-				T_ETH_MAC_COMMAND_INVALIDATE);
-		}
-	}
-
-	wmb();
-
-	config_cmd->hdr.length = i;
-	config_cmd->hdr.offset = offset;
-	config_cmd->hdr.client_id = 0xff;
-	/* Mark that this ramrod doesn't use bp->set_mac_pending for
-	 * synchronization.
-	 */
-	config_cmd->hdr.echo = 0;
-
-	mb();
-
-	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
-		   U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
-}
-
-void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp)
-{
-	int i;
-	struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
-	dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
-	int ramrod_flags = WAIT_RAMROD_COMMON;
-	u8 offset = bnx2x_e1_cam_mc_offset(bp);
-
-	for (i = 0; i < BNX2X_MAX_MULTICAST; i++)
-		SET_FLAG(config_cmd->config_table[i].flags,
-			MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
-			T_ETH_MAC_COMMAND_INVALIDATE);
-
-	wmb();
-
-	config_cmd->hdr.length = BNX2X_MAX_MULTICAST;
-	config_cmd->hdr.offset = offset;
-	config_cmd->hdr.client_id = 0xff;
-	/* We'll wait for a completion this time... */
-	config_cmd->hdr.echo = 1;
-
-	bp->set_mac_pending = 1;
-
-	mb();
-
-	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
-		      U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
-
-	/* Wait for a completion */
-	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
-				ramrod_flags);
-
-}
-
-/* Accept one or more multicasts */
-static int bnx2x_set_e1h_mc_list(struct bnx2x *bp)
-{
-	struct net_device *dev = bp->dev;
-	struct netdev_hw_addr *ha;
-	u32 mc_filter[MC_HASH_SIZE];
-	u32 crc, bit, regidx;
-	int i;
-
-	memset(mc_filter, 0, 4 * MC_HASH_SIZE);
-
-	netdev_for_each_mc_addr(ha, dev) {
-		DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
-		   bnx2x_mc_addr(ha));
-
-		crc = crc32c_le(0, bnx2x_mc_addr(ha),
-				ETH_ALEN);
-		bit = (crc >> 24) & 0xff;
-		regidx = bit >> 5;
-		bit &= 0x1f;
-		mc_filter[regidx] |= (1 << bit);
-	}
-
-	for (i = 0; i < MC_HASH_SIZE; i++)
-		REG_WR(bp, MC_HASH_OFFSET(bp, i),
-		       mc_filter[i]);
-
-	return 0;
-}
-
-void bnx2x_invalidate_e1h_mc_list(struct bnx2x *bp)
-{
-	int i;
-
-	for (i = 0; i < MC_HASH_SIZE; i++)
-		REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
-}
-
-#ifdef BCM_CNIC
-/**
- * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s).
- *
- * @bp:		driver handle
- * @set:	set or clear the CAM entry
- *
- * This function will wait until the ramdord completion returns.
- * Return 0 if success, -ENODEV if ramrod doesn't return.
- */
-static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
-{
-	u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) :
-			 bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE));
-	u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID +
-		BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
-	u32 cl_bit_vec = (1 << iscsi_l2_cl_id);
-	u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
-
-	/* Send a SET_MAC ramrod */
-	bnx2x_set_mac_addr_gen(bp, set, iscsi_mac, cl_bit_vec,
-			       cam_offset, 0);
-
-	bnx2x_set_mac_in_nig(bp, set, iscsi_mac, LLH_CAM_ISCSI_ETH_LINE);
-
-	return 0;
-}
-
-/**
- * bnx2x_set_fip_eth_mac_addr - set FCoE L2 MAC(s)
- *
- * @bp:		driver handle
- * @set:	set or clear the CAM entry
- *
- * This function will wait until the ramrod completion returns.
- * Returns 0 if success, -ENODEV if ramrod doesn't return.
- */
-int bnx2x_set_fip_eth_mac_addr(struct bnx2x *bp, int set)
-{
-	u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id));
-	/**
-	 * CAM allocation for E1H
-	 * eth unicasts: by func number
-	 * iscsi: by func number
-	 * fip unicast: by func number
-	 * fip multicast: by func number
-	 */
-	bnx2x_set_mac_addr_gen(bp, set, bp->fip_mac,
-		cl_bit_vec, bnx2x_e1h_cam_offset(bp, CAM_FIP_ETH_LINE), 0);
-
-	return 0;
-}
-
-int bnx2x_set_all_enode_macs(struct bnx2x *bp, int set)
-{
-	u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id));
-
-	/**
-	 * CAM allocation for E1H
-	 * eth unicasts: by func number
-	 * iscsi: by func number
-	 * fip unicast: by func number
-	 * fip multicast: by func number
-	 */
-	bnx2x_set_mac_addr_gen(bp, set, ALL_ENODE_MACS,	cl_bit_vec,
-		bnx2x_e1h_cam_offset(bp, CAM_FIP_MCAST_LINE), 0);
-
-	return 0;
-}
-#endif
-
-static void bnx2x_fill_cl_init_data(struct bnx2x *bp,
-				    struct bnx2x_client_init_params *params,
-				    u8 activate,
-				    struct client_init_ramrod_data *data)
-{
-	/* Clear the buffer */
-	memset(data, 0, sizeof(*data));
-
-	/* general */
-	data->general.client_id = params->rxq_params.cl_id;
-	data->general.statistics_counter_id = params->rxq_params.stat_id;
-	data->general.statistics_en_flg =
-		(params->rxq_params.flags & QUEUE_FLG_STATS) ? 1 : 0;
-	data->general.is_fcoe_flg =
-		(params->ramrod_params.flags & CLIENT_IS_FCOE) ? 1 : 0;
-	data->general.activate_flg = activate;
-	data->general.sp_client_id = params->rxq_params.spcl_id;
-
-	/* Rx data */
-	data->rx.tpa_en_flg =
-		(params->rxq_params.flags & QUEUE_FLG_TPA) ? 1 : 0;
-	data->rx.vmqueue_mode_en_flg = 0;
-	data->rx.cache_line_alignment_log_size =
-		params->rxq_params.cache_line_log;
-	data->rx.enable_dynamic_hc =
-		(params->rxq_params.flags & QUEUE_FLG_DHC) ? 1 : 0;
-	data->rx.max_sges_for_packet = params->rxq_params.max_sges_pkt;
-	data->rx.client_qzone_id = params->rxq_params.cl_qzone_id;
-	data->rx.max_agg_size = params->rxq_params.tpa_agg_sz;
-
-	/* We don't set drop flags */
-	data->rx.drop_ip_cs_err_flg = 0;
-	data->rx.drop_tcp_cs_err_flg = 0;
-	data->rx.drop_ttl0_flg = 0;
-	data->rx.drop_udp_cs_err_flg = 0;
-
-	data->rx.inner_vlan_removal_enable_flg =
-		(params->rxq_params.flags & QUEUE_FLG_VLAN) ? 1 : 0;
-	data->rx.outer_vlan_removal_enable_flg =
-		(params->rxq_params.flags & QUEUE_FLG_OV) ? 1 : 0;
-	data->rx.status_block_id = params->rxq_params.fw_sb_id;
-	data->rx.rx_sb_index_number = params->rxq_params.sb_cq_index;
-	data->rx.bd_buff_size = cpu_to_le16(params->rxq_params.buf_sz);
-	data->rx.sge_buff_size = cpu_to_le16(params->rxq_params.sge_buf_sz);
-	data->rx.mtu = cpu_to_le16(params->rxq_params.mtu);
-	data->rx.bd_page_base.lo =
-		cpu_to_le32(U64_LO(params->rxq_params.dscr_map));
-	data->rx.bd_page_base.hi =
-		cpu_to_le32(U64_HI(params->rxq_params.dscr_map));
-	data->rx.sge_page_base.lo =
-		cpu_to_le32(U64_LO(params->rxq_params.sge_map));
-	data->rx.sge_page_base.hi =
-		cpu_to_le32(U64_HI(params->rxq_params.sge_map));
-	data->rx.cqe_page_base.lo =
-		cpu_to_le32(U64_LO(params->rxq_params.rcq_map));
-	data->rx.cqe_page_base.hi =
-		cpu_to_le32(U64_HI(params->rxq_params.rcq_map));
-	data->rx.is_leading_rss =
-		(params->ramrod_params.flags & CLIENT_IS_LEADING_RSS) ? 1 : 0;
-	data->rx.is_approx_mcast = data->rx.is_leading_rss;
-
-	/* Tx data */
-	data->tx.enforce_security_flg = 0; /* VF specific */
-	data->tx.tx_status_block_id = params->txq_params.fw_sb_id;
-	data->tx.tx_sb_index_number = params->txq_params.sb_cq_index;
-	data->tx.mtu = 0; /* VF specific */
-	data->tx.tx_bd_page_base.lo =
-		cpu_to_le32(U64_LO(params->txq_params.dscr_map));
-	data->tx.tx_bd_page_base.hi =
-		cpu_to_le32(U64_HI(params->txq_params.dscr_map));
-
-	/* flow control data */
-	data->fc.cqe_pause_thr_low = cpu_to_le16(params->pause.rcq_th_lo);
-	data->fc.cqe_pause_thr_high = cpu_to_le16(params->pause.rcq_th_hi);
-	data->fc.bd_pause_thr_low = cpu_to_le16(params->pause.bd_th_lo);
-	data->fc.bd_pause_thr_high = cpu_to_le16(params->pause.bd_th_hi);
-	data->fc.sge_pause_thr_low = cpu_to_le16(params->pause.sge_th_lo);
-	data->fc.sge_pause_thr_high = cpu_to_le16(params->pause.sge_th_hi);
-	data->fc.rx_cos_mask = cpu_to_le16(params->pause.pri_map);
-
-	data->fc.safc_group_num = params->txq_params.cos;
-	data->fc.safc_group_en_flg =
-		(params->txq_params.flags & QUEUE_FLG_COS) ? 1 : 0;
-	data->fc.traffic_type =
-		(params->ramrod_params.flags & CLIENT_IS_FCOE) ?
-		LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
-}
-
-static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid)
-{
-	/* ustorm cxt validation */
-	cxt->ustorm_ag_context.cdu_usage =
-		CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_UCM_AG,
-				       ETH_CONNECTION_TYPE);
-	/* xcontext validation */
-	cxt->xstorm_ag_context.cdu_reserved =
-		CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_XCM_AG,
-				       ETH_CONNECTION_TYPE);
-}
-
-static int bnx2x_setup_fw_client(struct bnx2x *bp,
-				 struct bnx2x_client_init_params *params,
-				 u8 activate,
-				 struct client_init_ramrod_data *data,
-				 dma_addr_t data_mapping)
-{
-	u16 hc_usec;
-	int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
-	int ramrod_flags = 0, rc;
-
-	/* HC and context validation values */
-	hc_usec = params->txq_params.hc_rate ?
-		1000000 / params->txq_params.hc_rate : 0;
-	bnx2x_update_coalesce_sb_index(bp,
-			params->txq_params.fw_sb_id,
-			params->txq_params.sb_cq_index,
-			!(params->txq_params.flags & QUEUE_FLG_HC),
-			hc_usec);
-
-	*(params->ramrod_params.pstate) = BNX2X_FP_STATE_OPENING;
-
-	hc_usec = params->rxq_params.hc_rate ?
-		1000000 / params->rxq_params.hc_rate : 0;
-	bnx2x_update_coalesce_sb_index(bp,
-			params->rxq_params.fw_sb_id,
-			params->rxq_params.sb_cq_index,
-			!(params->rxq_params.flags & QUEUE_FLG_HC),
-			hc_usec);
-
-	bnx2x_set_ctx_validation(params->rxq_params.cxt,
-				 params->rxq_params.cid);
-
-	/* zero stats */
-	if (params->txq_params.flags & QUEUE_FLG_STATS)
-		storm_memset_xstats_zero(bp, BP_PORT(bp),
-					 params->txq_params.stat_id);
-
-	if (params->rxq_params.flags & QUEUE_FLG_STATS) {
-		storm_memset_ustats_zero(bp, BP_PORT(bp),
-					 params->rxq_params.stat_id);
-		storm_memset_tstats_zero(bp, BP_PORT(bp),
-					 params->rxq_params.stat_id);
-	}
-
-	/* Fill the ramrod data */
-	bnx2x_fill_cl_init_data(bp, params, activate, data);
-
-	/* SETUP ramrod.
-	 *
-	 * bnx2x_sp_post() takes a spin_lock thus no other explict memory
-	 * barrier except from mmiowb() is needed to impose a
-	 * proper ordering of memory operations.
-	 */
-	mmiowb();
-
-
-	bnx2x_sp_post(bp, ramrod, params->ramrod_params.cid,
-		      U64_HI(data_mapping), U64_LO(data_mapping), 0);
-
-	/* Wait for completion */
-	rc = bnx2x_wait_ramrod(bp, params->ramrod_params.state,
-				 params->ramrod_params.index,
-				 params->ramrod_params.pstate,
-				 ramrod_flags);
+	rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
+	if (rc < 0)
+		BNX2X_ERR("%s MAC failed\n", (set ? "Set" : "Del"));
 	return rc;
 }
 
+int bnx2x_del_all_macs(struct bnx2x *bp,
+		       struct bnx2x_vlan_mac_obj *mac_obj,
+		       int mac_type, bool wait_for_comp)
+{
+	int rc;
+	unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
+
+	/* Wait for completion of the requested commands */
+	if (wait_for_comp)
+		__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+
+	/* Set the mac type of addresses we want to clear */
+	__set_bit(mac_type, &vlan_mac_flags);
+
+	rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, &ramrod_flags);
+	if (rc < 0)
+		BNX2X_ERR("Failed to delete MACs: %d\n", rc);
+
+	return rc;
+}
+
+int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
+{
+	unsigned long ramrod_flags = 0;
+
+	DP(NETIF_MSG_IFUP, "Adding Eth MAC\n");
+
+	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+	/* Eth MAC is set on RSS leading client (fp[0]) */
+	return bnx2x_set_mac_one(bp, bp->dev->dev_addr, &bp->fp->mac_obj, set,
+				 BNX2X_ETH_MAC, &ramrod_flags);
+}
+
+int bnx2x_setup_leading(struct bnx2x *bp)
+{
+	return bnx2x_setup_queue(bp, &bp->fp[0], 1);
+}
+
 /**
  * bnx2x_set_int_mode - configure interrupt mode
  *
@@ -6647,11 +6830,9 @@
  *
  * In case of MSI-X it will also try to enable MSI-X.
  */
-static int __devinit bnx2x_set_int_mode(struct bnx2x *bp)
+static void __devinit bnx2x_set_int_mode(struct bnx2x *bp)
 {
-	int rc = 0;
-
-	switch (bp->int_mode) {
+	switch (int_mode) {
 	case INT_MODE_MSI:
 		bnx2x_enable_msi(bp);
 		/* falling through... */
@@ -6670,8 +6851,7 @@
 		 * so try to enable MSI-X with the requested number of fp's
 		 * and fallback to MSI or legacy INTx with one fp
 		 */
-		rc = bnx2x_enable_msix(bp);
-		if (rc) {
+		if (bnx2x_enable_msix(bp)) {
 			/* failed to enable MSI-X */
 			if (bp->multi_mode)
 				DP(NETIF_MSG_IFUP,
@@ -6682,14 +6862,12 @@
 				   1 + NONE_ETH_CONTEXT_USE);
 			bp->num_queues = 1 + NONE_ETH_CONTEXT_USE;
 
+			/* Try to enable MSI */
 			if (!(bp->flags & DISABLE_MSI_FLAG))
 				bnx2x_enable_msi(bp);
 		}
-
 		break;
 	}
-
-	return rc;
 }
 
 /* must be called prior to any HW initializations */
@@ -6713,7 +6891,7 @@
 	ilt_client->page_size = CDU_ILT_PAGE_SZ;
 	ilt_client->flags = ILT_CLIENT_SKIP_MEM;
 	ilt_client->start = line;
-	line += L2_ILT_LINES(bp);
+	line += bnx2x_cid_ilt_lines(bp);
 #ifdef BCM_CNIC
 	line += CNIC_ILT_LINES;
 #endif
@@ -6793,12 +6971,72 @@
 #else
 	ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
 #endif
+	BUG_ON(line > ILT_MAX_LINES);
 }
 
-int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
-		       int is_leading)
+/**
+ * bnx2x_pf_q_prep_init - prepare INIT transition parameters
+ *
+ * @bp:			driver handle
+ * @fp:			pointer to fastpath
+ * @init_params:	pointer to parameters structure
+ *
+ * parameters configured:
+ *      - HC configuration
+ *      - Queue's CDU context
+ */
+static inline void bnx2x_pf_q_prep_init(struct bnx2x *bp,
+	struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params)
 {
-	struct bnx2x_client_init_params params = { {0} };
+	/* FCoE Queue uses Default SB, thus has no HC capabilities */
+	if (!IS_FCOE_FP(fp)) {
+		__set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags);
+		__set_bit(BNX2X_Q_FLG_HC, &init_params->tx.flags);
+
+		/* If HC is supported, enable host coalescing in the transition
+		 * to INIT state.
+		 */
+		__set_bit(BNX2X_Q_FLG_HC_EN, &init_params->rx.flags);
+		__set_bit(BNX2X_Q_FLG_HC_EN, &init_params->tx.flags);
+
+		/* HC rate */
+		init_params->rx.hc_rate = bp->rx_ticks ?
+			(1000000 / bp->rx_ticks) : 0;
+		init_params->tx.hc_rate = bp->tx_ticks ?
+			(1000000 / bp->tx_ticks) : 0;
+
+		/* FW SB ID */
+		init_params->rx.fw_sb_id = init_params->tx.fw_sb_id =
+			fp->fw_sb_id;
+
+		/*
+		 * CQ index among the SB indices: FCoE clients use the default
+		 * SB, therefore it's different.
+		 */
+		init_params->rx.sb_cq_index = U_SB_ETH_RX_CQ_INDEX;
+		init_params->tx.sb_cq_index = C_SB_ETH_TX_CQ_INDEX;
+	}
+
+	init_params->cxt = &bp->context.vcxt[fp->cid].eth;
+}
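
The hc_rate values above are interrupts per second derived from the microsecond coalescing ticks, with 0 ticks disabling the limit. A minimal sketch of the conversion (illustrative helper, not a driver API):

	static unsigned int hc_ticks_to_rate(unsigned int ticks_usec)
	{
		/* 0 ticks -> rate 0, i.e. host coalescing rate is unlimited */
		return ticks_usec ? 1000000 / ticks_usec : 0;
	}

For example, rx_ticks = 50 usec yields a rate of 20000 interrupts/sec.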
+
+/**
+ * bnx2x_setup_queue - setup queue
+ *
+ * @bp:		driver handle
+ * @fp:		pointer to fastpath
+ * @leading:	is leading
+ *
+ * This function performs two steps in a Queue state machine:
+ *      1) RESET->INIT, 2) INIT->SETUP.
+ */
+int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+		       bool leading)
+{
+	struct bnx2x_queue_state_params q_params = {0};
+	struct bnx2x_queue_setup_params *setup_params =
+						&q_params.params.setup;
 	int rc;
 
 	/* reset IGU state skip FCoE L2 queue */
@@ -6806,79 +7044,73 @@
 		bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
 			     IGU_INT_ENABLE, 0);
 
-	params.ramrod_params.pstate = &fp->state;
-	params.ramrod_params.state = BNX2X_FP_STATE_OPEN;
-	params.ramrod_params.index = fp->index;
-	params.ramrod_params.cid = fp->cid;
+	q_params.q_obj = &fp->q_obj;
+	/* We want to wait for completion in this context */
+	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
 
-#ifdef BCM_CNIC
-	if (IS_FCOE_FP(fp))
-		params.ramrod_params.flags |= CLIENT_IS_FCOE;
+	/* Prepare the INIT parameters */
+	bnx2x_pf_q_prep_init(bp, fp, &q_params.params.init);
 
-#endif
+	/* Set the command */
+	q_params.cmd = BNX2X_Q_CMD_INIT;
 
-	if (is_leading)
-		params.ramrod_params.flags |= CLIENT_IS_LEADING_RSS;
+	/* Change the state to INIT */
+	rc = bnx2x_queue_state_change(bp, &q_params);
+	if (rc) {
+		BNX2X_ERR("Queue INIT failed\n");
+		return rc;
+	}
 
-	bnx2x_pf_rx_cl_prep(bp, fp, &params.pause, &params.rxq_params);
+	/* Now move the Queue to the SETUP state... */
+	memset(setup_params, 0, sizeof(*setup_params));
 
-	bnx2x_pf_tx_cl_prep(bp, fp, &params.txq_params);
+	/* Set QUEUE flags */
+	setup_params->flags = bnx2x_get_q_flags(bp, fp, leading);
 
-	rc = bnx2x_setup_fw_client(bp, &params, 1,
-				     bnx2x_sp(bp, client_init_data),
-				     bnx2x_sp_mapping(bp, client_init_data));
+	/* Set general SETUP parameters */
+	bnx2x_pf_q_prep_general(bp, fp, &setup_params->gen_params);
+
+	bnx2x_pf_rx_q_prep(bp, fp, &setup_params->pause,
+			    &setup_params->rxq_params);
+
+	bnx2x_pf_tx_q_prep(bp, fp, &setup_params->txq_params);
+
+	/* Set the command */
+	q_params.cmd = BNX2X_Q_CMD_SETUP;
+
+	/* Change the state to SETUP */
+	rc = bnx2x_queue_state_change(bp, &q_params);
+	if (rc)
+		BNX2X_ERR("Queue SETUP failed\n");
+
 	return rc;
 }
 
-static int bnx2x_stop_fw_client(struct bnx2x *bp,
-				struct bnx2x_client_ramrod_params *p)
+static int bnx2x_stop_queue(struct bnx2x *bp, int index)
 {
+	struct bnx2x_fastpath *fp = &bp->fp[index];
+	struct bnx2x_queue_state_params q_params = {0};
 	int rc;
 
-	int poll_flag = p->poll ? WAIT_RAMROD_POLL : 0;
+	q_params.q_obj = &fp->q_obj;
+	/* We want to wait for completion in this context */
+	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
 
 	/* halt the connection */
-	*p->pstate = BNX2X_FP_STATE_HALTING;
-	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, p->cid, 0,
-						  p->cl_id, 0);
-
-	/* Wait for completion */
-	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, p->index,
-			       p->pstate, poll_flag);
-	if (rc) /* timeout */
+	q_params.cmd = BNX2X_Q_CMD_HALT;
+	rc = bnx2x_queue_state_change(bp, &q_params);
+	if (rc)
 		return rc;
 
-	*p->pstate = BNX2X_FP_STATE_TERMINATING;
-	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, p->cid, 0,
-						       p->cl_id, 0);
-	/* Wait for completion */
-	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_TERMINATED, p->index,
-			       p->pstate, poll_flag);
-	if (rc) /* timeout */
+	/* terminate the connection */
+	q_params.cmd = BNX2X_Q_CMD_TERMINATE;
+	rc = bnx2x_queue_state_change(bp, &q_params);
+	if (rc)
 		return rc;
 
-
 	/* delete cfc entry */
-	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, p->cid, 0, 0, 1);
-
-	/* Wait for completion */
-	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, p->index,
-			       p->pstate, WAIT_RAMROD_COMMON);
-	return rc;
-}
-
-static int bnx2x_stop_client(struct bnx2x *bp, int index)
-{
-	struct bnx2x_client_ramrod_params client_stop = {0};
-	struct bnx2x_fastpath *fp = &bp->fp[index];
-
-	client_stop.index = index;
-	client_stop.cid = fp->cid;
-	client_stop.cl_id = fp->cl_id;
-	client_stop.pstate = &(fp->state);
-	client_stop.poll = 0;
-
-	return bnx2x_stop_fw_client(bp, &client_stop);
+	q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
+	return bnx2x_queue_state_change(bp, &q_params);
 }
 
 
@@ -6887,12 +7119,6 @@
 	int port = BP_PORT(bp);
 	int func = BP_FUNC(bp);
 	int i;
-	int pfunc_offset_fp = offsetof(struct hc_sb_data, p_func) +
-			(CHIP_IS_E2(bp) ?
-			 offsetof(struct hc_status_block_data_e2, common) :
-			 offsetof(struct hc_status_block_data_e1x, common));
-	int pfunc_offset_sp = offsetof(struct hc_sp_status_block_data, p_func);
-	int pfid_offset = offsetof(struct pci_entity, pf_id);
 
 	/* Disable the function in the FW */
 	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
@@ -6903,20 +7129,21 @@
 	/* FP SBs */
 	for_each_eth_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
-		REG_WR8(bp,
-			BAR_CSTRORM_INTMEM +
-			CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id)
-			+ pfunc_offset_fp + pfid_offset,
-			HC_FUNCTION_DISABLED);
+		REG_WR8(bp, BAR_CSTRORM_INTMEM +
+			CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
+			SB_DISABLED);
 	}
 
+#ifdef BCM_CNIC
+	/* CNIC SB */
+	REG_WR8(bp, BAR_CSTRORM_INTMEM +
+		CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(bnx2x_cnic_fw_sb_id(bp)),
+		SB_DISABLED);
+#endif
 	/* SP SB */
-	REG_WR8(bp,
-		BAR_CSTRORM_INTMEM +
-		CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
-		pfunc_offset_sp + pfid_offset,
-		HC_FUNCTION_DISABLED);
-
+	REG_WR8(bp, BAR_CSTRORM_INTMEM +
+		CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
+		SB_DISABLED);
 
 	for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
 		REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
@@ -6950,7 +7177,7 @@
 	/* Timers workaround bug for E2: if this is vnic-3,
 	 * we need to set the entire ilt range for this timers.
 	 */
-	if (CHIP_IS_E2(bp) && BP_VN(bp) == 3) {
+	if (!CHIP_IS_E1x(bp) && BP_VN(bp) == 3) {
 		struct ilt_client_info ilt_cli;
 		/* use dummy TM client */
 		memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
@@ -6962,7 +7189,7 @@
 	}
 
 	/* this assumes that reset_port() called before reset_func()*/
-	if (CHIP_IS_E2(bp))
+	if (!CHIP_IS_E1x(bp))
 		bnx2x_pf_disable(bp);
 
 	bp->dmae_ready = 0;
@@ -6973,6 +7200,9 @@
 	int port = BP_PORT(bp);
 	u32 val;
 
+	/* Reset physical Link */
+	bnx2x__link_reset(bp);
+
 	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
 
 	/* Do not rcv packets to BRB */
@@ -6994,92 +7224,66 @@
 	/* TODO: Close Doorbell port? */
 }
 
-static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
+static inline int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code)
 {
-	DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
-	   BP_ABS_FUNC(bp), reset_code);
+	struct bnx2x_func_state_params func_params = {0};
 
-	switch (reset_code) {
-	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
-		bnx2x_reset_port(bp);
-		bnx2x_reset_func(bp);
-		bnx2x_reset_common(bp);
-		break;
+	/* Prepare parameters for function state transitions */
+	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
 
-	case FW_MSG_CODE_DRV_UNLOAD_PORT:
-		bnx2x_reset_port(bp);
-		bnx2x_reset_func(bp);
-		break;
+	func_params.f_obj = &bp->func_obj;
+	func_params.cmd = BNX2X_F_CMD_HW_RESET;
 
-	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
-		bnx2x_reset_func(bp);
-		break;
+	func_params.params.hw_init.load_phase = load_code;
 
-	default:
-		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
-		break;
-	}
+	return bnx2x_func_state_change(bp, &func_params);
 }
 
-#ifdef BCM_CNIC
-static inline void bnx2x_del_fcoe_eth_macs(struct bnx2x *bp)
+static inline int bnx2x_func_stop(struct bnx2x *bp)
 {
-	if (bp->flags & FCOE_MACS_SET) {
-		if (!IS_MF_SD(bp))
-			bnx2x_set_fip_eth_mac_addr(bp, 0);
+	struct bnx2x_func_state_params func_params = {0};
+	int rc;
 
-		bnx2x_set_all_enode_macs(bp, 0);
+	/* Prepare parameters for function state transitions */
+	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+	func_params.f_obj = &bp->func_obj;
+	func_params.cmd = BNX2X_F_CMD_STOP;
 
-		bp->flags &= ~FCOE_MACS_SET;
-	}
-}
-#endif
-
-void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
-{
-	int port = BP_PORT(bp);
-	u32 reset_code = 0;
-	int i, cnt, rc;
-
-	/* Wait until tx fastpath tasks complete */
-	for_each_tx_queue(bp, i) {
-		struct bnx2x_fastpath *fp = &bp->fp[i];
-
-		cnt = 1000;
-		while (bnx2x_has_tx_work_unload(fp)) {
-
-			if (!cnt) {
-				BNX2X_ERR("timeout waiting for queue[%d]\n",
-					  i);
+	/*
+	 * Try to stop the function the 'good way'. If it fails (in case
+	 * of a parity error during bnx2x_chip_cleanup()) and we are not
+	 * in a debug mode, perform a state transaction in order to
+	 * enable a further HW_RESET transaction.
+	 */
+	rc = bnx2x_func_state_change(bp, &func_params);
+	if (rc) {
 #ifdef BNX2X_STOP_ON_ERROR
-				bnx2x_panic();
-				return -EBUSY;
+		return rc;
 #else
-				break;
+		BNX2X_ERR("FUNC_STOP ramrod failed. Running a dry "
+			  "transaction\n");
+		__set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
+		return bnx2x_func_state_change(bp, &func_params);
 #endif
-			}
-			cnt--;
-			msleep(1);
-		}
-	}
-	/* Give HW time to discard old tx messages */
-	msleep(1);
-
-	bnx2x_set_eth_mac(bp, 0);
-
-	bnx2x_invalidate_uc_list(bp);
-
-	if (CHIP_IS_E1(bp))
-		bnx2x_invalidate_e1_mc_list(bp);
-	else {
-		bnx2x_invalidate_e1h_mc_list(bp);
-		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
 	}
 
-#ifdef BCM_CNIC
-	bnx2x_del_fcoe_eth_macs(bp);
-#endif
+	return 0;
+}
 
+/**
+ * bnx2x_send_unload_req - request unload mode from the MCP.
+ *
+ * @bp:			driver handle
+ * @unload_mode:	requested function's unload mode
+ *
+ * Returns the unload mode returned by the MCP: COMMON, PORT or FUNC.
+ */
+u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
+{
+	u32 reset_code = 0;
+	int port = BP_PORT(bp);
+
+	/* Select the UNLOAD request mode */
 	if (unload_mode == UNLOAD_NORMAL)
 		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
 
@@ -7106,54 +7310,135 @@
 	} else
 		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
 
-	/* Close multi and leading connections
-	   Completions for ramrods are collected in a synchronous way */
-	for_each_queue(bp, i)
-
-		if (bnx2x_stop_client(bp, i))
-#ifdef BNX2X_STOP_ON_ERROR
-			return;
-#else
-			goto unload_error;
-#endif
-
-	rc = bnx2x_func_stop(bp);
-	if (rc) {
-		BNX2X_ERR("Function stop failed!\n");
-#ifdef BNX2X_STOP_ON_ERROR
-		return;
-#else
-		goto unload_error;
-#endif
-	}
-#ifndef BNX2X_STOP_ON_ERROR
-unload_error:
-#endif
+	/* Send the request to the MCP */
 	if (!BP_NOMCP(bp))
 		reset_code = bnx2x_fw_command(bp, reset_code, 0);
 	else {
+		int path = BP_PATH(bp);
+
 		DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d]      "
-				     "%d, %d, %d\n", BP_PATH(bp),
-		   load_count[BP_PATH(bp)][0],
-		   load_count[BP_PATH(bp)][1],
-		   load_count[BP_PATH(bp)][2]);
-		load_count[BP_PATH(bp)][0]--;
-		load_count[BP_PATH(bp)][1 + port]--;
+				     "%d, %d, %d\n",
+		   path, load_count[path][0], load_count[path][1],
+		   load_count[path][2]);
+		load_count[path][0]--;
+		load_count[path][1 + port]--;
 		DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d]  "
-				     "%d, %d, %d\n", BP_PATH(bp),
-		   load_count[BP_PATH(bp)][0], load_count[BP_PATH(bp)][1],
-		   load_count[BP_PATH(bp)][2]);
-		if (load_count[BP_PATH(bp)][0] == 0)
+				     "%d, %d, %d\n",
+		   path, load_count[path][0], load_count[path][1],
+		   load_count[path][2]);
+		if (load_count[path][0] == 0)
 			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
-		else if (load_count[BP_PATH(bp)][1 + port] == 0)
+		else if (load_count[path][1 + port] == 0)
 			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
 		else
 			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
 	}
 
-	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
-	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
-		bnx2x__link_reset(bp);
+	return reset_code;
+}
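
In the NO-MCP branch above, load_count[path][0] counts every loaded function on the path while load_count[path][1 + port] counts those on one port, so the last function down picks the widest reset. A condensed sketch of that bookkeeping (hypothetical standalone helper over the same array layout):

	static u32 pick_unload_reset_code(u32 counts[3], int port)
	{
		counts[0]--;		/* per-path total */
		counts[1 + port]--;	/* per-port total */

		if (counts[0] == 0)		/* last on the path */
			return FW_MSG_CODE_DRV_UNLOAD_COMMON;
		if (counts[1 + port] == 0)	/* last on this port */
			return FW_MSG_CODE_DRV_UNLOAD_PORT;
		return FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}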
+
+/**
+ * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
+ *
+ * @bp:		driver handle
+ */
+void bnx2x_send_unload_done(struct bnx2x *bp)
+{
+	/* Report UNLOAD_DONE to MCP */
+	if (!BP_NOMCP(bp))
+		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
+}
+
+void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
+{
+	int port = BP_PORT(bp);
+	int i, rc;
+	struct bnx2x_mcast_ramrod_params rparam = {0};
+	u32 reset_code;
+
+	/* Wait until tx fastpath tasks complete */
+	for_each_tx_queue(bp, i) {
+		struct bnx2x_fastpath *fp = &bp->fp[i];
+
+		rc = bnx2x_clean_tx_queue(bp, fp);
+#ifdef BNX2X_STOP_ON_ERROR
+		if (rc)
+			return;
+#endif
+	}
+
+	/* Give HW time to discard old tx messages */
+	usleep_range(1000, 1000);
+
+	/* Clean all ETH MACs */
+	rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_ETH_MAC, false);
+	if (rc < 0)
+		BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc);
+
+	/* Clean up UC list */
+	rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_UC_LIST_MAC,
+				true);
+	if (rc < 0)
+		BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: "
+			  "%d\n", rc);
+
+	/* Disable LLH */
+	if (!CHIP_IS_E1(bp))
+		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
+
+	/* Set "drop all" (stop Rx).
+	 * We need to take a netif_addr_lock() here in order to prevent
+	 * a race between the completion code and this code.
+	 */
+	netif_addr_lock_bh(bp->dev);
+	/* Schedule the rx_mode command */
+	if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
+		set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
+	else
+		bnx2x_set_storm_rx_mode(bp);
+
+	/* Cleanup multicast configuration */
+	rparam.mcast_obj = &bp->mcast_obj;
+	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
+	if (rc < 0)
+		BNX2X_ERR("Failed to send DEL multicast command: %d\n", rc);
+
+	netif_addr_unlock_bh(bp->dev);
+
+
+	/* Close multi and leading connections.
+	 * Completions for ramrods are collected in a synchronous way.
+	 */
+	for_each_queue(bp, i)
+		if (bnx2x_stop_queue(bp, i))
+#ifdef BNX2X_STOP_ON_ERROR
+			return;
+#else
+			goto unload_error;
+#endif
+	/* If the SP settings didn't get completed so far, something
+	 * very wrong has happened.
+	 */
+	if (!bnx2x_wait_sp_comp(bp, ~0x0UL))
+		BNX2X_ERR("Hmmm... Common slow path ramrods got stuck!\n");
+
+#ifndef BNX2X_STOP_ON_ERROR
+unload_error:
+#endif
+	rc = bnx2x_func_stop(bp);
+	if (rc) {
+		BNX2X_ERR("Function stop failed!\n");
+#ifdef BNX2X_STOP_ON_ERROR
+		return;
+#endif
+	}
+
+	/*
+	 * Send the UNLOAD_REQUEST to the MCP. This will return whether
+	 * this function should perform a FUNC, PORT or COMMON HW
+	 * reset.
+	 */
+	reset_code = bnx2x_send_unload_req(bp, unload_mode);
 
 	/* Disable HW interrupts, NAPI */
 	bnx2x_netif_stop(bp, 1);
@@ -7162,12 +7447,13 @@
 	bnx2x_free_irq(bp);
 
 	/* Reset the chip */
-	bnx2x_reset_chip(bp, reset_code);
+	rc = bnx2x_reset_hw(bp, reset_code);
+	if (rc)
+		BNX2X_ERR("HW_RESET failed\n");
+
 
 	/* Report UNLOAD_DONE to MCP */
-	if (!BP_NOMCP(bp))
-		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
-
+	bnx2x_send_unload_done(bp);
 }
 
 void bnx2x_disable_close_the_gate(struct bnx2x *bp)
@@ -7184,7 +7470,7 @@
 		val = REG_RD(bp, addr);
 		val &= ~(0x300);
 		REG_WR(bp, addr, val);
-	} else if (CHIP_IS_E1H(bp)) {
+	} else {
 		val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
 		val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
 			 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
@@ -7195,24 +7481,37 @@
 /* Close gates #2, #3 and #4: */
 static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
 {
-	u32 val, addr;
+	u32 val;
 
 	/* Gates #2 and #4a are closed/opened for "not E1" only */
 	if (!CHIP_IS_E1(bp)) {
 		/* #4 */
-		val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
-		REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
-		       close ? (val | 0x1) : (val & (~(u32)1)));
+		REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
 		/* #2 */
-		val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
-		REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
-		       close ? (val | 0x1) : (val & (~(u32)1)));
+		REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
 	}
 
 	/* #3 */
-	addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
-	val = REG_RD(bp, addr);
-	REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
+	if (CHIP_IS_E1x(bp)) {
+		/* Prevent interrupts from HC on both ports */
+		val = REG_RD(bp, HC_REG_CONFIG_1);
+		REG_WR(bp, HC_REG_CONFIG_1,
+		       (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
+		       (val & ~(u32)HC_CONFIG_1_REG_BLOCK_DISABLE_1));
+
+		val = REG_RD(bp, HC_REG_CONFIG_0);
+		REG_WR(bp, HC_REG_CONFIG_0,
+		       (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
+		       (val & ~(u32)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
+	} else {
+		/* Prevent incoming interrupts in IGU */
+		val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
+
+		REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION,
+		       (!close) ?
+		       (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
+		       (val & ~(u32)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
+	}
 
 	DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
 		close ? "closing" : "opening");
@@ -7330,7 +7629,6 @@
 	if (!CHIP_IS_E1(bp)) {
 		REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
 		REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
-		REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
 		mmiowb();
 	}
 }
@@ -7345,9 +7643,18 @@
  *      - GRC
  *      - RBCN, RBCP
  */
-static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
+static void bnx2x_process_kill_chip_reset(struct bnx2x *bp, bool global)
 {
 	u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
+	u32 global_bits2;
+
+	/*
+	 * Bits that have to be set in reset_mask2 if we want to reset 'global'
+	 * (per chip) blocks.
+	 */
+	global_bits2 =
+		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
+		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;
 
 	not_reset_mask1 =
 		MISC_REGISTERS_RESET_REG_1_RST_HC |
@@ -7355,7 +7662,7 @@
 		MISC_REGISTERS_RESET_REG_1_RST_PXP;
 
 	not_reset_mask2 =
-		MISC_REGISTERS_RESET_REG_2_RST_MDIO |
+		MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
 		MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
 		MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
 		MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
@@ -7371,20 +7678,76 @@
 	else
 		reset_mask2 = 0x1ffff;
 
-	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
-	       reset_mask1 & (~not_reset_mask1));
+	if (CHIP_IS_E3(bp)) {
+		reset_mask2 |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
+		reset_mask2 |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
+	}
+
+	/* Don't reset global blocks unless we need to */
+	if (!global)
+		reset_mask2 &= ~global_bits2;
+
+	/*
+	 * In case of attention in the QM, we need to reset PXP
+	 * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM
+	 * because otherwise QM reset would release 'close the gates' shortly
+	 * before resetting the PXP, then the PSWRQ would send a write
+	 * request to PGLUE. Then when PXP is reset, PGLUE would try to
+	 * read the payload data from PSWWR, but PSWWR would not
+	 * respond. The write queue in PGLUE would get stuck, and DMAE
+	 * commands would not return. Therefore it's important to reset
+	 * the second
+	 * reset register (containing the
+	 * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the
+	 * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM
+	 * bit).
+	 */
 	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
 	       reset_mask2 & (~not_reset_mask2));
 
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
+	       reset_mask1 & (~not_reset_mask1));
+
 	barrier();
 	mmiowb();
 
-	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
 	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
 	mmiowb();
 }
 
-static int bnx2x_process_kill(struct bnx2x *bp)
+/**
+ * bnx2x_er_poll_igu_vq - poll for the pending writes bit
+ *
+ * @bp:	driver handle
+ *
+ * The bit should get cleared in no more than 1s. Returns 0 if the
+ * pending writes bit gets cleared.
+ */
+static int bnx2x_er_poll_igu_vq(struct bnx2x *bp)
+{
+	u32 cnt = 1000;
+	u32 pend_bits = 0;
+
+	do {
+		pend_bits = REG_RD(bp, IGU_REG_PENDING_BITS_STATUS);
+
+		if (pend_bits == 0)
+			break;
+
+		usleep_range(1000, 1000);
+	} while (cnt-- > 0);
+
+	if (cnt <= 0) {
+		BNX2X_ERR("Still pending IGU requests pend_bits=%x!\n",
+			  pend_bits);
+		return -EBUSY;
+	}
+
+	return 0;
+}
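
The function above uses the usual bounded-poll idiom: re-read a status register up to ~1000 times with a ~1ms sleep in between, and fail with -EBUSY if the bit never clears. The generic shape, as a sketch with a hypothetical read callback:

	static int poll_until_clear(u32 (*read_status)(struct bnx2x *bp),
				    struct bnx2x *bp, int tries)
	{
		while (tries-- > 0) {
			if (read_status(bp) == 0)
				return 0;
			usleep_range(1000, 1000);	/* ~1ms between polls */
		}
		return -EBUSY;
	}

As a side note, usleep_range() with equal min and max gives the timer code no coalescing slack; a small window such as usleep_range(1000, 2000) is generally preferred.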
+
+static int bnx2x_process_kill(struct bnx2x *bp, bool global)
 {
 	int cnt = 1000;
 	u32 val = 0;
@@ -7403,7 +7766,7 @@
 		    ((port_is_idle_1 & 0x1) == 0x1) &&
 		    (pgl_exp_rom2 == 0xffffffff))
 			break;
-		msleep(1);
+		usleep_range(1000, 1000);
 	} while (cnt-- > 0);
 
 	if (cnt <= 0) {
@@ -7423,6 +7786,11 @@
 	/* Close gates #2, #3 and #4 */
 	bnx2x_set_234_gates(bp, true);
 
+	/* Poll for IGU VQs for 57712 and newer chips */
+	if (!CHIP_IS_E1x(bp) && bnx2x_er_poll_igu_vq(bp))
+		return -EAGAIN;
+
 	/* TBD: Indicate that "process kill" is in progress to MCP */
 
 	/* Clear "unprepared" bit */
@@ -7435,25 +7803,28 @@
 	/* Wait for 1ms to empty GLUE and PCI-E core queues,
 	 * PSWHST, GRC and PSWRD Tetris buffer.
 	 */
-	msleep(1);
+	usleep_range(1000, 1000);
 
 	/* Prepare to chip reset: */
 	/* MCP */
-	bnx2x_reset_mcp_prep(bp, &val);
+	if (global)
+		bnx2x_reset_mcp_prep(bp, &val);
 
 	/* PXP */
 	bnx2x_pxp_prep(bp);
 	barrier();
 
 	/* reset the chip */
-	bnx2x_process_kill_chip_reset(bp);
+	bnx2x_process_kill_chip_reset(bp, global);
 	barrier();
 
 	/* Recover after reset: */
 	/* MCP */
-	if (bnx2x_reset_mcp_comp(bp, val))
+	if (global && bnx2x_reset_mcp_comp(bp, val))
 		return -EAGAIN;
 
+	/* TBD: Add resetting the NO_MCP mode DB here */
+
 	/* PXP */
 	bnx2x_pxp_prep(bp);
 
@@ -7466,43 +7837,85 @@
 	return 0;
 }
 
-static int bnx2x_leader_reset(struct bnx2x *bp)
+int bnx2x_leader_reset(struct bnx2x *bp)
 {
 	int rc = 0;
+	bool global = bnx2x_reset_is_global(bp);
+
 	/* Try to recover after the failure */
-	if (bnx2x_process_kill(bp)) {
-		printk(KERN_ERR "%s: Something bad had happen! Aii!\n",
-		       bp->dev->name);
+	if (bnx2x_process_kill(bp, global)) {
+		netdev_err(bp->dev, "Something bad has happened on engine %d! "
+				    "Aii!\n", BP_PATH(bp));
 		rc = -EAGAIN;
 		goto exit_leader_reset;
 	}
 
-	/* Clear "reset is in progress" bit and update the driver state */
+	/*
+	 * Clear RESET_IN_PROGRES and RESET_GLOBAL bits and update the driver
+	 * state.
+	 */
 	bnx2x_set_reset_done(bp);
-	bp->recovery_state = BNX2X_RECOVERY_DONE;
+	if (global)
+		bnx2x_clear_reset_global(bp);
 
 exit_leader_reset:
 	bp->is_leader = 0;
-	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
-	smp_wmb();
+	bnx2x_release_leader_lock(bp);
+	smp_mb();
 	return rc;
 }
 
-/* Assumption: runs under rtnl lock. This together with the fact
+static inline void bnx2x_recovery_failed(struct bnx2x *bp)
+{
+	netdev_err(bp->dev, "Recovery has failed. Power cycle is needed.\n");
+
+	/* Disconnect this device */
+	netif_device_detach(bp->dev);
+
+	/*
+	 * Block ifup for all functions on this engine until "process kill"
+	 * or power cycle.
+	 */
+	bnx2x_set_reset_in_progress(bp);
+
+	/* Shut down the power */
+	bnx2x_set_power_state(bp, PCI_D3hot);
+
+	bp->recovery_state = BNX2X_RECOVERY_FAILED;
+
+	smp_mb();
+}
+
+/*
+ * Assumption: runs under rtnl lock. This together with the fact
  * that it's called only from bnx2x_reset_task() ensure that it
  * will never be called when netif_running(bp->dev) is false.
  */
 static void bnx2x_parity_recover(struct bnx2x *bp)
 {
+	bool global = false;
+
 	DP(NETIF_MSG_HW, "Handling parity\n");
 	while (1) {
 		switch (bp->recovery_state) {
 		case BNX2X_RECOVERY_INIT:
 			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
+			bnx2x_chk_parity_attn(bp, &global, false);
+
 			/* Try to get a LEADER_LOCK HW lock */
-			if (bnx2x_trylock_hw_lock(bp,
-				HW_LOCK_RESOURCE_RESERVED_08))
+			if (bnx2x_trylock_leader_lock(bp)) {
+				bnx2x_set_reset_in_progress(bp);
+				/*
+				 * If there was a global attention, set the
+				 * global reset bit.
+				 */
+
+				if (global)
+					bnx2x_set_reset_global(bp);
+
 				bp->is_leader = 1;
+			}
 
 			/* Stop the driver */
 			/* If interface has been removed - break */
@@ -7510,17 +7923,43 @@
 				return;
 
 			bp->recovery_state = BNX2X_RECOVERY_WAIT;
-			/* Ensure "is_leader" and "recovery_state"
-			 *  update values are seen on other CPUs
+
+			/*
+			 * Reset MCP command sequence number and MCP mail box
+			 * sequence as we are going to reset the MCP.
 			 */
-			smp_wmb();
+			if (global) {
+				bp->fw_seq = 0;
+				bp->fw_drv_pulse_wr_seq = 0;
+			}
+
+			/* Ensure "is_leader", MCP command sequence and
+			 * "recovery_state" update values are seen on other
+			 * CPUs.
+			 */
+			smp_mb();
 			break;
 
 		case BNX2X_RECOVERY_WAIT:
 			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
 			if (bp->is_leader) {
-				u32 load_counter = bnx2x_get_load_cnt(bp);
-				if (load_counter) {
+				int other_engine = BP_PATH(bp) ? 0 : 1;
+				u32 other_load_counter =
+					bnx2x_get_load_cnt(bp, other_engine);
+				u32 load_counter =
+					bnx2x_get_load_cnt(bp, BP_PATH(bp));
+				global = bnx2x_reset_is_global(bp);
+
+				/*
+				 * In case of a parity in a global block, let
+				 * the first leader that performs a
+				 * leader_reset() reset the global blocks in
+				 * order to clear global attentions. Otherwise
+				 * the gates will remain closed for that
+				 * engine.
+				 */
+				if (load_counter ||
+				    (global && other_load_counter)) {
 					/* Wait until all other functions get
 					 * down.
 					 */
@@ -7533,37 +7972,27 @@
 					 * normal. In any case it's an exit
 					 * point for a leader.
 					 */
-					if (bnx2x_leader_reset(bp) ||
-					bnx2x_nic_load(bp, LOAD_NORMAL)) {
-						printk(KERN_ERR"%s: Recovery "
-						"has failed. Power cycle is "
-						"needed.\n", bp->dev->name);
-						/* Disconnect this device */
-						netif_device_detach(bp->dev);
-						/* Block ifup for all function
-						 * of this ASIC until
-						 * "process kill" or power
-						 * cycle.
-						 */
-						bnx2x_set_reset_in_progress(bp);
-						/* Shut down the power */
-						bnx2x_set_power_state(bp,
-								PCI_D3hot);
+					if (bnx2x_leader_reset(bp)) {
+						bnx2x_recovery_failed(bp);
 						return;
 					}
 
-					return;
+					/* If we are here, it means that the
+					 * leader has succeeded and doesn't
+					 * want to be a leader any more. Try
+					 * to continue as a non-leader.
+					 */
+					break;
 				}
 			} else { /* non-leader */
-				if (!bnx2x_reset_is_done(bp)) {
+				if (!bnx2x_reset_is_done(bp, BP_PATH(bp))) {
 					/* Try to get a LEADER_LOCK HW lock as
 					 * long as a former leader may have
 					 * been unloaded by the user or
 					 * released a leadership by another
 					 * reason.
 					 */
-					if (bnx2x_trylock_hw_lock(bp,
-					    HW_LOCK_RESOURCE_RESERVED_08)) {
+					if (bnx2x_trylock_leader_lock(bp)) {
 						/* I'm a leader now! Restart a
 						 * switch case.
 						 */
@@ -7575,14 +8004,25 @@
 								HZ/10);
 					return;
 
-				} else { /* A leader has completed
-					  * the "process kill". It's an exit
-					  * point for a non-leader.
-					  */
-					bnx2x_nic_load(bp, LOAD_NORMAL);
-					bp->recovery_state =
-						BNX2X_RECOVERY_DONE;
-					smp_wmb();
+				} else {
+					/*
+					 * If there was a global attention, wait
+					 * for it to be cleared.
+					 */
+					if (bnx2x_reset_is_global(bp)) {
+						schedule_delayed_work(
+							&bp->reset_task, HZ/10);
+						return;
+					}
+
+					if (bnx2x_nic_load(bp, LOAD_NORMAL))
+						bnx2x_recovery_failed(bp);
+					else {
+						bp->recovery_state =
+							BNX2X_RECOVERY_DONE;
+						smp_mb();
+					}
+
 					return;
 				}
 			}
@@ -7624,6 +8064,37 @@
 
 /* end of nic load/unload */
 
+static void bnx2x_period_task(struct work_struct *work)
+{
+	struct bnx2x *bp = container_of(work, struct bnx2x, period_task.work);
+
+	if (!netif_running(bp->dev))
+		goto period_task_exit;
+
+	if (CHIP_REV_IS_SLOW(bp)) {
+		BNX2X_ERR("period task called on emulation, ignoring\n");
+		goto period_task_exit;
+	}
+
+	bnx2x_acquire_phy_lock(bp);
+	/*
+	 * The barrier is needed to ensure the ordering between the writing to
+	 * the bp->port.pmf in the bnx2x_nic_load() or bnx2x_pmf_update() and
+	 * the reading here.
+	 */
+	smp_mb();
+	if (bp->port.pmf) {
+		bnx2x_period_func(&bp->link_params, &bp->link_vars);
+
+		/* Re-queue task in 1 sec */
+		queue_delayed_work(bnx2x_wq, &bp->period_task, 1*HZ);
+	}
+
+	bnx2x_release_phy_lock(bp);
+period_task_exit:
+	return;
+}
+
 /*
  * Init service functions
  */
@@ -7681,8 +8152,8 @@
 			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
 			/* save our pf_num */
 			int orig_pf_num = bp->pf_num;
-			u32 swap_en;
-			u32 swap_val;
+			int port;
+			u32 swap_en, swap_val, value;
 
 			/* clear the UNDI indication */
 			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
@@ -7717,21 +8188,19 @@
 			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
 
 			bnx2x_undi_int_disable(bp);
+			port = BP_PORT(bp);
 
 			/* close input traffic and wait for it */
 			/* Do not rcv packets to BRB */
-			REG_WR(bp,
-			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
-					     NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
+			REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_DRV_MASK :
+					   NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
 			/* Do not direct rcv packets that are not for MCP to
 			 * the BRB */
-			REG_WR(bp,
-			       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
-					      NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
+			REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
+					   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
 			/* clear AEU */
-			REG_WR(bp,
-			     (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
-					    MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
+			REG_WR(bp, (port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
+					   MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
 			msleep(10);
 
 			/* save NIG port swap info */
@@ -7741,9 +8210,17 @@
 			REG_WR(bp,
 			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
 			       0xd3ffffff);
+
+			value = 0x1400;
+			if (CHIP_IS_E3(bp)) {
+				value |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
+				value |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
+			}
+
 			REG_WR(bp,
 			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
-			       0x1403);
+			       value);
+
 			/* take the NIG out of reset and restore swap values */
 			REG_WR(bp,
 			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
@@ -7784,7 +8261,7 @@
 	/* Set doorbell size */
 	bp->db_size = (1 << BNX2X_DB_SHIFT);
 
-	if (CHIP_IS_E2(bp)) {
+	if (!CHIP_IS_E1x(bp)) {
 		val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
 		if ((val & 1) == 0)
 			val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
@@ -7804,16 +8281,6 @@
 		bp->pfid = bp->pf_num;			/* 0..7 */
 	}
 
-	/*
-	 * set base FW non-default (fast path) status block id, this value is
-	 * used to initialize the fw_sb_id saved on the fp/queue structure to
-	 * determine the id used by the FW.
-	 */
-	if (CHIP_IS_E1x(bp))
-		bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x;
-	else /* E2 */
-		bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E2;
-
 	bp->link_params.chip_id = bp->common.chip_id;
 	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
 
@@ -7825,13 +8292,15 @@
 	}
 
 	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
-	bp->common.flash_size = (NVRAM_1MB_SIZE <<
+	bp->common.flash_size = (BNX2X_NVRAM_1MB_SIZE <<
 				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
 	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
 		       bp->common.flash_size, bp->common.flash_size);
 
 	bnx2x_init_shmem(bp);
 
 	bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
 					MISC_REG_GENERIC_CR_1 :
 					MISC_REG_GENERIC_CR_0));
@@ -7941,8 +8410,14 @@
 			}
 		}
 	}
-	bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
-				   NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
+
+	/* The number of CAM entries for this function is expected to
+	 * equal the MSI-X table size (which was used during the
+	 * bp->l2_cid_count value calculation).
+	 * We want a harsh warning if these values differ!
+	 */
+	WARN_ON(bp->igu_sb_cnt != NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
+
 	if (bp->igu_sb_cnt == 0)
 		BNX2X_ERR("CAM configuration error\n");
 }
@@ -7991,24 +8466,25 @@
 			return;
 	}
 
-	switch (switch_cfg) {
-	case SWITCH_CFG_1G:
-		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
-					   port*0x10);
-		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
-		break;
-
-	case SWITCH_CFG_10G:
-		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
-					   port*0x18);
-		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
-		break;
-
-	default:
-		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
-			  bp->port.link_config[0]);
-		return;
+	if (CHIP_IS_E3(bp))
+		bp->port.phy_addr = REG_RD(bp, MISC_REG_WC0_CTRL_PHY_ADDR);
+	else {
+		switch (switch_cfg) {
+		case SWITCH_CFG_1G:
+			bp->port.phy_addr = REG_RD(
+				bp, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10);
+			break;
+		case SWITCH_CFG_10G:
+			bp->port.phy_addr = REG_RD(
+				bp, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18);
+			break;
+		default:
+			BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
+				  bp->port.link_config[0]);
+			return;
+		}
 	}
+	BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
 	/* mask what we support according to speed_cap_mask per configuration */
 	for (idx = 0; idx < cfg_size; idx++) {
 		if (!(bp->link_params.speed_cap_mask[idx] &
@@ -8089,7 +8565,7 @@
 					(ADVERTISED_10baseT_Full |
 					 ADVERTISED_TP);
 			} else {
-				BNX2X_ERROR("NVRAM config error. "
+				BNX2X_ERR("NVRAM config error. "
 					    "Invalid link_config 0x%x"
 					    "  speed_cap_mask 0x%x\n",
 					    link_config,
@@ -8108,7 +8584,7 @@
 					(ADVERTISED_10baseT_Half |
 					 ADVERTISED_TP);
 			} else {
-				BNX2X_ERROR("NVRAM config error. "
+				BNX2X_ERR("NVRAM config error. "
 					    "Invalid link_config 0x%x"
 					    "  speed_cap_mask 0x%x\n",
 					    link_config,
@@ -8126,7 +8602,7 @@
 					(ADVERTISED_100baseT_Full |
 					 ADVERTISED_TP);
 			} else {
-				BNX2X_ERROR("NVRAM config error. "
+				BNX2X_ERR("NVRAM config error. "
 					    "Invalid link_config 0x%x"
 					    "  speed_cap_mask 0x%x\n",
 					    link_config,
@@ -8146,7 +8622,7 @@
 					(ADVERTISED_100baseT_Half |
 					 ADVERTISED_TP);
 			} else {
-				BNX2X_ERROR("NVRAM config error. "
+				BNX2X_ERR("NVRAM config error. "
 				    "Invalid link_config 0x%x"
 				    "  speed_cap_mask 0x%x\n",
 				    link_config,
@@ -8164,7 +8640,7 @@
 					(ADVERTISED_1000baseT_Full |
 					 ADVERTISED_TP);
 			} else {
-				BNX2X_ERROR("NVRAM config error. "
+				BNX2X_ERR("NVRAM config error. "
 				    "Invalid link_config 0x%x"
 				    "  speed_cap_mask 0x%x\n",
 				    link_config,
@@ -8182,7 +8658,7 @@
 					(ADVERTISED_2500baseX_Full |
 						ADVERTISED_TP);
 			} else {
-				BNX2X_ERROR("NVRAM config error. "
+				BNX2X_ERR("NVRAM config error. "
 				    "Invalid link_config 0x%x"
 				    "  speed_cap_mask 0x%x\n",
 				    link_config,
@@ -8192,8 +8668,6 @@
 			break;
 
 		case PORT_FEATURE_LINK_SPEED_10G_CX4:
-		case PORT_FEATURE_LINK_SPEED_10G_KX4:
-		case PORT_FEATURE_LINK_SPEED_10G_KR:
 			if (bp->port.supported[idx] &
 			    SUPPORTED_10000baseT_Full) {
 				bp->link_params.req_line_speed[idx] =
@@ -8202,7 +8676,7 @@
 					(ADVERTISED_10000baseT_Full |
 						ADVERTISED_FIBRE);
 			} else {
-				BNX2X_ERROR("NVRAM config error. "
+				BNX2X_ERR("NVRAM config error. "
 				    "Invalid link_config 0x%x"
 				    "  speed_cap_mask 0x%x\n",
 				    link_config,
@@ -8210,11 +8684,14 @@
 				return;
 			}
 			break;
+		case PORT_FEATURE_LINK_SPEED_20G:
+			bp->link_params.req_line_speed[idx] = SPEED_20000;
 
+			break;
 		default:
-			BNX2X_ERROR("NVRAM config error. "
-				    "BAD link speed link_config 0x%x\n",
-					  link_config);
+			BNX2X_ERR("NVRAM config error. "
+				  "BAD link speed link_config 0x%x\n",
+				  link_config);
 				bp->link_params.req_line_speed[idx] =
 							SPEED_AUTO_NEG;
 				bp->port.advertising[idx] =
@@ -8364,6 +8841,9 @@
 	u8 *fip_mac = bp->fip_mac;
 #endif
 
+	/* Zero primary MAC configuration */
+	memset(bp->dev->dev_addr, 0, ETH_ALEN);
+
 	if (BP_NOMCP(bp)) {
 		BNX2X_ERROR("warning: random MAC workaround active\n");
 		random_ether_addr(bp->dev->dev_addr);
@@ -8385,9 +8865,10 @@
 						     iscsi_mac_addr_upper);
 				val = MF_CFG_RD(bp, func_ext_config[func].
 						    iscsi_mac_addr_lower);
-				BNX2X_DEV_INFO("Read iSCSI MAC: "
-					       "0x%x:0x%04x\n", val2, val);
 				bnx2x_set_mac_buf(iscsi_mac, val, val2);
+				BNX2X_DEV_INFO("Read iSCSI MAC: "
+					       BNX2X_MAC_FMT"\n",
+					       BNX2X_MAC_PRN_LIST(iscsi_mac));
 			} else
 				bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
 
@@ -8396,9 +8877,10 @@
 						     fcoe_mac_addr_upper);
 				val = MF_CFG_RD(bp, func_ext_config[func].
 						    fcoe_mac_addr_lower);
-				BNX2X_DEV_INFO("Read FCoE MAC to "
-					       "0x%x:0x%04x\n", val2, val);
 				bnx2x_set_mac_buf(fip_mac, val, val2);
+				BNX2X_DEV_INFO("Read FCoE L2 MAC: "
+					       BNX2X_MAC_FMT"\n",
+					       BNX2X_MAC_PRN_LIST(fip_mac));
 
 			} else
 				bp->flags |= NO_FCOE_FLAG;
@@ -8447,6 +8929,13 @@
 		memset(bp->fip_mac, 0, ETH_ALEN);
 	}
 #endif
+
+	if (!is_valid_ether_addr(bp->dev->dev_addr))
+		dev_err(&bp->pdev->dev,
+			"bad Ethernet MAC address configuration: "
+			BNX2X_MAC_FMT", change it manually before bringing up "
+			"the appropriate network interface\n",
+			BNX2X_MAC_PRN_LIST(bp->dev->dev_addr));
 }
 
 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
@@ -8468,17 +8957,55 @@
 	} else {
 		bp->common.int_block = INT_BLOCK_IGU;
 		val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
+
 		if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
-			DP(NETIF_MSG_PROBE, "IGU Backward Compatible Mode\n");
+			int tout = 5000;
+
+			BNX2X_DEV_INFO("FORCING Normal Mode\n");
+
+			val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
+			REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, val);
+			REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x7f);
+
+			while (tout && REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
+				tout--;
+				usleep_range(1000, 1000);
+			}
+
+			if (REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
+				dev_err(&bp->pdev->dev,
+					"FORCING Normal Mode failed!!!\n");
+				return -EPERM;
+			}
+		}
+
+		if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
+			BNX2X_DEV_INFO("IGU Backward Compatible Mode\n");
 			bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
 		} else
-			DP(NETIF_MSG_PROBE, "IGU Normal Mode\n");
+			BNX2X_DEV_INFO("IGU Normal Mode\n");
 
 		bnx2x_get_igu_cam_info(bp);
 
 	}
-	DP(NETIF_MSG_PROBE, "igu_dsb_id %d  igu_base_sb %d  igu_sb_cnt %d\n",
-			     bp->igu_dsb_id, bp->igu_base_sb, bp->igu_sb_cnt);
+
+	/*
+	 * Set the base FW non-default (fast path) status block ID. This
+	 * value is used to initialize the fw_sb_id saved in the fp/queue
+	 * structure and so determines the ID used by the FW.
+	 */
+	if (CHIP_IS_E1x(bp))
+		bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x + BP_L_ID(bp);
+	else /*
+	      * 57712 - we currently use one FW SB per IGU SB (Rx and Tx of
+	      * the same queue are indicated on the same IGU SB). So we prefer
+	      * FW and IGU SBs to be the same value.
+	      */
+		bp->base_fw_ndsb = bp->igu_base_sb;
+
+	BNX2X_DEV_INFO("igu_dsb_id %d  igu_base_sb %d  igu_sb_cnt %d\n"
+		       "base_fw_ndsb %d\n", bp->igu_dsb_id, bp->igu_base_sb,
+		       bp->igu_sb_cnt, bp->base_fw_ndsb);
 
 	/*
 	 * Initialize MF configuration
@@ -8489,10 +9016,10 @@
 	vn = BP_E1HVN(bp);
 
 	if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
-		DP(NETIF_MSG_PROBE,
-			    "shmem2base 0x%x, size %d, mfcfg offset %d\n",
-			    bp->common.shmem2_base, SHMEM2_RD(bp, size),
-			    (u32)offsetof(struct shmem2_region, mf_cfg_addr));
+		BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n",
+			       bp->common.shmem2_base, SHMEM2_RD(bp, size),
+			      (u32)offsetof(struct shmem2_region, mf_cfg_addr));
+
 		if (SHMEM2_HAS(bp, mf_cfg_addr))
 			bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
 		else
@@ -8523,8 +9050,8 @@
 					bp->mf_config[vn] = MF_CFG_RD(bp,
 						   func_mf_config[func].config);
 				} else
-					DP(NETIF_MSG_PROBE, "illegal MAC "
-							    "address for SI\n");
+					BNX2X_DEV_INFO("illegal MAC address "
+						       "for SI\n");
 				break;
 			case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
 				/* get OV configuration */
@@ -8537,14 +9064,12 @@
 					bp->mf_config[vn] = MF_CFG_RD(bp,
 						func_mf_config[func].config);
 				} else
-					DP(NETIF_MSG_PROBE, "illegal OV for "
-							    "SD\n");
+					BNX2X_DEV_INFO("illegal OV for SD\n");
 				break;
 			default:
 				/* Unknown configuration: reset mf_config */
 				bp->mf_config[vn] = 0;
-				DP(NETIF_MSG_PROBE, "Unknown MF mode 0x%x\n",
-				   val);
+				BNX2X_DEV_INFO("unknown MF mode 0x%x\n", val);
 			}
 		}
 
@@ -8557,13 +9082,16 @@
 			      FUNC_MF_CFG_E1HOV_TAG_MASK;
 			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
 				bp->mf_ov = val;
-				BNX2X_DEV_INFO("MF OV for func %d is %d"
-					       " (0x%04x)\n", func,
-					       bp->mf_ov, bp->mf_ov);
+				bp->path_has_ovlan = true;
+
+				BNX2X_DEV_INFO("MF OV for func %d is %d "
+					       "(0x%04x)\n", func, bp->mf_ov,
+					       bp->mf_ov);
 			} else {
-				BNX2X_ERR("No valid MF OV for func %d,"
-					  "  aborting\n", func);
-				rc = -EPERM;
+				dev_err(&bp->pdev->dev,
+					"No valid MF OV for func %d, "
+					"aborting\n", func);
+				return -EPERM;
 			}
 			break;
 		case MULTI_FUNCTION_SI:
@@ -8572,31 +9100,40 @@
 			break;
 		default:
 			if (vn) {
-				BNX2X_ERR("VN %d in single function mode,"
-					  "  aborting\n", vn);
-				rc = -EPERM;
+				dev_err(&bp->pdev->dev,
+					"VN %d is in a single function mode, "
+					"aborting\n", vn);
+				return -EPERM;
 			}
 			break;
 		}
 
+		/* Check if the other port on the path needs an ovlan.
+		 * Since the MF configuration is shared between ports, the
+		 * only possible mixed modes are
+		 * {SF, SI}, {SF, SD}, {SD, SF} and {SI, SF}.
+		 */
+		if (CHIP_MODE_IS_4_PORT(bp) &&
+		    !bp->path_has_ovlan &&
+		    !IS_MF(bp) &&
+		    bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
+			u8 other_port = !BP_PORT(bp);
+			u8 other_func = BP_PATH(bp) + 2*other_port;
+			val = MF_CFG_RD(bp,
+					func_mf_config[other_func].e1hov_tag);
+			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
+				bp->path_has_ovlan = true;
+		}
 	}
 
 	/* adjust igu_sb_cnt to MF for E1x */
 	if (CHIP_IS_E1x(bp) && IS_MF(bp))
 		bp->igu_sb_cnt /= E1HVN_MAX;
 
-	/*
-	 * adjust E2 sb count: to be removed when FW will support
-	 * more then 16 L2 clients
-	 */
-#define MAX_L2_CLIENTS				16
-	if (CHIP_IS_E2(bp))
-		bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
-				       MAX_L2_CLIENTS / (IS_MF(bp) ? 4 : 1));
+	/* port info */
+	bnx2x_get_port_hwinfo(bp);
 
 	if (!BP_NOMCP(bp)) {
-		bnx2x_get_port_hwinfo(bp);
-
 		bp->fw_seq =
 			(SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
 			 DRV_MSG_SEQ_NUMBER_MASK);
@@ -8610,6 +9147,16 @@
 	bnx2x_get_cnic_info(bp);
 #endif
 
+	/* Get current FW pulse sequence */
+	if (!BP_NOMCP(bp)) {
+		int mb_idx = BP_FW_MB_IDX(bp);
+
+		bp->fw_drv_pulse_wr_seq =
+				(SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) &
+				 DRV_PULSE_SEQ_MASK);
+		BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
+	}
+
 	return rc;
 }
 
@@ -8677,16 +9224,61 @@
 	return;
 }
 
+static void __devinit bnx2x_set_modes_bitmap(struct bnx2x *bp)
+{
+	u32 flags = 0;
+
+	if (CHIP_REV_IS_FPGA(bp))
+		SET_FLAGS(flags, MODE_FPGA);
+	else if (CHIP_REV_IS_EMUL(bp))
+		SET_FLAGS(flags, MODE_EMUL);
+	else
+		SET_FLAGS(flags, MODE_ASIC);
+
+	if (CHIP_MODE_IS_4_PORT(bp))
+		SET_FLAGS(flags, MODE_PORT4);
+	else
+		SET_FLAGS(flags, MODE_PORT2);
+
+	if (CHIP_IS_E2(bp))
+		SET_FLAGS(flags, MODE_E2);
+	else if (CHIP_IS_E3(bp)) {
+		SET_FLAGS(flags, MODE_E3);
+		if (CHIP_REV(bp) == CHIP_REV_Ax)
+			SET_FLAGS(flags, MODE_E3_A0);
+		else { /* CHIP_REV(bp) == CHIP_REV_Bx */
+			SET_FLAGS(flags, MODE_E3_B0);
+			SET_FLAGS(flags, MODE_COS_BC);
+		}
+	}
+
+	if (IS_MF(bp)) {
+		SET_FLAGS(flags, MODE_MF);
+		switch (bp->mf_mode) {
+		case MULTI_FUNCTION_SD:
+			SET_FLAGS(flags, MODE_MF_SD);
+			break;
+		case MULTI_FUNCTION_SI:
+			SET_FLAGS(flags, MODE_MF_SI);
+			break;
+		}
+	} else
+		SET_FLAGS(flags, MODE_SF);
+
+#if defined(__LITTLE_ENDIAN)
+	SET_FLAGS(flags, MODE_LITTLE_ENDIAN);
+#else /*(__BIG_ENDIAN)*/
+	SET_FLAGS(flags, MODE_BIG_ENDIAN);
+#endif
+	INIT_MODE_FLAGS(bp) = flags;
+}
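SET_FLAGS and INIT_MODE_FLAGS are driver macros defined elsewhere in the bnx2x
headers; a plausible minimal definition, shown only to make the bitmap
construction above concrete (an assumption, not the verbatim header):

/* Sketch of the flag helpers assumed above (real definitions in bnx2x.h);
 * the init_mode_flags field name is hypothetical. */
#define SET_FLAGS(flags, bits)		((flags) |= (bits))
#define GET_FLAGS(flags, bits)		((flags) & (bits))
#define INIT_MODE_FLAGS(bp)		((bp)->init_mode_flags)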
+
 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
 {
 	int func;
 	int timer_interval;
 	int rc;
 
-	/* Disable interrupt handling until HW is initialized */
-	atomic_set(&bp->intr_sem, 1);
-	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
-
 	mutex_init(&bp->port.phy_mutex);
 	mutex_init(&bp->fw_mb_mutex);
 	spin_lock_init(&bp->stats_lock);
@@ -8696,11 +9288,16 @@
 
 	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
 	INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
-
+	INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task);
 	rc = bnx2x_get_hwinfo(bp);
+	if (rc)
+		return rc;
 
-	if (!rc)
-		rc = bnx2x_alloc_mem_bp(bp);
+	bnx2x_set_modes_bitmap(bp);
+
+	rc = bnx2x_alloc_mem_bp(bp);
+	if (rc)
+		return rc;
 
 	bnx2x_read_fwinfo(bp);
 
@@ -8718,7 +9315,6 @@
 					"must load devices in order!\n");
 
 	bp->multi_mode = multi_mode;
-	bp->int_mode = int_mode;
 
 	/* Set TPA flags */
 	if (disable_tpa) {
@@ -8754,6 +9350,13 @@
 	bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
 	bnx2x_dcbx_init_params(bp);
 
+#ifdef BCM_CNIC
+	if (CHIP_IS_E1x(bp))
+		bp->cnic_base_cl_id = FP_SB_MAX_E1x;
+	else
+		bp->cnic_base_cl_id = FP_SB_MAX_E2;
+#endif
+
 	return rc;
 }
 
@@ -8762,49 +9365,70 @@
 * General service functions
 ****************************************************************************/
 
+/*
+ * net_device service functions
+ */
+
 /* called with rtnl_lock */
 static int bnx2x_open(struct net_device *dev)
 {
 	struct bnx2x *bp = netdev_priv(dev);
+	bool global = false;
+	int other_engine = BP_PATH(bp) ? 0 : 1;
+	u32 other_load_counter, load_counter;
 
 	netif_carrier_off(dev);
 
 	bnx2x_set_power_state(bp, PCI_D0);
 
-	if (!bnx2x_reset_is_done(bp)) {
-		do {
-			/* Reset MCP mail box sequence if there is on going
-			 * recovery
-			 */
-			bp->fw_seq = 0;
+	other_load_counter = bnx2x_get_load_cnt(bp, other_engine);
+	load_counter = bnx2x_get_load_cnt(bp, BP_PATH(bp));
 
-			/* If it's the first function to load and reset done
-			 * is still not cleared it may mean that. We don't
-			 * check the attention state here because it may have
-			 * already been cleared by a "common" reset but we
-			 * shell proceed with "process kill" anyway.
+	/*
+	 * If a parity error happened during the unload, then attentions
+	 * and/or RECOVERY_IN_PROGRESS may still be set. In this case we
+	 * want the first function loaded on the current engine to
+	 * complete the recovery.
+	 */
+	if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) ||
+	    bnx2x_chk_parity_attn(bp, &global, true))
+		do {
+			/*
+			 * If there are attentions and they are in global
+			 * blocks, set the GLOBAL_RESET bit regardless of
+			 * whether it will be this function that completes
+			 * the recovery or not.
 			 */
-			if ((bnx2x_get_load_cnt(bp) == 0) &&
-				bnx2x_trylock_hw_lock(bp,
-				HW_LOCK_RESOURCE_RESERVED_08) &&
-				(!bnx2x_leader_reset(bp))) {
-				DP(NETIF_MSG_HW, "Recovered in open\n");
+			if (global)
+				bnx2x_set_reset_global(bp);
+
+			/*
+			 * Only the first function on the current engine should
+			 * try to recover in open. In case of attentions in
+			 * global blocks only the first in the chip should try
+			 * to recover.
+			 */
+			if ((!load_counter &&
+			     (!global || !other_load_counter)) &&
+			    bnx2x_trylock_leader_lock(bp) &&
+			    !bnx2x_leader_reset(bp)) {
+				netdev_info(bp->dev, "Recovered in open\n");
 				break;
 			}
 
+			/* recovery has failed... */
 			bnx2x_set_power_state(bp, PCI_D3hot);
+			bp->recovery_state = BNX2X_RECOVERY_FAILED;
 
-			printk(KERN_ERR"%s: Recovery flow hasn't been properly"
+			netdev_err(bp->dev, "Recovery flow hasn't been properly"
 			" completed yet. Try again later. If u still see this"
 			" message after a few retries then power cycle is"
-			" required.\n", bp->dev->name);
+			" required.\n");
 
 			return -EAGAIN;
 		} while (0);
-	}
 
 	bp->recovery_state = BNX2X_RECOVERY_DONE;
-
 	return bnx2x_nic_load(bp, LOAD_OPEN);
 }
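The recovery gating above reduces to a short list of conditions, summarized
from the comments in bnx2x_open() itself:

/*
 * bnx2x_open() attempts the recovery itself only when:
 *  - no other function is already loaded on this engine
 *    (load_counter == 0), and
 *  - for attentions in global blocks, no function is loaded on the other
 *    engine either (other_load_counter == 0), and
 *  - the leader lock can be taken (bnx2x_trylock_leader_lock()).
 * Otherwise it powers the device down and returns -EAGAIN, leaving the
 * recovery to a later loader.
 */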
 
@@ -8815,198 +9439,126 @@
 
 	/* Unload the driver, release IRQs */
 	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
+
+	/* Power off */
 	bnx2x_set_power_state(bp, PCI_D3hot);
 
 	return 0;
 }
 
-#define E1_MAX_UC_LIST	29
-#define E1H_MAX_UC_LIST	30
-#define E2_MAX_UC_LIST	14
-static inline u8 bnx2x_max_uc_list(struct bnx2x *bp)
+static inline int bnx2x_init_mcast_macs_list(struct bnx2x *bp,
+					 struct bnx2x_mcast_ramrod_params *p)
 {
-	if (CHIP_IS_E1(bp))
-		return E1_MAX_UC_LIST;
-	else if (CHIP_IS_E1H(bp))
-		return E1H_MAX_UC_LIST;
-	else
-		return E2_MAX_UC_LIST;
-}
-
-
-static inline u8 bnx2x_uc_list_cam_offset(struct bnx2x *bp)
-{
-	if (CHIP_IS_E1(bp))
-		/* CAM Entries for Port0:
-		 *      0 - prim ETH MAC
-		 *      1 - BCAST MAC
-		 *      2 - iSCSI L2 ring ETH MAC
-		 *      3-31 - UC MACs
-		 *
-		 * Port1 entries are allocated the same way starting from
-		 * entry 32.
-		 */
-		return 3 + 32 * BP_PORT(bp);
-	else if (CHIP_IS_E1H(bp)) {
-		/* CAM Entries:
-		 *      0-7  - prim ETH MAC for each function
-		 *      8-15 - iSCSI L2 ring ETH MAC for each function
-		 *      16 till 255 UC MAC lists for each function
-		 *
-		 * Remark: There is no FCoE support for E1H, thus FCoE related
-		 *         MACs are not considered.
-		 */
-		return E1H_FUNC_MAX * (CAM_ISCSI_ETH_LINE + 1) +
-			bnx2x_max_uc_list(bp) * BP_FUNC(bp);
-	} else {
-		/* CAM Entries (there is a separate CAM per engine):
-		 *      0-4  - prim ETH MAC for each function
-		 *      4-7 - iSCSI L2 ring ETH MAC for each function
-		 *      8-11 - FIP ucast L2 MAC for each function
-		 *      12-15 - ALL_ENODE_MACS mcast MAC for each function
-		 *      16 till 71 UC MAC lists for each function
-		 */
-		u8 func_idx =
-			(CHIP_MODE_IS_4_PORT(bp) ? BP_FUNC(bp) : BP_VN(bp));
-
-		return E2_FUNC_MAX * (CAM_MAX_PF_LINE + 1) +
-			bnx2x_max_uc_list(bp) * func_idx;
-	}
-}
-
-/* set uc list, do not wait as wait implies sleep and
- * set_rx_mode can be invoked from non-sleepable context.
- *
- * Instead we use the same ramrod data buffer each time we need
- * to configure a list of addresses, and use the fact that the
- * list of MACs is changed in an incremental way and that the
- * function is called under the netif_addr_lock. A temporary
- * inconsistent CAM configuration (possible in case of very fast
- * sequence of add/del/add on the host side) will shortly be
- * restored by the handler of the last ramrod.
- */
-static int bnx2x_set_uc_list(struct bnx2x *bp)
-{
-	int i = 0, old;
-	struct net_device *dev = bp->dev;
-	u8 offset = bnx2x_uc_list_cam_offset(bp);
+	int mc_count = netdev_mc_count(bp->dev);
+	struct bnx2x_mcast_list_elem *mc_mac =
+		kzalloc(sizeof(*mc_mac) * mc_count, GFP_ATOMIC);
 	struct netdev_hw_addr *ha;
-	struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config);
-	dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config);
 
-	if (netdev_uc_count(dev) > bnx2x_max_uc_list(bp))
-		return -EINVAL;
+	if (!mc_mac)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&p->mcast_list);
+
+	netdev_for_each_mc_addr(ha, bp->dev) {
+		mc_mac->mac = bnx2x_mc_addr(ha);
+		list_add_tail(&mc_mac->link, &p->mcast_list);
+		mc_mac++;
+	}
+
+	p->mcast_list_len = mc_count;
+
+	return 0;
+}
+
+static inline void bnx2x_free_mcast_macs_list(
+	struct bnx2x_mcast_ramrod_params *p)
+{
+	struct bnx2x_mcast_list_elem *mc_mac =
+		list_first_entry(&p->mcast_list, struct bnx2x_mcast_list_elem,
+				 link);
+
+	WARN_ON(!mc_mac);
+	kfree(mc_mac);
+}
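Note the allocation scheme shared by the two helpers above: every list element
comes from a single kzalloc() block, which is why freeing the first list entry
releases the whole list. Sketched as a comment:

/*
 * bnx2x_init_mcast_macs_list() carves all mc_count elements out of one
 * kzalloc() region and links them in order:
 *
 *   mc_mac[0] -> mc_mac[1] -> ... -> mc_mac[mc_count - 1]
 *
 * The first entry of p->mcast_list is therefore the base pointer of the
 * whole allocation, so bnx2x_free_mcast_macs_list() needs only a single
 * kfree() on it.
 */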
+
+/**
+ * bnx2x_set_uc_list - configure a new list of unicast MACs.
+ *
+ * @bp: driver handle
+ *
+ * We will use zero (0) as a MAC type for these MACs.
+ */
+static inline int bnx2x_set_uc_list(struct bnx2x *bp)
+{
+	int rc;
+	struct net_device *dev = bp->dev;
+	struct netdev_hw_addr *ha;
+	struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
+	unsigned long ramrod_flags = 0;
+
+	/* First, schedule a clean-up of the old configuration */
+	rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, false);
+	if (rc < 0) {
+		BNX2X_ERR("Failed to schedule DELETE operations: %d\n", rc);
+		return rc;
+	}
 
 	netdev_for_each_uc_addr(ha, dev) {
-		/* copy mac */
-		config_cmd->config_table[i].msb_mac_addr =
-			swab16(*(u16 *)&bnx2x_uc_addr(ha)[0]);
-		config_cmd->config_table[i].middle_mac_addr =
-			swab16(*(u16 *)&bnx2x_uc_addr(ha)[2]);
-		config_cmd->config_table[i].lsb_mac_addr =
-			swab16(*(u16 *)&bnx2x_uc_addr(ha)[4]);
-
-		config_cmd->config_table[i].vlan_id = 0;
-		config_cmd->config_table[i].pf_id = BP_FUNC(bp);
-		config_cmd->config_table[i].clients_bit_vector =
-			cpu_to_le32(1 << BP_L_ID(bp));
-
-		SET_FLAG(config_cmd->config_table[i].flags,
-			MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
-			T_ETH_MAC_COMMAND_SET);
-
-		DP(NETIF_MSG_IFUP,
-		   "setting UCAST[%d] (%04x:%04x:%04x)\n", i,
-		   config_cmd->config_table[i].msb_mac_addr,
-		   config_cmd->config_table[i].middle_mac_addr,
-		   config_cmd->config_table[i].lsb_mac_addr);
-
-		i++;
-
-		/* Set uc MAC in NIG */
-		bnx2x_set_mac_in_nig(bp, 1, bnx2x_uc_addr(ha),
-				     LLH_CAM_ETH_LINE + i);
-	}
-	old = config_cmd->hdr.length;
-	if (old > i) {
-		for (; i < old; i++) {
-			if (CAM_IS_INVALID(config_cmd->
-					   config_table[i])) {
-				/* already invalidated */
-				break;
-			}
-			/* invalidate */
-			SET_FLAG(config_cmd->config_table[i].flags,
-				MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
-				T_ETH_MAC_COMMAND_INVALIDATE);
+		rc = bnx2x_set_mac_one(bp, bnx2x_uc_addr(ha), mac_obj, true,
+				       BNX2X_UC_LIST_MAC, &ramrod_flags);
+		if (rc < 0) {
+			BNX2X_ERR("Failed to schedule ADD operations: %d\n",
+				  rc);
+			return rc;
 		}
 	}
 
-	wmb();
-
-	config_cmd->hdr.length = i;
-	config_cmd->hdr.offset = offset;
-	config_cmd->hdr.client_id = 0xff;
-	/* Mark that this ramrod doesn't use bp->set_mac_pending for
-	 * synchronization.
-	 */
-	config_cmd->hdr.echo = 0;
-
-	mb();
-
-	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
-		   U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
-
-}
-
-void bnx2x_invalidate_uc_list(struct bnx2x *bp)
-{
-	int i;
-	struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config);
-	dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config);
-	int ramrod_flags = WAIT_RAMROD_COMMON;
-	u8 offset = bnx2x_uc_list_cam_offset(bp);
-	u8 max_list_size = bnx2x_max_uc_list(bp);
-
-	for (i = 0; i < max_list_size; i++) {
-		SET_FLAG(config_cmd->config_table[i].flags,
-			MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
-			T_ETH_MAC_COMMAND_INVALIDATE);
-		bnx2x_set_mac_in_nig(bp, 0, NULL, LLH_CAM_ETH_LINE + 1 + i);
-	}
-
-	wmb();
-
-	config_cmd->hdr.length = max_list_size;
-	config_cmd->hdr.offset = offset;
-	config_cmd->hdr.client_id = 0xff;
-	/* We'll wait for a completion this time... */
-	config_cmd->hdr.echo = 1;
-
-	bp->set_mac_pending = 1;
-
-	mb();
-
-	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
-		      U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
-
-	/* Wait for a completion */
-	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
-				ramrod_flags);
-
+	/* Execute the pending commands */
+	__set_bit(RAMROD_CONT, &ramrod_flags);
+	return bnx2x_set_mac_one(bp, NULL, mac_obj, false /* don't care */,
+				 BNX2X_UC_LIST_MAC, &ramrod_flags);
 }
 
 static inline int bnx2x_set_mc_list(struct bnx2x *bp)
 {
-	/* some multicasts */
-	if (CHIP_IS_E1(bp)) {
-		return bnx2x_set_e1_mc_list(bp);
-	} else { /* E1H and newer */
-		return bnx2x_set_e1h_mc_list(bp);
+	struct net_device *dev = bp->dev;
+	struct bnx2x_mcast_ramrod_params rparam = {0};
+	int rc = 0;
+
+	rparam.mcast_obj = &bp->mcast_obj;
+
+	/* first, clear all configured multicast MACs */
+	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
+	if (rc < 0) {
+		BNX2X_ERR("Failed to clear multicast "
+			  "configuration: %d\n", rc);
+		return rc;
 	}
+
+	/* then, configure a new MACs list */
+	if (netdev_mc_count(dev)) {
+		rc = bnx2x_init_mcast_macs_list(bp, &rparam);
+		if (rc) {
+			BNX2X_ERR("Failed to create multicast MACs "
+				  "list: %d\n", rc);
+			return rc;
+		}
+
+		/* Now add the new MACs */
+		rc = bnx2x_config_mcast(bp, &rparam,
+					BNX2X_MCAST_CMD_ADD);
+		if (rc < 0)
+			BNX2X_ERR("Failed to set a new multicast "
+				  "configuration: %d\n", rc);
+
+		bnx2x_free_mcast_macs_list(&rparam);
+	}
+
+	return rc;
 }
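The delete-then-add order in bnx2x_set_mc_list() is deliberate: clearing the
old configuration first guarantees that MACs removed from the OS list also
disappear from the device before the new list is programmed. The caller is
expected to fall back on a coarser mode when this fails, as bnx2x_set_rx_mode()
below does:

/* Caller-side fallback (mirrors the bnx2x_set_rx_mode() hunk below) */
if (bnx2x_set_mc_list(bp) < 0)
	rx_mode = BNX2X_RX_MODE_ALLMULTI;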
 
-/* called with netif_tx_lock from dev_mcast.c */
+
+/* If bp->state is OPEN, should be called with netif_addr_lock_bh() */
 void bnx2x_set_rx_mode(struct net_device *dev)
 {
 	struct bnx2x *bp = netdev_priv(dev);
@@ -9017,23 +9569,31 @@
 		return;
 	}
 
-	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
+	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags);
 
 	if (dev->flags & IFF_PROMISC)
 		rx_mode = BNX2X_RX_MODE_PROMISC;
-	else if (dev->flags & IFF_ALLMULTI)
+	else if ((dev->flags & IFF_ALLMULTI) ||
+		 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
+		  CHIP_IS_E1(bp)))
 		rx_mode = BNX2X_RX_MODE_ALLMULTI;
 	else {
 		/* some multicasts */
-		if (bnx2x_set_mc_list(bp))
+		if (bnx2x_set_mc_list(bp) < 0)
 			rx_mode = BNX2X_RX_MODE_ALLMULTI;
 
-		/* some unicasts */
-		if (bnx2x_set_uc_list(bp))
+		if (bnx2x_set_uc_list(bp) < 0)
 			rx_mode = BNX2X_RX_MODE_PROMISC;
 	}
 
 	bp->rx_mode = rx_mode;
+
+	/* Schedule the rx_mode command */
+	if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
+		set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
+		return;
+	}
+
 	bnx2x_set_storm_rx_mode(bp);
 }
 
@@ -9124,8 +9684,28 @@
 #endif
 };
 
+static inline int bnx2x_set_coherency_mask(struct bnx2x *bp)
+{
+	struct device *dev = &bp->pdev->dev;
+
+	if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
+		bp->flags |= USING_DAC_FLAG;
+		if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
+			dev_err(dev, "dma_set_coherent_mask failed, "
+				     "aborting\n");
+			return -EIO;
+		}
+	} else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
+		dev_err(dev, "System does not support DMA, aborting\n");
+		return -EIO;
+	}
+
+	return 0;
+}
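The helper above implements the usual 64-bit-then-32-bit DMA mask fallback, so
callers only need to check its return value; a probe-time usage sketch
(mirroring the bnx2x_init_dev() hunk below):

rc = bnx2x_set_coherency_mask(bp);
if (rc)
	goto err_out_release;	/* neither 64-bit nor 32-bit DMA is usable */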
+
 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
-				    struct net_device *dev)
+				    struct net_device *dev,
+				    unsigned long board_type)
 {
 	struct bnx2x *bp;
 	int rc;
@@ -9187,21 +9767,9 @@
 		goto err_out_release;
 	}
 
-	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
-		bp->flags |= USING_DAC_FLAG;
-		if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
-			dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
-			       " failed, aborting\n");
-			rc = -EIO;
-			goto err_out_release;
-		}
-
-	} else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
-		dev_err(&bp->pdev->dev,
-			"System does not support DMA, aborting\n");
-		rc = -EIO;
+	rc = bnx2x_set_coherency_mask(bp);
+	if (rc)
 		goto err_out_release;
-	}
 
 	dev->mem_start = pci_resource_start(pdev, 0);
 	dev->base_addr = dev->mem_start;
@@ -9237,6 +9805,12 @@
 	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
 	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
 
+	/*
+	 * Enable internal target-read (in case we are probed after PF FLR).
+	 * Must be done prior to any BAR read access.
+	 */
+	REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
+
 	/* Reset the load counter */
 	bnx2x_clear_load_cnt(bp);
 
@@ -9451,7 +10025,7 @@
 		fw_file_name = FW_FILE_NAME_E1;
 	else if (CHIP_IS_E1H(bp))
 		fw_file_name = FW_FILE_NAME_E1H;
-	else if (CHIP_IS_E2(bp))
+	else if (!CHIP_IS_E1x(bp))
 		fw_file_name = FW_FILE_NAME_E2;
 	else {
 		BNX2X_ERR("Unsupported chip revision\n");
@@ -9519,6 +10093,44 @@
 	return rc;
 }
 
+static void bnx2x_release_firmware(struct bnx2x *bp)
+{
+	kfree(bp->init_ops_offsets);
+	kfree(bp->init_ops);
+	kfree(bp->init_data);
+	release_firmware(bp->firmware);
+}
+
+
+static struct bnx2x_func_sp_drv_ops bnx2x_func_sp_drv = {
+	.init_hw_cmn_chip = bnx2x_init_hw_common_chip,
+	.init_hw_cmn      = bnx2x_init_hw_common,
+	.init_hw_port     = bnx2x_init_hw_port,
+	.init_hw_func     = bnx2x_init_hw_func,
+
+	.reset_hw_cmn     = bnx2x_reset_common,
+	.reset_hw_port    = bnx2x_reset_port,
+	.reset_hw_func    = bnx2x_reset_func,
+
+	.gunzip_init      = bnx2x_gunzip_init,
+	.gunzip_end       = bnx2x_gunzip_end,
+
+	.init_fw          = bnx2x_init_firmware,
+	.release_fw       = bnx2x_release_firmware,
+};
+
+void bnx2x__init_func_obj(struct bnx2x *bp)
+{
+	/* Prepare DMAE related driver resources */
+	bnx2x_setup_dmae(bp);
+
+	bnx2x_init_func_obj(bp, &bp->func_obj,
+			    bnx2x_sp(bp, func_rdata),
+			    bnx2x_sp_mapping(bp, func_rdata),
+			    &bnx2x_func_sp_drv);
+}
+
+/* must be called after sriov-enable */
 static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp, int l2_cid_count)
 {
 	int cid_count = L2_FP_COUNT(l2_cid_count);
@@ -9529,6 +10141,25 @@
 	return roundup(cid_count, QM_CID_ROUND);
 }
 
+/**
+ * bnx2x_pci_msix_table_size - get the size of the MSI-X table.
+ *
+ * @pdev:	pci device
+ */
+static inline int bnx2x_pci_msix_table_size(struct pci_dev *pdev)
+{
+	int pos;
+	u16 control;
+
+	pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
+	if (!pos)
+		return 0;
+
+	pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &control);
+	return (control & PCI_MSIX_FLAGS_QSIZE) + 1;
+}
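The helper returns the full MSI-X table size, including the vector of the
default status block; the probe code below subtracts one to get the number of
non-default SBs. A worked example with hypothetical numbers:

/* Hypothetical device exposing a 17-entry MSI-X table */
int msix_cnt = bnx2x_pci_msix_table_size(pdev);	/* 17 */
int cid_count = msix_cnt - 1;			/* 16 non-default SBs */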
+
 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
 				    const struct pci_device_id *ent)
 {
@@ -9541,12 +10172,28 @@
 	case BCM57710:
 	case BCM57711:
 	case BCM57711E:
-		cid_count = FP_SB_MAX_E1x;
-		break;
-
 	case BCM57712:
-	case BCM57712E:
-		cid_count = FP_SB_MAX_E2;
+	case BCM57712_MF:
+	case BCM57800:
+	case BCM57800_MF:
+	case BCM57810:
+	case BCM57810_MF:
+	case BCM57840:
+	case BCM57840_MF:
+		/* The size requested for the MSI-X table corresponds to the
+		 * actual number of available IGU/HC status blocks. It includes
+		 * the default SB vector, but we want cid_count to contain only
+		 * the number of non-default SBs; that's what the '-1' is for.
+		 */
+		cid_count = bnx2x_pci_msix_table_size(pdev) - 1;
+
+		/* Do not allow the initial cid_count to grow above 16,
+		 * since the Special CIDs start from that number;
+		 * use the old FP_SB_MAX_E1x define for this purpose.
+		 */
+		cid_count = min_t(int, FP_SB_MAX_E1x, cid_count);
+
+		WARN_ON(!cid_count);
 		break;
 
 	default:
@@ -9555,7 +10202,7 @@
 		return -ENODEV;
 	}
 
-	cid_count += NONE_ETH_CONTEXT_USE + CNIC_CONTEXT_USE;
+	cid_count += FCOE_CONTEXT_USE;
 
 	/* dev zeroed in init_etherdev */
 	dev = alloc_etherdev_mq(sizeof(*bp), cid_count);
@@ -9564,6 +10211,11 @@
 		return -ENOMEM;
 	}
 
+	/* We don't need a Tx queue for the CNIC and OOO Rx-only rings,
+	 * so update cid_count only after the netdev allocation.
+	 */
+	cid_count += CNIC_CONTEXT_USE;
+
 	bp = netdev_priv(dev);
 	bp->msg_enable = debug;
 
@@ -9571,12 +10223,14 @@
 
 	bp->l2_cid_count = cid_count;
 
-	rc = bnx2x_init_dev(pdev, dev);
+	rc = bnx2x_init_dev(pdev, dev, ent->driver_data);
 	if (rc < 0) {
 		free_netdev(dev);
 		return rc;
 	}
 
+	BNX2X_DEV_INFO("cid_count=%d\n", cid_count);
+
 	rc = bnx2x_init_bp(bp);
 	if (rc)
 		goto init_one_exit;
@@ -9713,12 +10367,17 @@
 
 	bp->rx_mode = BNX2X_RX_MODE_NONE;
 
+#ifdef BCM_CNIC
+	bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
+#endif
+	/* Stop Tx */
+	bnx2x_tx_disable(bp);
+
 	bnx2x_netif_stop(bp, 0);
-	netif_carrier_off(bp->dev);
 
 	del_timer_sync(&bp->timer);
-	bp->stats_state = STATS_STATE_DISABLED;
-	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
+
+	bnx2x_stats_handle(bp, STATS_EVENT_STOP);
 
 	/* Release IRQs */
 	bnx2x_free_irq(bp);
@@ -9733,6 +10392,8 @@
 
 	bp->state = BNX2X_STATE_CLOSED;
 
+	netif_carrier_off(bp->dev);
+
 	return 0;
 }
 
@@ -9845,8 +10506,8 @@
 	struct bnx2x *bp = netdev_priv(dev);
 
 	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
-		printk(KERN_ERR "Handling parity error recovery. "
-				"Try again later\n");
+		netdev_err(bp->dev, "Handling parity error recovery. "
+				    "Try again later\n");
 		return;
 	}
 
@@ -9905,10 +10566,33 @@
 	destroy_workqueue(bnx2x_wq);
 }
 
+void bnx2x_notify_link_changed(struct bnx2x *bp)
+{
+	REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + BP_FUNC(bp)*sizeof(u32), 1);
+}
+
 module_init(bnx2x_init);
 module_exit(bnx2x_cleanup);
 
 #ifdef BCM_CNIC
+/**
+ * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s).
+ *
+ * @bp:		driver handle
+ *
+ * This function will wait until the ramrod completion returns.
+ * Returns 0 on success, -ENODEV if the ramrod doesn't complete.
+ */
+static inline int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp)
+{
+	unsigned long ramrod_flags = 0;
+
+	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+	return bnx2x_set_mac_one(bp, bp->cnic_eth_dev.iscsi_mac,
+				 &bp->iscsi_l2_mac_obj, true,
+				 BNX2X_ISCSI_ETH_MAC, &ramrod_flags);
+}
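With RAMROD_COMP_WAIT set, bnx2x_set_mac_one() does not return until the
ramrod completion arrives, which is why the kernel-doc above describes this
as a waiting call; the asynchronous usage simply leaves the flag clear, as
bnx2x_set_uc_list() above does.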
 
 /* count denotes the number of new completions we have seen */
 static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
@@ -9929,23 +10613,22 @@
 		u16 type =  (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
 				& SPE_HDR_CONN_TYPE) >>
 				SPE_HDR_CONN_TYPE_SHIFT;
+		u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->hdr.conn_and_cmd_data)
+				>> SPE_HDR_CMD_ID_SHIFT) & 0xff;
 
 		/* Set validation for iSCSI L2 client before sending SETUP
 		 *  ramrod
 		 */
 		if (type == ETH_CONNECTION_TYPE) {
-			u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->
-					     hdr.conn_and_cmd_data) >>
-				SPE_HDR_CMD_ID_SHIFT) & 0xff;
-
 			if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP)
-				bnx2x_set_ctx_validation(&bp->context.
-						vcxt[BNX2X_ISCSI_ETH_CID].eth,
-					HW_CID(bp, BNX2X_ISCSI_ETH_CID));
+				bnx2x_set_ctx_validation(bp, &bp->context.
+					vcxt[BNX2X_ISCSI_ETH_CID].eth,
+					BNX2X_ISCSI_ETH_CID);
 		}
 
-		/* There may be not more than 8 L2 and not more than 8 L5 SPEs
-		 * We also check that the number of outstanding
+		/*
+		 * There may be no more than 8 L2 and no more than 8 L5 SPEs
+		 * in the air at once. We also check that the number of outstanding
 		 * COMMON ramrods is not more than the EQ and SPQ can
 		 * accommodate.
 		 */
@@ -10071,18 +10754,61 @@
 	return bnx2x_cnic_ctl_send(bp, &ctl);
 }
 
-static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
+static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err)
 {
-	struct cnic_ctl_info ctl;
+	struct cnic_ctl_info ctl = {0};
 
 	/* first we tell CNIC and only then we count this as a completion */
 	ctl.cmd = CNIC_CTL_COMPLETION_CMD;
 	ctl.data.comp.cid = cid;
+	ctl.data.comp.error = err;
 
 	bnx2x_cnic_ctl_send_bh(bp, &ctl);
 	bnx2x_cnic_sp_post(bp, 0);
 }
 
+
+/* Called with netif_addr_lock_bh() taken.
+ * Sets an rx_mode config for an iSCSI ETH client.
+ * Doesn't block.
+ * Completion should be checked outside.
+ */
+static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start)
+{
+	unsigned long accept_flags = 0, ramrod_flags = 0;
+	u8 cl_id = bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
+	int sched_state = BNX2X_FILTER_ISCSI_ETH_STOP_SCHED;
+
+	if (start) {
+		/* Start accepting on the iSCSI L2 ring. Accept all multicasts
+		 * because it's the only way for the UIO Queue to accept
+		 * them: in non-promiscuous mode only one Queue per function
+		 * (the leading one, in our case) will receive multicast
+		 * packets.
+		 */
+		__set_bit(BNX2X_ACCEPT_UNICAST, &accept_flags);
+		__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags);
+		__set_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags);
+		__set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
+
+		/* Clear STOP_PENDING bit if START is requested */
+		clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &bp->sp_state);
+
+		sched_state = BNX2X_FILTER_ISCSI_ETH_START_SCHED;
+	} else
+		/* Clear START_PENDING bit if STOP is requested */
+		clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &bp->sp_state);
+
+	if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
+		set_bit(sched_state, &bp->sp_state);
+	else {
+		__set_bit(RAMROD_RX, &ramrod_flags);
+		bnx2x_set_q_rx_mode(bp, cl_id, 0, accept_flags, 0,
+				    ramrod_flags);
+	}
+}
+
+
 static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
 {
 	struct bnx2x *bp = netdev_priv(dev);
@@ -10106,45 +10832,65 @@
 
 	/* rtnl_lock is held.  */
 	case DRV_CTL_START_L2_CMD: {
-		u32 cli = ctl->data.ring.client_id;
+		struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+		unsigned long sp_bits = 0;
 
-		/* Clear FCoE FIP and ALL ENODE MACs addresses first */
-		bnx2x_del_fcoe_eth_macs(bp);
+		/* Configure the iSCSI classification object */
+		bnx2x_init_mac_obj(bp, &bp->iscsi_l2_mac_obj,
+				   cp->iscsi_l2_client_id,
+				   cp->iscsi_l2_cid, BP_FUNC(bp),
+				   bnx2x_sp(bp, mac_rdata),
+				   bnx2x_sp_mapping(bp, mac_rdata),
+				   BNX2X_FILTER_MAC_PENDING,
+				   &bp->sp_state, BNX2X_OBJ_TYPE_RX,
+				   &bp->macs_pool);
 
 		/* Set iSCSI MAC address */
-		bnx2x_set_iscsi_eth_mac_addr(bp, 1);
+		rc = bnx2x_set_iscsi_eth_mac_addr(bp);
+		if (rc)
+			break;
 
 		mmiowb();
 		barrier();
 
-		/* Start accepting on iSCSI L2 ring. Accept all multicasts
-		 * because it's the only way for UIO Client to accept
-		 * multicasts (in non-promiscuous mode only one Client per
-		 * function will receive multicast packets (leading in our
-		 * case).
-		 */
-		bnx2x_rxq_set_mac_filters(bp, cli,
-			BNX2X_ACCEPT_UNICAST |
-			BNX2X_ACCEPT_BROADCAST |
-			BNX2X_ACCEPT_ALL_MULTICAST);
-		storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
+		/* Start accepting on iSCSI L2 ring */
+
+		netif_addr_lock_bh(dev);
+		bnx2x_set_iscsi_eth_rx_mode(bp, true);
+		netif_addr_unlock_bh(dev);
+
+		/* bits to wait on */
+		__set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
+		__set_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &sp_bits);
+
+		if (!bnx2x_wait_sp_comp(bp, sp_bits))
+			BNX2X_ERR("rx_mode completion timed out!\n");
 
 		break;
 	}
 
 	/* rtnl_lock is held.  */
 	case DRV_CTL_STOP_L2_CMD: {
-		u32 cli = ctl->data.ring.client_id;
+		unsigned long sp_bits = 0;
 
 		/* Stop accepting on iSCSI L2 ring */
-		bnx2x_rxq_set_mac_filters(bp, cli, BNX2X_ACCEPT_NONE);
-		storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
+		netif_addr_lock_bh(dev);
+		bnx2x_set_iscsi_eth_rx_mode(bp, false);
+		netif_addr_unlock_bh(dev);
+
+		/* bits to wait on */
+		__set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
+		__set_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &sp_bits);
+
+		if (!bnx2x_wait_sp_comp(bp, sp_bits))
+			BNX2X_ERR("rx_mode completion timed out!\n");
 
 		mmiowb();
 		barrier();
 
 		/* Unset iSCSI L2 MAC */
-		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
+		rc = bnx2x_del_all_macs(bp, &bp->iscsi_l2_mac_obj,
+					BNX2X_ISCSI_ETH_MAC, true);
 		break;
 	}
 	case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
@@ -10156,11 +10902,6 @@
 		break;
 	}
 
-	case DRV_CTL_ISCSI_STOPPED_CMD: {
-		bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_ISCSI_STOPPED);
-		break;
-	}
-
 	default:
 		BNX2X_ERR("unknown command %x\n", ctl->cmd);
 		rc = -EINVAL;
@@ -10181,13 +10922,13 @@
 		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
 		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
 	}
-	if (CHIP_IS_E2(bp))
+	if (!CHIP_IS_E1x(bp))
 		cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
 	else
 		cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
 
-	cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
-	cp->irq_arr[0].status_blk_num2 = CNIC_IGU_SB_ID(bp);
+	cp->irq_arr[0].status_blk_num =  bnx2x_cnic_fw_sb_id(bp);
+	cp->irq_arr[0].status_blk_num2 = bnx2x_cnic_igu_sb_id(bp);
 	cp->irq_arr[1].status_blk = bp->def_status_blk;
 	cp->irq_arr[1].status_blk_num = DEF_SB_ID;
 	cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;
@@ -10204,9 +10945,6 @@
 	if (ops == NULL)
 		return -EINVAL;
 
-	if (atomic_read(&bp->intr_sem) != 0)
-		return -EBUSY;
-
 	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
 	if (!bp->cnic_kwq)
 		return -ENOMEM;
@@ -10221,7 +10959,7 @@
 	bp->cnic_data = data;
 
 	cp->num_irq = 0;
-	cp->drv_state = CNIC_DRV_STATE_REGD;
+	cp->drv_state |= CNIC_DRV_STATE_REGD;
 	cp->iro_arr = bp->iro_arr;
 
 	bnx2x_setup_cnic_irq_info(bp);
@@ -10275,8 +11013,8 @@
 	cp->drv_register_cnic = bnx2x_register_cnic;
 	cp->drv_unregister_cnic = bnx2x_unregister_cnic;
 	cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID;
-	cp->iscsi_l2_client_id = BNX2X_ISCSI_ETH_CL_ID +
-		BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
+	cp->iscsi_l2_client_id =
+		bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
 	cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;
 
 	if (NO_ISCSI_OOO(bp))
diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h
index 86bba25..53da4ef 100644
--- a/drivers/net/bnx2x/bnx2x_reg.h
+++ b/drivers/net/bnx2x/bnx2x_reg.h
@@ -54,16 +54,20 @@
 /* [RW 10] The number of free blocks below which the full signal to class 0
  * is asserted */
 #define BRB1_REG_FULL_0_XOFF_THRESHOLD_0			 0x601d0
-/* [RW 10] The number of free blocks above which the full signal to class 0
+#define BRB1_REG_FULL_0_XOFF_THRESHOLD_1			 0x60230
+/* [RW 11] The number of free blocks above which the full signal to class 0
  * is de-asserted */
 #define BRB1_REG_FULL_0_XON_THRESHOLD_0				 0x601d4
-/* [RW 10] The number of free blocks below which the full signal to class 1
+#define BRB1_REG_FULL_0_XON_THRESHOLD_1				 0x60234
+/* [RW 11] The number of free blocks below which the full signal to class 1
  * is asserted */
 #define BRB1_REG_FULL_1_XOFF_THRESHOLD_0			 0x601d8
-/* [RW 10] The number of free blocks above which the full signal to class 1
+#define BRB1_REG_FULL_1_XOFF_THRESHOLD_1			 0x60238
+/* [RW 11] The number of free blocks above which the full signal to class 1
  * is de-asserted */
 #define BRB1_REG_FULL_1_XON_THRESHOLD_0				 0x601dc
-/* [RW 10] The number of free blocks below which the full signal to the LB
+#define BRB1_REG_FULL_1_XON_THRESHOLD_1				 0x6023c
+/* [RW 11] The number of free blocks below which the full signal to the LB
  * port is asserted */
 #define BRB1_REG_FULL_LB_XOFF_THRESHOLD				 0x601e0
 /* [RW 10] The number of free blocks above which the full signal to the LB
@@ -75,15 +79,49 @@
 /* [RW 10] The number of free blocks below which the High_llfc signal to
    interface #n is asserted. */
 #define BRB1_REG_HIGH_LLFC_LOW_THRESHOLD_0			 0x6013c
-/* [RW 23] LL RAM data. */
-#define BRB1_REG_LL_RAM 					 0x61000
+/* [RW 11] The number of blocks guaranteed for the LB port */
+#define BRB1_REG_LB_GUARANTIED					 0x601ec
+/* [RW 11] The hysteresis on the guaranteed buffer space for the LB port
+ * before signaling XON. */
+#define BRB1_REG_LB_GUARANTIED_HYST				 0x60264
+/* [RW 24] LL RAM data. */
+#define BRB1_REG_LL_RAM						 0x61000
 /* [RW 10] The number of free blocks above which the Low_llfc signal to
    interface #n is de-asserted. */
 #define BRB1_REG_LOW_LLFC_HIGH_THRESHOLD_0			 0x6016c
 /* [RW 10] The number of free blocks below which the Low_llfc signal to
    interface #n is asserted. */
 #define BRB1_REG_LOW_LLFC_LOW_THRESHOLD_0			 0x6015c
-/* [RW 10] The number of blocks guarantied for the MAC port */
+/* [RW 11] The number of blocks guaranteed for class 0 in MAC 0. The
+ * register is applicable only when per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_0_CLASS_0_GUARANTIED			 0x60244
+/* [RW 11] The hysteresis on the guaranteed buffer space for class 0 in MAC
+ * 0 before signaling XON. The register is applicable only when
+ * per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_0_CLASS_0_GUARANTIED_HYST			 0x60254
+/* [RW 11] The number of blocks guaranteed for class 1 in MAC 0. The
+ * register is applicable only when per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_0_CLASS_1_GUARANTIED			 0x60248
+/* [RW 11] The hysteresis on the guaranteed buffer space for class 1 in MAC 0
+ * before signaling XON. The register is applicable only when
+ * per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_0_CLASS_1_GUARANTIED_HYST			 0x60258
+/* [RW 11] The number of blocks guaranteed for class 0 in MAC 1. The register
+ * is applicable only when per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_1_CLASS_0_GUARANTIED			 0x6024c
+/* [RW 11] The hysteresis on the guaranteed buffer space for class 0 in MAC
+ * 1 before signaling XON. The register is applicable only when
+ * per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_1_CLASS_0_GUARANTIED_HYST			 0x6025c
+/* [RW 11] The number of blocks guaranteed for class 1 in MAC 1. The
+ * register is applicable only when per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_1_CLASS_1_GUARANTIED			 0x60250
+/* [RW 11] The hysteresis on the guaranteed buffer space for class 1 in MAC
+ * 1 before signaling XON. The register is applicable only when
+ * per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_1_CLASS_1_GUARANTIED_HYST			 0x60260
+/* [RW 11] The number of blocks guaranteed for the MAC port. The register is
+ * applicable only when per_class_guaranty_mode is reset. */
 #define BRB1_REG_MAC_GUARANTIED_0				 0x601e8
 #define BRB1_REG_MAC_GUARANTIED_1				 0x60240
 /* [R 24] The number of full blocks. */
@@ -100,15 +138,19 @@
 /* [RW 10] The number of free blocks below which the pause signal to class 0
  * is asserted */
 #define BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0			 0x601c0
-/* [RW 10] The number of free blocks above which the pause signal to class 0
+#define BRB1_REG_PAUSE_0_XOFF_THRESHOLD_1			 0x60220
+/* [RW 11] The number of free blocks above which the pause signal to class 0
  * is de-asserted */
 #define BRB1_REG_PAUSE_0_XON_THRESHOLD_0			 0x601c4
-/* [RW 10] The number of free blocks below which the pause signal to class 1
+#define BRB1_REG_PAUSE_0_XON_THRESHOLD_1			 0x60224
+/* [RW 11] The number of free blocks below which the pause signal to class 1
  * is asserted */
 #define BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0			 0x601c8
-/* [RW 10] The number of free blocks above which the pause signal to class 1
+#define BRB1_REG_PAUSE_1_XOFF_THRESHOLD_1			 0x60228
+/* [RW 11] The number of free blocks above which the pause signal to class 1
  * is de-asserted */
 #define BRB1_REG_PAUSE_1_XON_THRESHOLD_0			 0x601cc
+#define BRB1_REG_PAUSE_1_XON_THRESHOLD_1			 0x6022c
 /* [RW 10] Write client 0: De-assert pause threshold. Not Functional */
 #define BRB1_REG_PAUSE_HIGH_THRESHOLD_0 			 0x60078
 #define BRB1_REG_PAUSE_HIGH_THRESHOLD_1 			 0x6007c
@@ -422,6 +464,7 @@
 #define CFC_REG_NUM_LCIDS_ALLOC 				 0x104020
 /* [R 9] Number of Arriving LCIDs in Link List Block */
 #define CFC_REG_NUM_LCIDS_ARRIVING				 0x104004
+#define CFC_REG_NUM_LCIDS_INSIDE_PF				 0x104120
 /* [R 9] Number of Leaving LCIDs in Link List Block */
 #define CFC_REG_NUM_LCIDS_LEAVING				 0x104018
 #define CFC_REG_WEAK_ENABLE_PF					 0x104124
@@ -783,6 +826,7 @@
 /* [RW 3] The number of simultaneous outstanding requests to Context Fetch
    Interface. */
 #define DORQ_REG_OUTST_REQ					 0x17003c
+#define DORQ_REG_PF_USAGE_CNT					 0x1701d0
 #define DORQ_REG_REGN						 0x170038
 /* [R 4] Current value of response A counter credit. Initial credit is
    configured through write to ~dorq_registers_rsp_init_crd.rsp_init_crd
@@ -802,10 +846,12 @@
 /* [RW 28] TCM Header when both ULP and TCP context is loaded. */
 #define DORQ_REG_SHRT_CMHEAD					 0x170054
 #define HC_CONFIG_0_REG_ATTN_BIT_EN_0				 (0x1<<4)
+#define HC_CONFIG_0_REG_BLOCK_DISABLE_0				 (0x1<<0)
 #define HC_CONFIG_0_REG_INT_LINE_EN_0				 (0x1<<3)
 #define HC_CONFIG_0_REG_MSI_ATTN_EN_0				 (0x1<<7)
 #define HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0			 (0x1<<2)
-#define HC_CONFIG_0_REG_SINGLE_ISR_EN_0 			 (0x1<<1)
+#define HC_CONFIG_0_REG_SINGLE_ISR_EN_0				 (0x1<<1)
+#define HC_CONFIG_1_REG_BLOCK_DISABLE_1				 (0x1<<0)
 #define HC_REG_AGG_INT_0					 0x108050
 #define HC_REG_AGG_INT_1					 0x108054
 #define HC_REG_ATTN_BIT 					 0x108120
@@ -844,6 +890,7 @@
 #define HC_REG_VQID_0						 0x108008
 #define HC_REG_VQID_1						 0x10800c
 #define IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN		 (0x1<<1)
+#define IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE		 (0x1<<0)
 #define IGU_REG_ATTENTION_ACK_BITS				 0x130108
 /* [R 4] Debug: attn_fsm */
 #define IGU_REG_ATTN_FSM					 0x130054
@@ -933,6 +980,14 @@
  * clear; 1 = set. Data valid only in addresses 0-4. all the rest are zero. */
 #define IGU_REG_WRITE_DONE_PENDING				 0x130480
 #define MCP_A_REG_MCPR_SCRATCH					 0x3a0000
+#define MCP_REG_MCPR_CPU_PROGRAM_COUNTER			 0x8501c
+#define MCP_REG_MCPR_GP_INPUTS					 0x800c0
+#define MCP_REG_MCPR_GP_OENABLE					 0x800c8
+#define MCP_REG_MCPR_GP_OUTPUTS					 0x800c4
+#define MCP_REG_MCPR_IMC_COMMAND				 0x85900
+#define MCP_REG_MCPR_IMC_DATAREG0				 0x85920
+#define MCP_REG_MCPR_IMC_SLAVE_CONTROL				 0x85904
 #define MCP_REG_MCPR_NVM_ACCESS_ENABLE				 0x86424
 #define MCP_REG_MCPR_NVM_ADDR					 0x8640c
 #define MCP_REG_MCPR_NVM_CFG4					 0x8642c
@@ -1429,11 +1484,37 @@
 /* [RW 1] e1hmf for WOL. If clr; the WOL signal to the PXP will be sent on
    bit 0 only. */
 #define MISC_REG_E1HMF_MODE					 0xa5f8
+/* [R 1] Status of four port mode path swap input pin. */
+#define MISC_REG_FOUR_PORT_PATH_SWAP				 0xa75c
+/* [RW 2] 4 port path swap overwrite.[0] - Overwrite control; if it is 0 -
+   the path_swap output is equal to 4 port mode path swap input pin; if it
+   is 1 - the path_swap output is equal to bit[1] of this register; [1] -
+   Overwrite value. If bit[0] of this register is 1 this is the value that
+   receives the path_swap output. Reset on Hard reset. */
+#define MISC_REG_FOUR_PORT_PATH_SWAP_OVWR			 0xa738
+/* [R 1] Status of 4 port mode port swap input pin. */
+#define MISC_REG_FOUR_PORT_PORT_SWAP				 0xa754
+/* [RW 2] 4 port port swap overwrite.[0] - Overwrite control; if it is 0 -
+   the port_swap output is equal to 4 port mode port swap input pin; if it
+   is 1 - the port_swap output is equal to bit[1] of this register; [1] -
+   Overwrite value. If bit[0] of this register is 1 this is the value that
+   receives the port_swap output. Reset on Hard reset. */
+#define MISC_REG_FOUR_PORT_PORT_SWAP_OVWR			 0xa734
 /* [RW 32] Debug only: spare RW register reset by core reset */
 #define MISC_REG_GENERIC_CR_0					 0xa460
 #define MISC_REG_GENERIC_CR_1					 0xa464
 /* [RW 32] Debug only: spare RW register reset by por reset */
 #define MISC_REG_GENERIC_POR_1					 0xa474
+/* [RW 32] Bit[0]: EPIO MODE SEL: Setting this bit to 1 will allow SW/FW to
+   use all of the 32 Extended GPIO pins. Without setting this bit; an EPIO
+   can not be configured as an output. Each output has its output enable in
+   the MCP register space; but this bit needs to be set to make use of that.
+   Bit[3:1] spare. Bit[4]: WCVTMON_PWRDN: Powerdown for Warpcore VTMON. When
+   set to 1 - Powerdown. Bit[5]: WCVTMON_RESETB: Reset for Warpcore VTMON.
+   When set to 0 - vTMON is in reset. Bit[6]: setting this bit will change
+   the i/o to an output and will drive the TimeSync output. Bit[31:7]:
+   spare. Global register. Reset by hard reset. */
+#define MISC_REG_GEN_PURP_HWG					 0xa9a0
 /* [RW 32] GPIO. [31-28] FLOAT port 0; [27-24] FLOAT port 0; When any of
    these bits is written as a '1'; the corresponding SPIO bit will turn off
    it's drivers and become an input. This is the reset state of all GPIO
@@ -1636,6 +1717,14 @@
    in this register. address 0 - timer 1; address 1 - timer 2, ...  address 7 -
    timer 8 */
 #define MISC_REG_SW_TIMER_VAL					 0xa5c0
+/* [R 1] Status of two port mode path swap input pin. */
+#define MISC_REG_TWO_PORT_PATH_SWAP				 0xa758
+/* [RW 2] 2 port swap overwrite.[0] - Overwrite control; if it is 0 - the
+   path_swap output is equal to 2 port mode path swap input pin; if it is 1
+   - the path_swap output is equal to bit[1] of this register; [1] -
+   Overwrite value. If bit[0] of this register is 1 this is the value that
+   receives the path_swap output. Reset on Hard reset. */
+#define MISC_REG_TWO_PORT_PATH_SWAP_OVWR			 0xa72c
 /* [RW 1] Set by the MCP to remember if one or more of the drivers is/are
    loaded; 0-prepare; -unprepare */
 #define MISC_REG_UNPREPARED					 0xa424
@@ -1644,6 +1733,36 @@
 #define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN	 (0x1<<4)
 #define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST	 (0x1<<2)
 #define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN	 (0x1<<3)
+/* [RW 5] MDIO PHY Address. The WC uses this address to determine whether or
+ * not it is the recipient of the message on the MDIO interface. The value
+ * is compared to the value on ctrl_md_devad. Drives output
+ * misc_xgxs0_phy_addr. Global register. */
+#define MISC_REG_WC0_CTRL_PHY_ADDR				 0xa9cc
+/* [RW 2] XMAC Core port mode. Indicates the number of ports on the system
+   side. This should be less than or equal to phy_port_mode if some of the
+   ports are not used. This enables reduction of frequency on the core side.
+   00 - Single Port Mode; 01 - Dual Port Mode; 10 - Tri Port Mode; 11 -
+   Quad Port Mode. This is a strap input for the XMAC_MP core and should
+   be changed only while reset is held low. Reset on Hard reset. */
+#define MISC_REG_XMAC_CORE_PORT_MODE				 0xa964
+/* [RW 2] XMAC PHY port mode. Indicates the number of ports on the Warp
+   Core. 00 - Single Port Mode; 01 - Dual Port Mode; 1x - Quad Port Mode.
+   This is a strap input for the XMAC_MP core and should be changed only
+   while reset is held low. Reset on Hard reset. */
+#define MISC_REG_XMAC_PHY_PORT_MODE				 0xa960
+/* [RW 32] 1 [47] Packet Size = 64. Writes to this register write bits 31:0.
+ * Reads from this register will clear bits 31:0. */
+#define MSTAT_REG_RX_STAT_GR64_LO				 0x200
+/* [RW 32] 1 [00] Tx Good Packet Count. Writes to this register write bits
+ * 31:0. Reads from this register will clear bits 31:0. */
+#define MSTAT_REG_TX_STAT_GTXPOK_LO				 0
+#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST	 (0x1<<0)
+#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST	 (0x1<<1)
 #define NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN			 (0x1<<0)
 #define NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN			 (0x1<<0)
 #define NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT	 (0x1<<0)
@@ -1837,6 +1956,10 @@
 #define NIG_REG_LLH1_FUNC_MEM					 0x161c0
 #define NIG_REG_LLH1_FUNC_MEM_ENABLE				 0x16160
 #define NIG_REG_LLH1_FUNC_MEM_SIZE				 16
+/* [RW 1] When this bit is set; the LLH will classify the packet before
+ * sending it to the BRB or calculating WoL on it. This bit controls port 1
+ * only. The legacy llh_multi_function_mode bit controls port 0. */
+#define NIG_REG_LLH1_MF_MODE					 0x18614
 /* [RW 8] init credit counter for port1 in LLH */
 #define NIG_REG_LLH1_XCM_INIT_CREDIT				 0x10564
 #define NIG_REG_LLH1_XCM_MASK					 0x10134
@@ -1858,11 +1981,25 @@
 /* [R 32] Interrupt register #0 read */
 #define NIG_REG_NIG_INT_STS_0					 0x103b0
 #define NIG_REG_NIG_INT_STS_1					 0x103c0
+/* [R 32] Legacy E1 and E1H location for parity error mask register. */
+#define NIG_REG_NIG_PRTY_MASK					 0x103dc
+/* [RW 32] Parity mask register #0 read/write */
+#define NIG_REG_NIG_PRTY_MASK_0					 0x183c8
+#define NIG_REG_NIG_PRTY_MASK_1					 0x183d8
 /* [R 32] Legacy E1 and E1H location for parity error status register. */
 #define NIG_REG_NIG_PRTY_STS					 0x103d0
 /* [R 32] Parity register #0 read */
 #define NIG_REG_NIG_PRTY_STS_0					 0x183bc
 #define NIG_REG_NIG_PRTY_STS_1					 0x183cc
+/* [R 32] Legacy E1 and E1H location for parity error status clear register. */
+#define NIG_REG_NIG_PRTY_STS_CLR				 0x103d4
+/* [RC 32] Parity register #0 read clear */
+#define NIG_REG_NIG_PRTY_STS_CLR_0				 0x183c0
+#define NIG_REG_NIG_PRTY_STS_CLR_1				 0x183d0
+#define MCPR_IMC_COMMAND_ENABLE					 (1L<<31)
+#define MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT			 16
+#define MCPR_IMC_COMMAND_OPERATION_BITSHIFT			 28
+#define MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT		 8
 /* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
  * Ethernet header. */
 #define NIG_REG_P0_HDRS_AFTER_BASIC				 0x18038
@@ -1872,6 +2009,12 @@
 #define NIG_REG_P0_HWPFC_ENABLE				 0x18078
 #define NIG_REG_P0_LLH_FUNC_MEM2				 0x18480
 #define NIG_REG_P0_LLH_FUNC_MEM2_ENABLE			 0x18440
+/* [RW 1] Input enable for RX MAC interface. */
+#define NIG_REG_P0_MAC_IN_EN					 0x185ac
+/* [RW 1] Output enable for TX MAC interface */
+#define NIG_REG_P0_MAC_OUT_EN					 0x185b0
+/* [RW 1] Output enable for TX PAUSE signal to the MAC. */
+#define NIG_REG_P0_MAC_PAUSE_OUT_EN				 0x185b4
 /* [RW 32] Eight 4-bit configurations for specifying which COS (0-15 for
  * future expansion) each priority is to be mapped to. Bits 3:0 specify the
  * COS for priority 0. Bits 31:28 specify the COS for priority 7. The 3-bit
@@ -1888,11 +2031,52 @@
  * than one bit may be set; allowing multiple priorities to be mapped to one
  * COS. */
 #define NIG_REG_P0_RX_COS1_PRIORITY_MASK			 0x1805c
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 2. A
+ * priority is mapped to COS 2 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P0_RX_COS2_PRIORITY_MASK			 0x186b0
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 3. A
+ * priority is mapped to COS 3 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P0_RX_COS3_PRIORITY_MASK			 0x186b4
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 4. A
+ * priority is mapped to COS 4 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P0_RX_COS4_PRIORITY_MASK			 0x186b8
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 5. A
+ * priority is mapped to COS 5 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P0_RX_COS5_PRIORITY_MASK			 0x186bc
 /* [RW 15] Specify which of the credit registers the client is to be mapped
  * to. Bits[2:0] are for client 0; bits [14:12] are for client 4. For
  * clients that are not subject to WFQ credit blocking - their
  * specifications here are not used. */
 #define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP			 0x180f0
+/* [RW 32] Specify which of the credit registers the client is to be mapped
+ * to. This register specifies bits 31:0 of the 36-bit value. Bits[3:0] are
+ * for client 0; bits [35:32] are for client 8. For clients that are not
+ * subject to WFQ credit blocking - their specifications here are not used.
+ * This is a new register (with 2_) added in E3 B0 to accommodate the 9
+ * input clients to ETS arbiter. The reset default is set for management and
+ * debug to use credit registers 6, 7, and 8, respectively, and COSes 0-5 to
+ * use credit registers 0-5 respectively (0x543210876). Note that credit
+ * registers can not be shared between clients. */
+#define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_LSB		 0x18688
+/* [RW 4] Specify which of the credit registers the client is to be mapped
+ * to. This register specifies bits 35:32 of the 36-bit value. Bits[3:0] are
+ * for client 0; bits [35:32] are for client 8. For clients that are not
+ * subject to WFQ credit blocking - their specifications here are not used.
+ * This is a new register (with 2_) added in E3 B0 to accommodate the 9
+ * input clients to ETS arbiter. The reset default is set for management and
+ * debug to use credit registers 6, 7, and 8, respectively, and COSes 0-5 to
+ * use credit registers 0-5 respectively (0x543210876). Note that credit
+ * registers can not be shared between clients. */
+#define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_MSB		 0x1868c
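
Because the 36-bit map is split across an LSB and an MSB register, programming it takes two writes. A sketch, assuming the driver's REG_WR() helper and using the reset default quoted above:

	/* Hypothetical: program the E3 B0 default client-to-credit map. */
	u64 crd_map = 0x543210876ULL;

	REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_LSB,
	       (u32)(crd_map & 0xffffffff));
	REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_MSB,
	       (u32)(crd_map >> 32));
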
 /* [RW 5] Specify whether the client competes directly in the strict
  * priority arbiter. The bits are mapped according to client ID (client IDs
  * are defined in tx_arb_priority_client). Default value is set to enable
@@ -1907,10 +2091,24 @@
  * reach. */
 #define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0			 0x1810c
 #define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1			 0x18110
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_2			 0x18114
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_3			 0x18118
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_4			 0x1811c
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_5			 0x186a0
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_6			 0x186a4
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_7			 0x186a8
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_8			 0x186ac
 /* [RW 32] Specify the weight (in bytes) to be added to credit register 0
  * when it is time to increment. */
 #define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0			 0x180f8
 #define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1			 0x180fc
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2			 0x18100
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3			 0x18104
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4			 0x18108
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5			 0x18690
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_6			 0x18694
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_7			 0x18698
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_8			 0x1869c
 /* [RW 12] Specify the number of strict priority arbitration slots between
  * two round-robin arbitration slots to avoid starvation. A value of 0 means
  * no strict priority cycles - the strict priority with anti-starvation
@@ -1925,8 +2123,36 @@
  * for management at priority 0; debug traffic at priorities 1 and 2; COS0
  * traffic at priority 3; and COS1 traffic at priority 4. */
 #define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT			 0x180e4
+/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
+ * Ethernet header. */
+#define NIG_REG_P1_HDRS_AFTER_BASIC				 0x1818c
 #define NIG_REG_P1_LLH_FUNC_MEM2				 0x184c0
 #define NIG_REG_P1_LLH_FUNC_MEM2_ENABLE			 0x18460
+/* [RW 32] Specify the client number to be assigned to each priority of the
+ * strict priority arbiter. This register specifies bits 31:0 of the 36-bit
+ * value. Priority 0 is the highest priority. Bits [3:0] are for priority 0
+ * client; bits [35:32] are for priority 8 client. The clients are assigned
+ * the following IDs: 0-management; 1-debug traffic from this port; 2-debug
+ * traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 traffic;
+ * 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. The reset value[35:0] is
+ * set to 0x345678021. This is a new register (with 2_) added in E3 B0 to
+ * accommodate the 9 input clients to ETS arbiter. */
+#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB			 0x18680
+/* [RW 4] Specify the client number to be assigned to each priority of the
+ * strict priority arbiter. This register specifies bits 35:32 of the 36-bit
+ * value. Priority 0 is the highest priority. Bits [3:0] are for priority 0
+ * client; bits [35:32] are for priority 8 client. The clients are assigned
+ * the following IDs: 0-management; 1-debug traffic from this port; 2-debug
+ * traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 traffic;
+ * 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. The reset value[35:0] is
+ * set to 0x345678021. This is a new register (with 2_) added in E3 B0 to
+ * accommodate the 9 input clients to ETS arbiter. */
+#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB			 0x18684
+#define NIG_REG_P1_MAC_IN_EN					 0x185c0
+/* [RW 1] Output enable for TX MAC interface */
+#define NIG_REG_P1_MAC_OUT_EN					 0x185c4
+/* [RW 1] Output enable for TX PAUSE signal to the MAC. */
+#define NIG_REG_P1_MAC_PAUSE_OUT_EN				 0x185c8
 /* [RW 32] Eight 4-bit configurations for specifying which COS (0-15 for
  * future expansion) each priorty is to be mapped to. Bits 3:0 specify the
  * COS for priority 0. Bits 31:28 specify the COS for priority 7. The 3-bit
@@ -1943,6 +2169,105 @@
  * than one bit may be set; allowing multiple priorities to be mapped to one
  * COS. */
 #define NIG_REG_P1_RX_COS1_PRIORITY_MASK			 0x181b0
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 2. A
+ * priority is mapped to COS 2 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P1_RX_COS2_PRIORITY_MASK			 0x186f8
+/* [R 1] RX FIFO for receiving data from MAC is empty. */
+#define NIG_REG_P1_RX_MACFIFO_EMPTY				 0x1858c
+/* [R 1] TLLH FIFO is empty. */
+#define NIG_REG_P1_TLLH_FIFO_EMPTY				 0x18338
+/* [RW 32] Specify which of the credit registers the client is to be mapped
+ * to. This register specifies bits 31:0 of the 36-bit value. Bits[3:0] are
+ * for client 0; bits [35:32] are for client 8. For clients that are not
+ * subject to WFQ credit blocking - their specifications here are not used.
+ * This is a new register (with 2_) added in E3 B0 to accommodate the 9
+ * input clients to ETS arbiter. The reset default is set for management and
+ * debug to use credit registers 6, 7, and 8, respectively, and COSes 0-5 to
+ * use credit registers 0-5 respectively (0x543210876). Note that credit
+ * registers can not be shared between clients. Note also that there are
+ * only COS0-2 in port 1; there is a total of 6 clients in port 1. Only
+ * credit registers 0-5 are valid. This register should be configured
+ * appropriately before enabling WFQ. */
+#define NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_LSB		 0x186e8
+/* [RW 4] Specify which of the credit registers the client is to be mapped
+ * to. This register specifies bits 35:32 of the 36-bit value. Bits[3:0] are
+ * for client 0; bits [35:32] are for client 8. For clients that are not
+ * subject to WFQ credit blocking - their specifications here are not used.
+ * This is a new register (with 2_) added in E3 B0 to accommodate the 9
+ * input clients to ETS arbiter. The reset default is set for management and
+ * debug to use credit registers 6, 7, and 8, respectively, and COSes 0-5 to
+ * use credit registers 0-5 respectively (0x543210876). Note that credit
+ * registers can not be shared between clients. Note also that there are
+ * only COS0-2 in port 1; there is a total of 6 clients in port 1. Only
+ * credit registers 0-5 are valid. This register should be configured
+ * appropriately before enabling WFQ. */
+#define NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_MSB		 0x186ec
+/* [RW 9] Specify whether the client competes directly in the strict
+ * priority arbiter. The bits are mapped according to client ID (client IDs
+ * are defined in tx_arb_priority_client2): 0-management; 1-debug traffic
+ * from this port; 2-debug traffic from other port; 3-COS0 traffic; 4-COS1
+ * traffic; 5-COS2 traffic; 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic.
+ * Default value is set to enable strict priorities for all clients. */
+#define NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT			 0x18234
+/* [RW 9] Specify whether the client is subject to WFQ credit blocking. The
+ * bits are mapped according to client ID (client IDs are defined in
+ * tx_arb_priority_client2): 0-management; 1-debug traffic from this port;
+ * 2-debug traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2
+ * traffic; 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. Default value is
+ * 0 for not using WFQ credit blocking. */
+#define NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ			 0x18238
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_0			 0x18258
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_1			 0x1825c
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_2			 0x18260
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_3			 0x18264
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_4			 0x18268
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_5			 0x186f4
+/* [RW 32] Specify the weight (in bytes) to be added to credit register 0
+ * when it is time to increment. */
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0			 0x18244
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1			 0x18248
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2			 0x1824c
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_3			 0x18250
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_4			 0x18254
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_5			 0x186f0
+/* [RW 12] Specify the number of strict priority arbitration slots between
+   two round-robin arbitration slots to avoid starvation. A value of 0 means
+   no strict priority cycles - the strict priority with anti-starvation
+   arbiter becomes a round-robin arbiter. */
+#define NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS			 0x18240
+/* [RW 32] Specify the client number to be assigned to each priority of the
+   strict priority arbiter. This register specifies bits 31:0 of the 36-bit
+   value. Priority 0 is the highest priority. Bits [3:0] are for priority 0
+   client; bits [35:32] are for priority 8 client. The clients are assigned
+   the following IDs: 0-management; 1-debug traffic from this port; 2-debug
+   traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 traffic;
+   6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. The reset value[35:0] is
+   set to 0x345678021. This is a new register (with 2_) added in E3 B0 to
+   accommodate the 9 input clients to ETS arbiter. Note that this register
+   is the same as the one for port 0, except that port 1 only has COS 0-2
+   traffic. There is no traffic for COS 3-5 of port 1. */
+#define NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB			 0x186e0
+/* [RW 4] Specify the client number to be assigned to each priority of the
+   strict priority arbiter. This register specifies bits 35:32 of the 36-bit
+   value. Priority 0 is the highest priority. Bits [3:0] are for priority 0
+   client; bits [35:32] are for priority 8 client. The clients are assigned
+   the following IDs: 0-management; 1-debug traffic from this port; 2-debug
+   traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 traffic;
+   6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. The reset value[35:0] is
+   set to 0x345678021. This is a new register (with 2_) added in E3 B0 to
+   accommodate the 9 input clients to ETS arbiter. Note that this register
+   is the same as the one for port 0, except that port 1 only has COS 0-2
+   traffic. There is no traffic for COS 3-5 of port 1. */
+#define NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_MSB			 0x186e4
+/* [R 1] TX FIFO for transmitting data to MAC is empty. */
+#define NIG_REG_P1_TX_MACFIFO_EMPTY				 0x18594
+/* [R 1] FIFO empty status of the MCP TX FIFO used for storing MCP packets
+   forwarded to the host. */
+#define NIG_REG_P1_TX_MNG_HOST_FIFO_EMPTY			 0x182b8
 /* [RW 1] Pause enable for port0. This register may get 1 only when
    ~safc_enable.safc_enable = 0 and ppp_enable.ppp_enable =0 for the same
    port */
@@ -2026,12 +2351,45 @@
 #define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS_SIZE 18
 /* [RW 31] The upper bound of the weight of COS0 in the ETS command arbiter. */
 #define PBF_REG_COS0_UPPER_BOUND				 0x15c05c
+/* [RW 31] The upper bound of the weight of COS0 in the ETS command arbiter
+ * of port 0. */
+#define PBF_REG_COS0_UPPER_BOUND_P0				 0x15c2cc
+/* [RW 31] The upper bound of the weight of COS0 in the ETS command arbiter
+ * of port 1. */
+#define PBF_REG_COS0_UPPER_BOUND_P1				 0x15c2e4
 /* [RW 31] The weight of COS0 in the ETS command arbiter. */
 #define PBF_REG_COS0_WEIGHT					 0x15c054
+/* [RW 31] The weight of COS0 in port 0 ETS command arbiter. */
+#define PBF_REG_COS0_WEIGHT_P0					 0x15c2a8
+/* [RW 31] The weight of COS0 in port 1 ETS command arbiter. */
+#define PBF_REG_COS0_WEIGHT_P1					 0x15c2c0
 /* [RW 31] The upper bound of the weight of COS1 in the ETS command arbiter. */
 #define PBF_REG_COS1_UPPER_BOUND				 0x15c060
 /* [RW 31] The weight of COS1 in the ETS command arbiter. */
 #define PBF_REG_COS1_WEIGHT					 0x15c058
+/* [RW 31] The weight of COS1 in port 0 ETS command arbiter. */
+#define PBF_REG_COS1_WEIGHT_P0					 0x15c2ac
+/* [RW 31] The weight of COS1 in port 1 ETS command arbiter. */
+#define PBF_REG_COS1_WEIGHT_P1					 0x15c2c4
+/* [RW 31] The weight of COS2 in port 0 ETS command arbiter. */
+#define PBF_REG_COS2_WEIGHT_P0					 0x15c2b0
+/* [RW 31] The weight of COS2 in port 1 ETS command arbiter. */
+#define PBF_REG_COS2_WEIGHT_P1					 0x15c2c8
+/* [RW 31] The weight of COS3 in port 0 ETS command arbiter. */
+#define PBF_REG_COS3_WEIGHT_P0					 0x15c2b4
+/* [RW 31] The weight of COS4 in port 0 ETS command arbiter. */
+#define PBF_REG_COS4_WEIGHT_P0					 0x15c2b8
+/* [RW 31] The weight of COS5 in port 0 ETS command arbiter. */
+#define PBF_REG_COS5_WEIGHT_P0					 0x15c2bc
+/* [R 11] Current credit for the LB queue in the tx port buffers in 16 byte
+ * lines. */
+#define PBF_REG_CREDIT_LB_Q					 0x140338
+/* [R 11] Current credit for queue 0 in the tx port buffers in 16 byte
+ * lines. */
+#define PBF_REG_CREDIT_Q0					 0x14033c
+/* [R 11] Current credit for queue 1 in the tx port buffers in 16 byte
+ * lines. */
+#define PBF_REG_CREDIT_Q1					 0x140340
 /* [RW 1] Disable processing further tasks from port 0 (after ending the
    current task in process). */
 #define PBF_REG_DISABLE_NEW_TASK_PROC_P0			 0x14005c
@@ -2042,6 +2400,52 @@
    current task in process). */
 #define PBF_REG_DISABLE_NEW_TASK_PROC_P4			 0x14006c
 #define PBF_REG_DISABLE_PF					 0x1402e8
+/* [RW 18] For port 0: For each client that is subject to WFQ (the
+ * corresponding bit is 1); indicates to which of the credit registers this
+ * client is mapped. For clients which are not credit blocked; their mapping
+ * is don't care. */
+#define PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P0			 0x15c288
+/* [RW 9] For port 1: For each client that is subject to WFQ (the
+ * corresponding bit is 1); indicates to which of the credit registers this
+ * client is mapped. For clients which are not credit blocked; their mapping
+ * is don't care. */
+#define PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P1			 0x15c28c
+/* [RW 6] For port 0: Bit per client to indicate if the client competes in
+ * the strict priority arbiter directly (corresponding bit = 1); or first
+ * goes to the RR arbiter (corresponding bit = 0); and then competes in the
+ * lowest priority in the strict-priority arbiter. */
+#define PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0			 0x15c278
+/* [RW 3] For port 1: Bit per client to indicate if the client competes in
+ * the strict priority arbiter directly (corresponding bit = 1); or first
+ * goes to the RR arbiter (corresponding bit = 0); and then competes in the
+ * lowest priority in the strict-priority arbiter. */
+#define PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P1			 0x15c27c
+/* [RW 6] For port 0: Bit per client to indicate if the client is subject to
+ * WFQ credit blocking (corresponding bit = 1). */
+#define PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0		 0x15c280
+/* [RW 3] For port 1: Bit per client to indicate if the client is subject to
+ * WFQ credit blocking (corresponding bit = 1). */
+#define PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1		 0x15c284
+/* [RW 16] For port 0: The number of strict priority arbitration slots
+ * between 2 RR arbitration slots. A value of 0 means no strict priority
+ * cycles; i.e. the strict-priority w/ anti-starvation arbiter is an RR
+ * arbiter. */
+#define PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P0			 0x15c2a0
+/* [RW 16] For port 1: The number of strict priority arbitration slots
+ * between 2 RR arbitration slots. A value of 0 means no strict priority
+ * cycles; i.e. the strict-priority w/ anti-starvation arbiter is an RR
+ * arbiter. */
+#define PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P1			 0x15c2a4
+/* [RW 18] For port 0: Indicates which client is connected to each priority
+ * in the strict-priority arbiter. Priority 0 is the highest priority, and
+ * priority 5 is the lowest, which the RR output is connected to (this is
+ * not configurable). */
+#define PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0			 0x15c270
+/* [RW 9] For port 1: Indicates which client is connected to each priority
+ * in the strict-priority arbiter. Priority 0 is the highest priority, and
+ * priority 5 is the lowest, which the RR output is connected to (this is
+ * not configurable). */
+#define PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1			 0x15c274
 /* [RW 1] Indicates that ETS is performed between the COSes in the command
  * arbiter. If reset strict priority w/ anti-starvation will be performed
  * w/o WFQ. */
@@ -2049,14 +2453,25 @@
 /* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
  * Ethernet header. */
 #define PBF_REG_HDRS_AFTER_BASIC				 0x15c0a8
-/* [RW 1] Indicates which COS is conncted to the highest priority in the
- * command arbiter. */
+/* [RW 6] Bit-map indicating which L2 hdrs may appear after L2 tag 0 */
+#define PBF_REG_HDRS_AFTER_TAG_0				 0x15c0b8
+/* [R 1] Removed for E3 B0 - Indicates which COS is connected to the highest
+ * priority in the command arbiter. */
 #define PBF_REG_HIGH_PRIORITY_COS_NUM				 0x15c04c
 #define PBF_REG_IF_ENABLE_REG					 0x140044
 /* [RW 1] Init bit. When set the initial credits are copied to the credit
    registers (except the port credits). Should be set and then reset after
    the configuration of the block has ended. */
 #define PBF_REG_INIT						 0x140000
+/* [RW 11] Initial credit for the LB queue in the tx port buffers in 16 byte
+ * lines. */
+#define PBF_REG_INIT_CRD_LB_Q					 0x15c248
+/* [RW 11] Initial credit for queue 0 in the tx port buffers in 16 byte
+ * lines. */
+#define PBF_REG_INIT_CRD_Q0					 0x15c230
+/* [RW 11] Initial credit for queue 1 in the tx port buffers in 16 byte
+ * lines. */
+#define PBF_REG_INIT_CRD_Q1					 0x15c234
 /* [RW 1] Init bit for port 0. When set the initial credit of port 0 is
    copied to the credit register. Should be set and then reset after the
    configuration of the port has ended. */
@@ -2069,6 +2484,15 @@
    copied to the credit register. Should be set and then reset after the
    configuration of the port has ended. */
 #define PBF_REG_INIT_P4 					 0x14000c
+/* [R 32] Cyclic counter for the amount of credits in 16 byte lines added
+ * for the LB queue. Reset upon init. */
+#define PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q			 0x140354
+/* [R 32] Cyclic counter for the amount of credits in 16 byte lines added
+ * for queue 0. Reset upon init. */
+#define PBF_REG_INTERNAL_CRD_FREED_CNT_Q0			 0x140358
+/* [R 32] Cyclic counter for the amount of credits in 16 byte lines added
+ * for queue 1. Reset upon init. */
+#define PBF_REG_INTERNAL_CRD_FREED_CNT_Q1			 0x14035c
 /* [RW 1] Enable for mac interface 0. */
 #define PBF_REG_MAC_IF0_ENABLE					 0x140030
 /* [RW 1] Enable for mac interface 1. */
@@ -2089,24 +2513,49 @@
 /* [RW 11] Initial credit for port 0 in the tx port buffers in 16 byte
    lines. */
 #define PBF_REG_P0_INIT_CRD					 0x1400d0
-/* [RW 1] Indication that pause is enabled for port 0. */
-#define PBF_REG_P0_PAUSE_ENABLE 				 0x140014
-/* [R 8] Number of tasks in port 0 task queue. */
+/* [R 32] Cyclic counter for the amount of credits in 16 byte lines added
+ * for port 0. Reset upon init. */
+#define PBF_REG_P0_INTERNAL_CRD_FREED_CNT			 0x140308
+/* [R 1] Removed for E3 B0 - Indication that pause is enabled for port 0. */
+#define PBF_REG_P0_PAUSE_ENABLE					 0x140014
+/* [R 8] Removed for E3 B0 - Number of tasks in port 0 task queue. */
 #define PBF_REG_P0_TASK_CNT					 0x140204
-/* [R 11] Current credit for port 1 in the tx port buffers in 16 byte lines. */
+/* [R 32] Removed for E3 B0 - Cyclic counter for number of 8 byte lines
+ * freed from the task queue of port 0. Reset upon init. */
+#define PBF_REG_P0_TQ_LINES_FREED_CNT				 0x1402f0
+/* [R 12] Number of 8 byte lines occupied in the task queue of port 0. */
+#define PBF_REG_P0_TQ_OCCUPANCY					 0x1402fc
+/* [R 11] Removed for E3 B0 - Current credit for port 1 in the tx port
+ * buffers in 16 byte lines. */
 #define PBF_REG_P1_CREDIT					 0x140208
-/* [RW 11] Initial credit for port 1 in the tx port buffers in 16 byte
-   lines. */
+/* [R 11] Removed for E3 B0 - Initial credit for port 1 in the tx port
+ * buffers in 16 byte lines. */
 #define PBF_REG_P1_INIT_CRD					 0x1400d4
-/* [R 8] Number of tasks in port 1 task queue. */
+/* [R 32] Cyclic counter for the amount of credits in 16 byte lines added
+ * for port 1. Reset upon init. */
+#define PBF_REG_P1_INTERNAL_CRD_FREED_CNT			 0x14030c
+/* [R 8] Removed for E3 B0 - Number of tasks in port 1 task queue. */
 #define PBF_REG_P1_TASK_CNT					 0x14020c
+/* [R 32] Removed for E3 B0 - Cyclic counter for number of 8 byte lines
+ * freed from the task queue of port 1. Reset upon init. */
+#define PBF_REG_P1_TQ_LINES_FREED_CNT				 0x1402f4
+/* [R 12] Number of 8 byte lines occupied in the task queue of port 1. */
+#define PBF_REG_P1_TQ_OCCUPANCY					 0x140300
 /* [R 11] Current credit for port 4 in the tx port buffers in 16 byte lines. */
 #define PBF_REG_P4_CREDIT					 0x140210
 /* [RW 11] Initial credit for port 4 in the tx port buffers in 16 byte
    lines. */
 #define PBF_REG_P4_INIT_CRD					 0x1400e0
-/* [R 8] Number of tasks in port 4 task queue. */
+/* [R 32] Cyclic counter for the amount of credits in 16 byte lines added
+ * for port 4. Reset upon init. */
+#define PBF_REG_P4_INTERNAL_CRD_FREED_CNT			 0x140310
+/* [R 8] Removed for E3 B0 - Number of tasks in port 4 task queue. */
 #define PBF_REG_P4_TASK_CNT					 0x140214
+/* [R 32] Removed for E3 B0 - Cyclic counter for number of 8 byte lines
+ * freed from the task queue of port 4. Reset upon init. */
+#define PBF_REG_P4_TQ_LINES_FREED_CNT				 0x1402f8
+/* [R 12] Number of 8 byte lines occupied in the task queue of port 4. */
+#define PBF_REG_P4_TQ_OCCUPANCY					 0x140304
 /* [RW 5] Interrupt mask register #0 read/write */
 #define PBF_REG_PBF_INT_MASK					 0x1401d4
 /* [R 5] Interrupt register #0 read */
@@ -2115,6 +2564,27 @@
 #define PBF_REG_PBF_PRTY_MASK					 0x1401e4
 /* [RC 20] Parity register #0 read clear */
 #define PBF_REG_PBF_PRTY_STS_CLR				 0x1401dc
+/* [RW 16] The Ethernet type value for L2 tag 0 */
+#define PBF_REG_TAG_ETHERTYPE_0					 0x15c090
+/* [RW 4] The length of the info field for L2 tag 0. The length is between
+ * 2B and 14B; in 2B granularity */
+#define PBF_REG_TAG_LEN_0					 0x15c09c
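
A sketch of how these two registers work together, assuming REG_WR() and assuming the length register counts 2-byte units (one reading of "2B granularity"); the 802.1Q values are purely illustrative:

	/* Hypothetical: describe a VLAN tag (2-byte TCI info field). */
	u32 info_len_bytes = 2;

	REG_WR(bp, PBF_REG_TAG_ETHERTYPE_0, 0x8100);	/* illustrative */
	REG_WR(bp, PBF_REG_TAG_LEN_0, info_len_bytes / 2);
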
+/* [R 32] Cyclic counter for number of 8 byte lines freed from the LB task
+ * queue. Reset upon init. */
+#define PBF_REG_TQ_LINES_FREED_CNT_LB_Q				 0x14038c
+/* [R 32] Cyclic counter for number of 8 byte lines freed from the task
+ * queue 0. Reset upon init. */
+#define PBF_REG_TQ_LINES_FREED_CNT_Q0				 0x140390
+/* [R 32] Cyclic counter for number of 8 byte lines freed from task queue 1.
+ * Reset upon init. */
+#define PBF_REG_TQ_LINES_FREED_CNT_Q1				 0x140394
+/* [R 13] Number of 8 byte lines occupied in the task queue of the LB
+ * queue. */
+#define PBF_REG_TQ_OCCUPANCY_LB_Q				 0x1403a8
+/* [R 13] Number of 8 byte lines occupied in the task queue of queue 0. */
+#define PBF_REG_TQ_OCCUPANCY_Q0					 0x1403ac
+/* [R 13] Number of 8 byte lines occupied in the task queue of queue 1. */
+#define PBF_REG_TQ_OCCUPANCY_Q1					 0x1403b0
 #define PB_REG_CONTROL						 0
 /* [RW 2] Interrupt mask register #0 read/write */
 #define PB_REG_PB_INT_MASK					 0x28
@@ -2444,10 +2914,24 @@
 /* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
  * Ethernet header. */
 #define PRS_REG_HDRS_AFTER_BASIC				 0x40238
+/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
+ * Ethernet header for port 0 packets. */
+#define PRS_REG_HDRS_AFTER_BASIC_PORT_0				 0x40270
+#define PRS_REG_HDRS_AFTER_BASIC_PORT_1				 0x40290
+/* [R 6] Bit-map indicating which L2 hdrs may appear after L2 tag 0 */
+#define PRS_REG_HDRS_AFTER_TAG_0				 0x40248
+/* [RW 6] Bit-map indicating which L2 hdrs may appear after L2 tag 0 for
+ * port 0 packets */
+#define PRS_REG_HDRS_AFTER_TAG_0_PORT_0				 0x40280
+#define PRS_REG_HDRS_AFTER_TAG_0_PORT_1				 0x402a0
 /* [RW 4] The increment value to send in the CFC load request message */
 #define PRS_REG_INC_VALUE					 0x40048
 /* [RW 6] Bit-map indicating which headers must appear in the packet */
 #define PRS_REG_MUST_HAVE_HDRS					 0x40254
+/* [RW 6] Bit-map indicating which headers must appear in the packet for
+ * port 0 packets */
+#define PRS_REG_MUST_HAVE_HDRS_PORT_0				 0x4028c
+#define PRS_REG_MUST_HAVE_HDRS_PORT_1				 0x402ac
 #define PRS_REG_NIC_MODE					 0x40138
 /* [RW 8] The 8-bit event ID for cases where there is no match on the
    connection. Used in packet start message to TCM. */
@@ -2496,6 +2980,11 @@
 #define PRS_REG_SERIAL_NUM_STATUS_MSB				 0x40158
 /* [R 4] debug only: SRC current credit. Transaction based. */
 #define PRS_REG_SRC_CURRENT_CREDIT				 0x4016c
+/* [RW 16] The Ethernet type value for L2 tag 0 */
+#define PRS_REG_TAG_ETHERTYPE_0					 0x401d4
+/* [RW 4] The length of the info field for L2 tag 0. The length is between
+ * 2B and 14B; in 2B granularity */
+#define PRS_REG_TAG_LEN_0					 0x4022c
 /* [R 8] debug only: TCM current credit. Cycle based. */
 #define PRS_REG_TCM_CURRENT_CREDIT				 0x40160
 /* [R 8] debug only: TSDM current credit. Transaction based. */
@@ -3080,6 +3569,7 @@
 #define QM_REG_BYTECREDITAFULLTHR				 0x168094
 /* [RW 4] The initial credit for interface */
 #define QM_REG_CMINITCRD_0					 0x1680cc
+#define QM_REG_BYTECRDCMDQ_0					 0x16e6e8
 #define QM_REG_CMINITCRD_1					 0x1680d0
 #define QM_REG_CMINITCRD_2					 0x1680d4
 #define QM_REG_CMINITCRD_3					 0x1680d8
@@ -3170,7 +3660,10 @@
 /* [RW 2] The PCI attributes field used in the PCI request. */
 #define QM_REG_PCIREQAT 					 0x168054
 #define QM_REG_PF_EN						 0x16e70c
-/* [R 16] The byte credit of port 0 */
+/* [R 24] The number of tasks stored in the QM for the PF. Only even
+ * functions are valid in E2 (odd I registers will be hard-wired to 0). */
+#define QM_REG_PF_USG_CNT_0					 0x16e040
+/* [R 16] NOT USED */
 #define QM_REG_PORT0BYTECRD					 0x168300
 /* [R 16] The byte credit of port 1 */
 #define QM_REG_PORT1BYTECRD					 0x168304
@@ -3782,6 +4275,8 @@
 #define TM_REG_LIN0_LOGIC_ADDR					 0x164240
 /* [RW 18] Linear0 Max active cid (in banks of 32 entries). */
 #define TM_REG_LIN0_MAX_ACTIVE_CID				 0x164048
+/* [ST 16] Linear0 Number of scans counter. */
+#define TM_REG_LIN0_NUM_SCANS					 0x1640a0
 /* [WB 64] Linear0 phy address. */
 #define TM_REG_LIN0_PHY_ADDR					 0x164270
 /* [RW 1] Linear0 physical address valid. */
@@ -3789,6 +4284,7 @@
 #define TM_REG_LIN0_SCAN_ON					 0x1640d0
 /* [RW 24] Linear0 array scan timeout. */
 #define TM_REG_LIN0_SCAN_TIME					 0x16403c
+#define TM_REG_LIN0_VNIC_UC					 0x164128
 /* [RW 32] Linear1 logic address. */
 #define TM_REG_LIN1_LOGIC_ADDR					 0x164250
 /* [WB 64] Linear1 phy address. */
@@ -4175,6 +4671,8 @@
 #define UCM_REG_UCM_INT_MASK					 0xe01d4
 /* [R 11] Interrupt register #0 read */
 #define UCM_REG_UCM_INT_STS					 0xe01c8
+/* [RW 27] Parity mask register #0 read/write */
+#define UCM_REG_UCM_PRTY_MASK					 0xe01e4
 /* [R 27] Parity register #0 read */
 #define UCM_REG_UCM_PRTY_STS					 0xe01d8
 /* [RC 27] Parity register #0 read clear */
@@ -4265,6 +4763,17 @@
    The fields are: [4:0] - tail pointer; 10:5] - Link List size; 15:11] -
    header pointer. */
 #define UCM_REG_XX_TABLE					 0xe0300
+#define UMAC_COMMAND_CONFIG_REG_LOOP_ENA			 (0x1<<15)
+#define UMAC_COMMAND_CONFIG_REG_NO_LGTH_CHECK			 (0x1<<24)
+#define UMAC_COMMAND_CONFIG_REG_PAD_EN				 (0x1<<5)
+#define UMAC_COMMAND_CONFIG_REG_PROMIS_EN			 (0x1<<4)
+#define UMAC_COMMAND_CONFIG_REG_RX_ENA				 (0x1<<1)
+#define UMAC_COMMAND_CONFIG_REG_SW_RESET			 (0x1<<13)
+#define UMAC_COMMAND_CONFIG_REG_TX_ENA				 (0x1<<0)
+#define UMAC_REG_COMMAND_CONFIG					 0x8
+/* [RW 14] Defines a 14-Bit maximum frame length used by the MAC receive
+ * logic to check frames. */
+#define UMAC_REG_MAXFR						 0x14
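
Together these bits support a minimal UMAC bring-up. A sketch, assuming the REG_RD()/REG_WR() helpers, udelay(), and the GRCBASE_UMAC0 base defined further down in this patch; the settle delay is an assumption:

	/* Hypothetical: pulse soft reset, then enable both data paths. */
	u32 base = GRCBASE_UMAC0;
	u32 val = REG_RD(bp, base + UMAC_REG_COMMAND_CONFIG);

	REG_WR(bp, base + UMAC_REG_COMMAND_CONFIG,
	       val | UMAC_COMMAND_CONFIG_REG_SW_RESET);
	udelay(50);
	val &= ~UMAC_COMMAND_CONFIG_REG_SW_RESET;
	val |= UMAC_COMMAND_CONFIG_REG_RX_ENA | UMAC_COMMAND_CONFIG_REG_TX_ENA;
	REG_WR(bp, base + UMAC_REG_COMMAND_CONFIG, val);
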
 /* [RW 8] The event id for aggregated interrupt 0 */
 #define USDM_REG_AGG_INT_EVENT_0				 0xc4038
 #define USDM_REG_AGG_INT_EVENT_1				 0xc403c
@@ -4696,8 +5205,13 @@
 #define XCM_REG_XCM_INT_MASK					 0x202b4
 /* [R 14] Interrupt register #0 read */
 #define XCM_REG_XCM_INT_STS					 0x202a8
+/* [RW 30] Parity mask register #0 read/write */
+#define XCM_REG_XCM_PRTY_MASK					 0x202c4
 /* [R 30] Parity register #0 read */
 #define XCM_REG_XCM_PRTY_STS					 0x202b8
+/* [RC 30] Parity register #0 read clear */
+#define XCM_REG_XCM_PRTY_STS_CLR				 0x202bc
+
 /* [RW 4] The size of AG context region 0 in REG-pairs. Designates the MS
    REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
    Is used to determine the number of the AG context REG-pairs written back;
@@ -4772,6 +5286,28 @@
 #define XCM_REG_XX_MSG_NUM					 0x20428
 /* [RW 8] The Event ID; sent to the STORM in case of XX overflow. */
 #define XCM_REG_XX_OVFL_EVNT_ID 				 0x20058
+#define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS	 (0x1<<0)
+#define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS	 (0x1<<1)
+#define XMAC_CTRL_REG_CORE_LOCAL_LPBK				 (0x1<<3)
+#define XMAC_CTRL_REG_RX_EN					 (0x1<<1)
+#define XMAC_CTRL_REG_SOFT_RESET				 (0x1<<6)
+#define XMAC_CTRL_REG_TX_EN					 (0x1<<0)
+#define XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN				 (0x1<<18)
+#define XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN				 (0x1<<17)
+#define XMAC_PFC_CTRL_HI_REG_PFC_REFRESH_EN			 (0x1<<0)
+#define XMAC_PFC_CTRL_HI_REG_PFC_STATS_EN			 (0x1<<3)
+#define XMAC_PFC_CTRL_HI_REG_RX_PFC_EN				 (0x1<<4)
+#define XMAC_PFC_CTRL_HI_REG_TX_PFC_EN				 (0x1<<5)
+#define XMAC_REG_CLEAR_RX_LSS_STATUS				 0x60
+#define XMAC_REG_CTRL						 0
+#define XMAC_REG_PAUSE_CTRL					 0x68
+#define XMAC_REG_PFC_CTRL					 0x70
+#define XMAC_REG_PFC_CTRL_HI					 0x74
+#define XMAC_REG_RX_LSS_STATUS					 0x58
+/* [RW 14] Maximum packet size in receive direction; exclusive of preamble &
+ * CRC in strip mode */
+#define XMAC_REG_RX_MAX_SIZE					 0x40
+#define XMAC_REG_TX_CTRL					 0x20
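
A corresponding sketch for the XMAC, again assuming REG_WR() and the GRCBASE_XMAC0 base defined further down; a read-modify-write of the pause register may be preferable in practice:

	/* Hypothetical: enable both directions and symmetric pause. */
	u32 base = GRCBASE_XMAC0;

	REG_WR(bp, base + XMAC_REG_CTRL,
	       XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN);
	REG_WR(bp, base + XMAC_REG_PAUSE_CTRL,
	       XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN |
	       XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN);
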
 /* [RW 16] Indirect access to the XX table of the XX protection mechanism.
    The fields are:[4:0] - tail pointer; 9:5] - Link List size; 14:10] -
    header pointer. */
@@ -4846,6 +5382,10 @@
 #define XSDM_REG_NUM_OF_Q9_CMD					 0x166268
 /* [RW 13] The start address in the internal RAM for queue counters */
 #define XSDM_REG_Q_COUNTER_START_ADDR				 0x166010
+/* [W 17] Generate an operation after completion; bit-16 is
+ * AggVectIdx_valid; bits 15:8 are AggVectIdx; bits 7:5 are the TRIG and
+ * bits 4:0 are the T124Param[4:0] */
+#define XSDM_REG_OPERATION_GEN					 0x1664c4
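
The field layout above maps directly onto shifts. A sketch with illustrative values (the variable names are not from the source):

	/* Hypothetical: compose and fire one operation-generation word. */
	u32 agg_vect_idx = 5, trig = 2, param = 0x11;	/* illustrative */
	u32 op = (1 << 16) |		/* AggVectIdx_valid */
		 (agg_vect_idx << 8) |	/* bits 15:8 - AggVectIdx */
		 (trig << 5) |		/* bits 7:5 - TRIG */
		 (param & 0x1f);	/* bits 4:0 - T124Param */

	REG_WR(bp, XSDM_REG_OPERATION_GEN, op);
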
 /* [R 1] pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */
 #define XSDM_REG_RSP_PXP_CTRL_RDATA_EMPTY			 0x166548
 /* [R 1] parser fifo empty in sdm_sync block */
@@ -5019,6 +5559,7 @@
 #define BIGMAC_REGISTER_CNT_MAX_SIZE				 (0x05<<3)
 #define BIGMAC_REGISTER_RX_CONTROL				 (0x21<<3)
 #define BIGMAC_REGISTER_RX_LLFC_MSG_FLDS			 (0x46<<3)
+#define BIGMAC_REGISTER_RX_LSS_STATUS				 (0x43<<3)
 #define BIGMAC_REGISTER_RX_MAX_SIZE				 (0x23<<3)
 #define BIGMAC_REGISTER_RX_STAT_GR64				 (0x26<<3)
 #define BIGMAC_REGISTER_RX_STAT_GRIPJ				 (0x42<<3)
@@ -5034,6 +5575,7 @@
 #define BIGMAC2_REGISTER_PFC_CONTROL				 (0x06<<3)
 #define BIGMAC2_REGISTER_RX_CONTROL				 (0x3A<<3)
 #define BIGMAC2_REGISTER_RX_LLFC_MSG_FLDS			 (0x62<<3)
+#define BIGMAC2_REGISTER_RX_LSS_STAT				 (0x3E<<3)
 #define BIGMAC2_REGISTER_RX_MAX_SIZE				 (0x3C<<3)
 #define BIGMAC2_REGISTER_RX_STAT_GR64				 (0x40<<3)
 #define BIGMAC2_REGISTER_RX_STAT_GRIPJ				 (0x5f<<3)
@@ -5052,7 +5594,9 @@
 #define EMAC_LED_OVERRIDE					 (1L<<0)
 #define EMAC_LED_TRAFFIC					 (1L<<6)
 #define EMAC_MDIO_COMM_COMMAND_ADDRESS				 (0L<<26)
+#define EMAC_MDIO_COMM_COMMAND_READ_22				 (2L<<26)
 #define EMAC_MDIO_COMM_COMMAND_READ_45				 (3L<<26)
+#define EMAC_MDIO_COMM_COMMAND_WRITE_22				 (1L<<26)
 #define EMAC_MDIO_COMM_COMMAND_WRITE_45 			 (1L<<26)
 #define EMAC_MDIO_COMM_DATA					 (0xffffL<<0)
 #define EMAC_MDIO_COMM_START_BUSY				 (1L<<29)
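
The new READ_22/WRITE_22 opcodes live in the same COMMAND field as their clause-45 counterparts. A sketch of a clause-22 read; the PHY-address (21) and register-address (16) shifts, emac_base, and the EMAC_REG_EMAC_MDIO_COMM offset are assumptions, not part of this hunk:

	/* Hypothetical clause-22 read via the EMAC MDIO interface. */
	u32 phy_addr = 1, reg_addr = 0x1a;	/* illustrative */
	u32 cmd = (phy_addr << 21) | (reg_addr << 16) |
		  EMAC_MDIO_COMM_COMMAND_READ_22 |
		  EMAC_MDIO_COMM_START_BUSY;

	REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM, cmd);
	/* Poll until EMAC_MDIO_COMM_START_BUSY clears; the result is then
	 * in the EMAC_MDIO_COMM_DATA field of the same register.
	 */
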
@@ -5128,16 +5672,24 @@
 #define MISC_REGISTERS_RESET_REG_1_RST_PXPV			 (0x1<<27)
 #define MISC_REGISTERS_RESET_REG_1_SET				 0x584
 #define MISC_REGISTERS_RESET_REG_2_CLEAR			 0x598
+#define MISC_REGISTERS_RESET_REG_2_MSTAT0			 (0x1<<24)
+#define MISC_REGISTERS_RESET_REG_2_MSTAT1			 (0x1<<25)
 #define MISC_REGISTERS_RESET_REG_2_RST_BMAC0			 (0x1<<0)
 #define MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE		 (0x1<<14)
 #define MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE		 (0x1<<15)
 #define MISC_REGISTERS_RESET_REG_2_RST_GRC			 (0x1<<4)
 #define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B	 (0x1<<6)
+#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE	 (0x1<<8)
+#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU	 (0x1<<7)
 #define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE (0x1<<5)
 #define MISC_REGISTERS_RESET_REG_2_RST_MDIO			 (0x1<<13)
 #define MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE		 (0x1<<11)
+#define MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO			 (0x1<<13)
 #define MISC_REGISTERS_RESET_REG_2_RST_RBCN			 (0x1<<9)
 #define MISC_REGISTERS_RESET_REG_2_SET				 0x594
+#define MISC_REGISTERS_RESET_REG_2_UMAC0			 (0x1<<20)
+#define MISC_REGISTERS_RESET_REG_2_XMAC				 (0x1<<22)
+#define MISC_REGISTERS_RESET_REG_2_XMAC_SOFT			 (0x1<<23)
 #define MISC_REGISTERS_RESET_REG_3_CLEAR			 0x5a8
 #define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_IDDQ	 (0x1<<1)
 #define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN	 (0x1<<2)
@@ -5160,74 +5712,86 @@
 #define MISC_REGISTERS_SPIO_OUTPUT_HIGH 			 1
 #define MISC_REGISTERS_SPIO_OUTPUT_LOW				 0
 #define MISC_REGISTERS_SPIO_SET_POS				 8
+#define HW_LOCK_DRV_FLAGS					 10
 #define HW_LOCK_MAX_RESOURCE_VALUE				 31
 #define HW_LOCK_RESOURCE_GPIO					 1
 #define HW_LOCK_RESOURCE_MDIO					 0
-#define HW_LOCK_RESOURCE_PORT0_ATT_MASK 			 3
-#define HW_LOCK_RESOURCE_RESERVED_08				 8
+#define HW_LOCK_RESOURCE_PORT0_ATT_MASK				 3
+#define HW_LOCK_RESOURCE_RECOVERY_LEADER_0			 8
+#define HW_LOCK_RESOURCE_RECOVERY_LEADER_1			 9
 #define HW_LOCK_RESOURCE_SPIO					 2
 #define HW_LOCK_RESOURCE_UNDI					 5
-#define PRS_FLAG_OVERETH_IPV4					 1
-#define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT		      (0x1<<4)
-#define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR		      (0x1<<5)
-#define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR		      (1<<18)
-#define AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT		      (1<<31)
-#define AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT		      (1<<9)
-#define AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR		      (1<<8)
-#define AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT		      (1<<7)
-#define AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR		      (1<<6)
-#define AEU_INPUTS_ATTN_BITS_CSDM_HW_INTERRUPT		      (1<<29)
-#define AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR		      (1<<28)
-#define AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT 	      (1<<1)
-#define AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR 	      (1<<0)
-#define AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR 	      (1<<18)
-#define AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT		      (1<<11)
-#define AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT	      (1<<13)
-#define AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR	      (1<<12)
-#define AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0		      (1<<5)
-#define AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1		      (1<<9)
-#define AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR		      (1<<12)
-#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY	      (1<<28)
-#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY	      (1<<31)
-#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY	      (1<<29)
-#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY	      (1<<30)
-#define AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT		      (1<<15)
-#define AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR		      (1<<14)
-#define AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR	      (1<<20)
-#define AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR	      (1<<0)
-#define AEU_INPUTS_ATTN_BITS_PBF_HW_INTERRUPT		      (1<<31)
-#define AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT	      (0x1<<2)
-#define AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR	      (0x1<<3)
-#define AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT		      (1<<3)
-#define AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR		      (1<<2)
-#define AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT   (1<<5)
-#define AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR   (1<<4)
-#define AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT		      (1<<3)
-#define AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR		      (1<<2)
-#define AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR	      (1<<22)
-#define AEU_INPUTS_ATTN_BITS_SPIO5			      (1<<15)
-#define AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT		      (1<<27)
-#define AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT	      (1<<5)
-#define AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT		      (1<<25)
-#define AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR		      (1<<24)
-#define AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT 	      (1<<29)
-#define AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR 	      (1<<28)
-#define AEU_INPUTS_ATTN_BITS_UCM_HW_INTERRUPT		      (1<<23)
-#define AEU_INPUTS_ATTN_BITS_UPB_HW_INTERRUPT		      (1<<27)
-#define AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR		      (1<<26)
-#define AEU_INPUTS_ATTN_BITS_USDM_HW_INTERRUPT		      (1<<21)
-#define AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR		      (1<<20)
-#define AEU_INPUTS_ATTN_BITS_USEMI_HW_INTERRUPT 	      (1<<25)
-#define AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR 	      (1<<24)
-#define AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR       (1<<16)
-#define AEU_INPUTS_ATTN_BITS_XCM_HW_INTERRUPT		      (1<<9)
-#define AEU_INPUTS_ATTN_BITS_XSDM_HW_INTERRUPT		      (1<<7)
-#define AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR		      (1<<6)
-#define AEU_INPUTS_ATTN_BITS_XSEMI_HW_INTERRUPT 	      (1<<11)
-#define AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR 	      (1<<10)
+#define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT			 (0x1<<4)
+#define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR			 (0x1<<5)
+#define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR			 (0x1<<18)
+#define AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT			 (0x1<<31)
+#define AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR			 (0x1<<30)
+#define AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT			 (0x1<<9)
+#define AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR			 (0x1<<8)
+#define AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT			 (0x1<<7)
+#define AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR			 (0x1<<6)
+#define AEU_INPUTS_ATTN_BITS_CSDM_HW_INTERRUPT			 (0x1<<29)
+#define AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR			 (0x1<<28)
+#define AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT			 (0x1<<1)
+#define AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR			 (0x1<<0)
+#define AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR			 (0x1<<18)
+#define AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT			 (0x1<<11)
+#define AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR			 (0x1<<10)
+#define AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT		 (0x1<<13)
+#define AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR		 (0x1<<12)
+#define AEU_INPUTS_ATTN_BITS_GPIO0_FUNCTION_0			 (0x1<<2)
+#define AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR			 (0x1<<12)
+#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY		 (0x1<<28)
+#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY		 (0x1<<31)
+#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY		 (0x1<<29)
+#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY		 (0x1<<30)
+#define AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT			 (0x1<<15)
+#define AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR			 (0x1<<14)
+#define AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR			 (0x1<<14)
+#define AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR		 (0x1<<20)
+#define AEU_INPUTS_ATTN_BITS_PBCLIENT_HW_INTERRUPT		 (0x1<<31)
+#define AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR		 (0x1<<30)
+#define AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR			 (0x1<<0)
+#define AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT			 (0x1<<2)
+#define AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR			 (0x1<<3)
+#define AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT	 (0x1<<5)
+#define AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR	 (0x1<<4)
+#define AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT			 (0x1<<3)
+#define AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR			 (0x1<<2)
+#define AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT			 (0x1<<3)
+#define AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR			 (0x1<<2)
+#define AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR		 (0x1<<22)
+#define AEU_INPUTS_ATTN_BITS_SPIO5				 (0x1<<15)
+#define AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT			 (0x1<<27)
+#define AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR			 (0x1<<26)
+#define AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT		 (0x1<<5)
+#define AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR		 (0x1<<4)
+#define AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT			 (0x1<<25)
+#define AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR			 (0x1<<24)
+#define AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT			 (0x1<<29)
+#define AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR			 (0x1<<28)
+#define AEU_INPUTS_ATTN_BITS_UCM_HW_INTERRUPT			 (0x1<<23)
+#define AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR			 (0x1<<22)
+#define AEU_INPUTS_ATTN_BITS_UPB_HW_INTERRUPT			 (0x1<<27)
+#define AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR			 (0x1<<26)
+#define AEU_INPUTS_ATTN_BITS_USDM_HW_INTERRUPT			 (0x1<<21)
+#define AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR			 (0x1<<20)
+#define AEU_INPUTS_ATTN_BITS_USEMI_HW_INTERRUPT			 (0x1<<25)
+#define AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR			 (0x1<<24)
+#define AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR		 (0x1<<16)
+#define AEU_INPUTS_ATTN_BITS_XCM_HW_INTERRUPT			 (0x1<<9)
+#define AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR			 (0x1<<8)
+#define AEU_INPUTS_ATTN_BITS_XSDM_HW_INTERRUPT			 (0x1<<7)
+#define AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR			 (0x1<<6)
+#define AEU_INPUTS_ATTN_BITS_XSEMI_HW_INTERRUPT			 (0x1<<11)
+#define AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR			 (0x1<<10)
+
+#define AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0			(0x1<<5)
+#define AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1			(0x1<<9)
+
 #define RESERVED_GENERAL_ATTENTION_BIT_0	0
 
-#define EVEREST_GEN_ATTN_IN_USE_MASK		0x3ffe0
+#define EVEREST_GEN_ATTN_IN_USE_MASK		0x7ffe0
 #define EVEREST_LATCHED_ATTN_IN_USE_MASK	0xffe00000
 
 #define RESERVED_GENERAL_ATTENTION_BIT_6	6
@@ -5317,7 +5881,13 @@
 #define GRCBASE_HC		0x108000
 #define GRCBASE_PXP2		0x120000
 #define GRCBASE_PBF		0x140000
+#define GRCBASE_UMAC0		0x160000
+#define GRCBASE_UMAC1		0x160400
 #define GRCBASE_XPB		0x161000
+#define GRCBASE_MSTAT0		0x162000
+#define GRCBASE_MSTAT1		0x162800
+#define GRCBASE_XMAC0		0x163000
+#define GRCBASE_XMAC1		0x163800
 #define GRCBASE_TIMERS		0x164000
 #define GRCBASE_XSDM		0x166000
 #define GRCBASE_QM		0x168000
@@ -5883,6 +6453,10 @@
 #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_16G		0x0C00
 #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G_KX	0x0D00
 #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4	0x0E00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KR	0x0F00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_XFI	0x1B00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_DXGXS	0x1E00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_SFI	0x1F00
 
 
 #define MDIO_REG_BANK_10G_PARALLEL_DETECT		0x8130
@@ -6036,11 +6610,6 @@
 /*bcm*/
 #define MDIO_PMA_REG_BCM_CTRL		0x0096
 #define MDIO_PMA_REG_FEC_CTRL		0x00ab
-#define MDIO_PMA_REG_RX_ALARM_CTRL	0x9000
-#define MDIO_PMA_REG_LASI_CTRL		0x9002
-#define MDIO_PMA_REG_RX_ALARM		0x9003
-#define MDIO_PMA_REG_TX_ALARM		0x9004
-#define MDIO_PMA_REG_LASI_STATUS	0x9005
 #define MDIO_PMA_REG_PHY_IDENTIFIER	0xc800
 #define MDIO_PMA_REG_DIGITAL_CTRL	0xc808
 #define MDIO_PMA_REG_DIGITAL_STATUS	0xc809
@@ -6201,6 +6770,153 @@
 #define MDIO_PMA_REG_84823_CTL_LED_CTL_1		0xa8e3
 #define MDIO_PMA_REG_84823_LED3_STRETCH_EN		0x0080
 
+/* BCM84833 only */
+#define MDIO_84833_TOP_CFG_XGPHY_STRAP1			0x401a
+#define MDIO_84833_SUPER_ISOLATE		0x8000
+/* These are the mailbox registers used by the 84833. */
+#define MDIO_84833_TOP_CFG_SCRATCH_REG0			0x4005
+#define MDIO_84833_TOP_CFG_SCRATCH_REG1			0x4006
+#define MDIO_84833_TOP_CFG_SCRATCH_REG2			0x4007
+#define MDIO_84833_TOP_CFG_SCRATCH_REG3			0x4008
+#define MDIO_84833_TOP_CFG_SCRATCH_REG4			0x4009
+
+/* Mailbox command set used by 84833. */
+#define PHY84833_DIAG_CMD_PAIR_SWAP_CHANGE		0x2
+/* Mailbox status set used by 84833. */
+#define PHY84833_CMD_RECEIVED				0x0001
+#define PHY84833_CMD_IN_PROGRESS			0x0002
+#define PHY84833_CMD_COMPLETE_PASS			0x0004
+#define PHY84833_CMD_COMPLETE_ERROR			0x0008
+#define PHY84833_CMD_OPEN_FOR_CMDS			0x0010
+#define PHY84833_CMD_SYSTEM_BOOT			0x0020
+#define PHY84833_CMD_NOT_OPEN_FOR_CMDS			0x0040
+#define PHY84833_CMD_CLEAR_COMPLETE			0x0080
+#define PHY84833_CMD_OPEN_OVERRIDE			0xa5a5
+
+/* Warpcore clause 45 addressing */
+#define MDIO_WC_DEVAD					0x3
+#define MDIO_WC_REG_IEEE0BLK_MIICNTL			0x0
+#define MDIO_WC_REG_IEEE0BLK_AUTONEGNP			0x7
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT0	0x10
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1	0x11
+#define MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150  0x96
+#define MDIO_WC_REG_XGXSBLK0_XGXSCONTROL		0x8000
+#define MDIO_WC_REG_XGXSBLK0_MISCCONTROL1		0x800e
+#define MDIO_WC_REG_XGXSBLK1_DESKEW			0x8010
+#define MDIO_WC_REG_XGXSBLK1_LANECTRL0			0x8015
+#define MDIO_WC_REG_XGXSBLK1_LANECTRL1			0x8016
+#define MDIO_WC_REG_XGXSBLK1_LANECTRL2			0x8017
+#define MDIO_WC_REG_TX0_ANA_CTRL0			0x8061
+#define MDIO_WC_REG_TX1_ANA_CTRL0			0x8071
+#define MDIO_WC_REG_TX2_ANA_CTRL0			0x8081
+#define MDIO_WC_REG_TX3_ANA_CTRL0			0x8091
+#define MDIO_WC_REG_TX0_TX_DRIVER			0x8067
+#define MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET		0x04
+#define MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_MASK			0x00f0
+#define MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET		0x08
+#define MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_MASK				0x0f00
+#define MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET		0x0c
+#define MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_MASK			0x7000
+#define MDIO_WC_REG_TX1_TX_DRIVER			0x8077
+#define MDIO_WC_REG_TX2_TX_DRIVER			0x8087
+#define MDIO_WC_REG_TX3_TX_DRIVER			0x8097
+#define MDIO_WC_REG_RX0_ANARXCONTROL1G			0x80b9
+#define MDIO_WC_REG_RX2_ANARXCONTROL1G			0x80d9
+#define MDIO_WC_REG_RX0_PCI_CTRL			0x80ba
+#define MDIO_WC_REG_RX1_PCI_CTRL			0x80ca
+#define MDIO_WC_REG_RX2_PCI_CTRL			0x80da
+#define MDIO_WC_REG_RX3_PCI_CTRL			0x80ea
+#define MDIO_WC_REG_XGXSBLK2_UNICORE_MODE_10G		0x8104
+#define MDIO_WC_REG_XGXS_STATUS3			0x8129
+#define MDIO_WC_REG_PAR_DET_10G_STATUS			0x8130
+#define MDIO_WC_REG_PAR_DET_10G_CTRL			0x8131
+#define MDIO_WC_REG_XGXS_X2_CONTROL2			0x8141
+#define MDIO_WC_REG_XGXS_RX_LN_SWAP1			0x816B
+#define MDIO_WC_REG_XGXS_TX_LN_SWAP1			0x8169
+#define MDIO_WC_REG_GP2_STATUS_GP_2_0			0x81d0
+#define MDIO_WC_REG_GP2_STATUS_GP_2_1			0x81d1
+#define MDIO_WC_REG_GP2_STATUS_GP_2_2			0x81d2
+#define MDIO_WC_REG_GP2_STATUS_GP_2_3			0x81d3
+#define MDIO_WC_REG_GP2_STATUS_GP_2_4			0x81d4
+#define MDIO_WC_REG_UC_INFO_B0_DEAD_TRAP		0x81EE
+#define MDIO_WC_REG_UC_INFO_B1_VERSION			0x81F0
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE		0x81F2
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE0_OFFSET	0x0
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT	    0x0
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_OPT_LR	    0x1
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_DAC	    0x2
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_XLAUI	    0x3
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_LONG_CH_6G	    0x4
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE1_OFFSET	0x4
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE2_OFFSET	0x8
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE3_OFFSET	0xc
+#define MDIO_WC_REG_UC_INFO_B1_CRC			0x81FE
+#define MDIO_WC_REG_DSC_SMC				0x8213
+#define MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0		0x821e
+#define MDIO_WC_REG_TX_FIR_TAP				0x82e2
+#define MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET		0x00
+#define MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_MASK			0x000f
+#define MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET		0x04
+#define MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_MASK		0x03f0
+#define MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET		0x0a
+#define MDIO_WC_REG_TX_FIR_TAP_POST_TAP_MASK		0x7c00
+#define MDIO_WC_REG_TX_FIR_TAP_ENABLE		0x8000
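
The tap offsets and masks compose into a single 16-bit value that is latched by the enable bit. A sketch (tap values purely illustrative):

	/* Hypothetical: pack pre/main/post FIR taps for the Warpcore. */
	u16 pre_tap = 0x2, main_tap = 0x25, post_tap = 0x10;
	u16 fir_tap = ((pre_tap << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET) &
		       MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_MASK) |
		      ((main_tap << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) &
		       MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_MASK) |
		      ((post_tap << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) &
		       MDIO_WC_REG_TX_FIR_TAP_POST_TAP_MASK) |
		      MDIO_WC_REG_TX_FIR_TAP_ENABLE;

The result would then be written to MDIO_WC_REG_TX_FIR_TAP over clause-45 MDIO at MDIO_WC_DEVAD.
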
+#define MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL	0x82e3
+#define MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL	0x82e6
+#define MDIO_WC_REG_CL72_USERB0_CL72_BR_DEF_CTRL	0x82e7
+#define MDIO_WC_REG_CL72_USERB0_CL72_2P5_DEF_CTRL	0x82e8
+#define MDIO_WC_REG_CL72_USERB0_CL72_MISC4_CONTROL	0x82ec
+#define MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1		0x8300
+#define MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2		0x8301
+#define MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3		0x8302
+#define MDIO_WC_REG_SERDESDIGITAL_STATUS1000X1		0x8304
+#define MDIO_WC_REG_SERDESDIGITAL_MISC1			0x8308
+#define MDIO_WC_REG_SERDESDIGITAL_MISC2			0x8309
+#define MDIO_WC_REG_DIGITAL3_UP1			0x8329
+#define MDIO_WC_REG_DIGITAL4_MISC3			0x833c
+#define MDIO_WC_REG_DIGITAL5_MISC6			0x8345
+#define MDIO_WC_REG_DIGITAL5_MISC7			0x8349
+#define MDIO_WC_REG_DIGITAL5_ACTUAL_SPEED		0x834e
+#define MDIO_WC_REG_CL49_USERB0_CTRL			0x8368
+#define MDIO_WC_REG_TX66_CONTROL			0x83b0
+#define MDIO_WC_REG_RX66_CONTROL			0x83c0
+#define MDIO_WC_REG_RX66_SCW0				0x83c2
+#define MDIO_WC_REG_RX66_SCW1				0x83c3
+#define MDIO_WC_REG_RX66_SCW2				0x83c4
+#define MDIO_WC_REG_RX66_SCW3				0x83c5
+#define MDIO_WC_REG_RX66_SCW0_MASK			0x83c6
+#define MDIO_WC_REG_RX66_SCW1_MASK			0x83c7
+#define MDIO_WC_REG_RX66_SCW2_MASK			0x83c8
+#define MDIO_WC_REG_RX66_SCW3_MASK			0x83c9
+#define MDIO_WC_REG_FX100_CTRL1				0x8400
+#define MDIO_WC_REG_FX100_CTRL3				0x8402
+
+#define MDIO_WC_REG_MICROBLK_CMD			0xffc2
+#define MDIO_WC_REG_MICROBLK_DL_STATUS			0xffc5
+#define MDIO_WC_REG_MICROBLK_CMD3			0xffcc
+
+#define MDIO_WC_REG_AERBLK_AER				0xffde
+#define MDIO_WC_REG_COMBO_IEEE0_MIICTRL			0xffe0
+#define MDIO_WC_REG_COMBO_IEEE0_MIIISTAT		0xffe1
+
+#define MDIO_WC0_XGXS_BLK2_LANE_RESET			0x810A
+#define MDIO_WC0_XGXS_BLK2_LANE_RESET_RX_BITSHIFT	0
+#define MDIO_WC0_XGXS_BLK2_LANE_RESET_TX_BITSHIFT	4
+
+#define MDIO_WC0_XGXS_BLK6_XGXS_X2_CONTROL2		0x8141
+
+#define DIGITAL5_ACTUAL_SPEED_TX_MASK			0x003f
+
+/* 54616s */
+#define MDIO_REG_INTR_STATUS				0x1a
+#define MDIO_REG_INTR_MASK				0x1b
+#define MDIO_REG_INTR_MASK_LINK_STATUS			(0x1 << 1)
+#define MDIO_REG_GPHY_SHADOW				0x1c
+#define MDIO_REG_GPHY_SHADOW_LED_SEL2			(0x0e << 10)
+#define MDIO_REG_GPHY_SHADOW_WR_ENA			(0x1 << 15)
+#define MDIO_REG_GPHY_SHADOW_AUTO_DET_MED		(0x1e << 10)
+#define MDIO_REG_GPHY_SHADOW_INVERT_FIB_SD		(0x1 << 8)
+
 #define IGU_FUNC_BASE			0x0400
 
 #define IGU_ADDR_MSIX			0x0000
@@ -6217,11 +6933,6 @@
 #define IGU_ADDR_MSI_ADDR_HI	0x0212
 #define IGU_ADDR_MSI_DATA		0x0213
 
-#define IGU_INT_ENABLE			0
-#define IGU_INT_DISABLE 		1
-#define IGU_INT_NOP				2
-#define IGU_INT_NOP2			3
-
 #define IGU_USE_REGISTER_ustorm_type_0_sb_cleanup  0
 #define IGU_USE_REGISTER_ustorm_type_1_sb_cleanup  1
 #define IGU_USE_REGISTER_cstorm_type_0_sb_cleanup  2
@@ -6292,15 +7003,6 @@
 #define IGU_BC_BASE_DSB_PROD   128
 #define IGU_NORM_BASE_DSB_PROD 136
 
-#define IGU_CTRL_CMD_TYPE_WR\
-	1
-#define IGU_CTRL_CMD_TYPE_RD\
-	0
-
-#define IGU_SEG_ACCESS_NORM   0
-#define IGU_SEG_ACCESS_DEF    1
-#define IGU_SEG_ACCESS_ATTN   2
-
 	/* FID (if VF - [6] = 0; [5:0] = VF number; if PF - [6] = 1; \
 	[5:2] = 0; [1:0] = PF number) */
 #define IGU_FID_ENCODE_IS_PF	    (0x1<<6)
diff --git a/drivers/net/bnx2x/bnx2x_sp.c b/drivers/net/bnx2x/bnx2x_sp.c
new file mode 100644
index 0000000..5bdf094
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_sp.c
@@ -0,0 +1,5333 @@
+/* bnx2x_sp.c: Broadcom Everest network driver.
+ *
+ * Copyright 2011 Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a
+ * license other than the GPL, without Broadcom's express prior written
+ * consent.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Vladislav Zolotarov
+ *
+ */
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/crc32.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/crc32c.h>
+#include "bnx2x.h"
+#include "bnx2x_cmn.h"
+#include "bnx2x_sp.h"
+
+#define BNX2X_MAX_EMUL_MULTI		16
+
+/**** Exe Queue interfaces ****/
+
+/**
+ * bnx2x_exe_queue_init - init the Exe Queue object
+ *
+ * @o:		pointer to the object
+ * @exe_len:	length of the execution chunk
+ * @owner:	pointer to the owner
+ * @validate:	validate function pointer
+ * @optimize:	optimize function pointer
+ * @exec:	execute function pointer
+ * @get:	get function pointer
+ */
+static inline void bnx2x_exe_queue_init(struct bnx2x *bp,
+					struct bnx2x_exe_queue_obj *o,
+					int exe_len,
+					union bnx2x_qable_obj *owner,
+					exe_q_validate validate,
+					exe_q_optimize optimize,
+					exe_q_execute exec,
+					exe_q_get get)
+{
+	memset(o, 0, sizeof(*o));
+
+	INIT_LIST_HEAD(&o->exe_queue);
+	INIT_LIST_HEAD(&o->pending_comp);
+
+	spin_lock_init(&o->lock);
+
+	o->exe_chunk_len = exe_len;
+	o->owner         = owner;
+
+	/* Owner specific callbacks */
+	o->validate      = validate;
+	o->optimize      = optimize;
+	o->execute       = exec;
+	o->get           = get;
+
+	DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk "
+			 "length of %d\n", exe_len);
+}
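+
+/*
+ * Illustrative sketch (not part of the driver): the MAC object later in
+ * this file wires its callbacks into the queue exactly this way, e.g. for
+ * E1x chips:
+ *
+ *	bnx2x_exe_queue_init(bp, &mac_obj->exe_queue, 1, qable_obj,
+ *			     bnx2x_validate_vlan_mac,
+ *			     bnx2x_optimize_vlan_mac,
+ *			     bnx2x_execute_vlan_mac,
+ *			     bnx2x_exeq_get_mac);
+ *
+ * An exe_chunk_len of 1 means commands are sent to the FW one at a time.
+ */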
+
+static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp,
+					     struct bnx2x_exeq_elem *elem)
+{
+	DP(BNX2X_MSG_SP, "Deleting an exe_queue element\n");
+	kfree(elem);
+}
+
+static inline int bnx2x_exe_queue_length(struct bnx2x_exe_queue_obj *o)
+{
+	struct bnx2x_exeq_elem *elem;
+	int cnt = 0;
+
+	spin_lock_bh(&o->lock);
+
+	list_for_each_entry(elem, &o->exe_queue, link)
+		cnt++;
+
+	spin_unlock_bh(&o->lock);
+
+	return cnt;
+}
+
+/**
+ * bnx2x_exe_queue_add - add a new element to the execution queue
+ *
+ * @bp:		driver handle
+ * @o:		queue
+ * @cmd:	new command to add
+ * @restore:	true - do not optimize the command
+ *
+ * If the element was optimized out or is illegal, it is freed.
+ */
+static inline int bnx2x_exe_queue_add(struct bnx2x *bp,
+				      struct bnx2x_exe_queue_obj *o,
+				      struct bnx2x_exeq_elem *elem,
+				      bool restore)
+{
+	int rc;
+
+	spin_lock_bh(&o->lock);
+
+	if (!restore) {
+		/* Try to cancel this element against the queue (optimize) */
+		rc = o->optimize(bp, o->owner, elem);
+		if (rc)
+			goto free_and_exit;
+
+		/* Check if this request is ok */
+		rc = o->validate(bp, o->owner, elem);
+		if (rc) {
+			BNX2X_ERR("Preamble failed: %d\n", rc);
+			goto free_and_exit;
+		}
+	}
+
+	/* Legal and not optimized out - add it to the execution queue */
+	list_add_tail(&elem->link, &o->exe_queue);
+
+	spin_unlock_bh(&o->lock);
+
+	return 0;
+
+free_and_exit:
+	bnx2x_exe_queue_free_elem(bp, elem);
+
+	spin_unlock_bh(&o->lock);
+
+	return rc;
+}
+
+static inline void __bnx2x_exe_queue_reset_pending(
+	struct bnx2x *bp,
+	struct bnx2x_exe_queue_obj *o)
+{
+	struct bnx2x_exeq_elem *elem;
+
+	while (!list_empty(&o->pending_comp)) {
+		elem = list_first_entry(&o->pending_comp,
+					struct bnx2x_exeq_elem, link);
+
+		list_del(&elem->link);
+		bnx2x_exe_queue_free_elem(bp, elem);
+	}
+}
+
+static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp,
+						 struct bnx2x_exe_queue_obj *o)
+{
+	spin_lock_bh(&o->lock);
+	__bnx2x_exe_queue_reset_pending(bp, o);
+	spin_unlock_bh(&o->lock);
+}
+
+/**
+ * bnx2x_exe_queue_step - execute one execution chunk atomically
+ *
+ * @bp:			driver handle
+ * @o:			queue
+ * @ramrod_flags:	flags
+ *
+ * (Atomicity is ensured using the exe_queue->lock).
+ */
+static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
+				       struct bnx2x_exe_queue_obj *o,
+				       unsigned long *ramrod_flags)
+{
+	struct bnx2x_exeq_elem *elem, spacer;
+	int cur_len = 0, rc;
+
+	memset(&spacer, 0, sizeof(spacer));
+
+	spin_lock_bh(&o->lock);
+
+	/*
+	 * Next step should not be performed until the current is finished,
+	 * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
+	 * properly clear object internals without sending any command to the FW
+	 * which also implies there won't be any completion to clear the
+	 * 'pending' list.
+	 */
+	if (!list_empty(&o->pending_comp)) {
+		if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
+			DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: "
+					 "resetting pending_comp\n");
+			__bnx2x_exe_queue_reset_pending(bp, o);
+		} else {
+			spin_unlock_bh(&o->lock);
+			return 1;
+		}
+	}
+
+	/*
+	 * Run through the pending commands list and create a next
+	 * execution chunk.
+	 */
+	while (!list_empty(&o->exe_queue)) {
+		elem = list_first_entry(&o->exe_queue, struct bnx2x_exeq_elem,
+					link);
+		WARN_ON(!elem->cmd_len);
+
+		if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
+			cur_len += elem->cmd_len;
+			/*
+			 * Prevent from both lists being empty when moving an
+			 * element. This will allow the call of
+			 * bnx2x_exe_queue_empty() without locking.
+			 */
+			list_add_tail(&spacer.link, &o->pending_comp);
+			mb();
+			list_del(&elem->link);
+			list_add_tail(&elem->link, &o->pending_comp);
+			list_del(&spacer.link);
+		} else
+			break;
+	}
+
+	/* Sanity check */
+	if (!cur_len) {
+		spin_unlock_bh(&o->lock);
+		return 0;
+	}
+
+	rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
+	if (rc < 0)
+		/*
+		 *  In case of an error return the commands back to the queue
+		 *  and reset the pending_comp.
+		 */
+		list_splice_init(&o->pending_comp, &o->exe_queue);
+	else if (!rc)
+		/*
+		 * If zero is returned, means there are no outstanding pending
+		 * completions and we may dismiss the pending list.
+		 */
+		__bnx2x_exe_queue_reset_pending(bp, o);
+
+	spin_unlock_bh(&o->lock);
+	return rc;
+}
+
+static inline bool bnx2x_exe_queue_empty(struct bnx2x_exe_queue_obj *o)
+{
+	bool empty = list_empty(&o->exe_queue);
+
+	/* Don't reorder!!! */
+	mb();
+
+	return empty && list_empty(&o->pending_comp);
+}
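+
+/*
+ * Note (illustrative): the spacer element added in bnx2x_exe_queue_step()
+ * guarantees that while an element migrates from exe_queue to pending_comp
+ * at least one of the two lists is non-empty, so the unlocked check above
+ * cannot transiently report an empty queue mid-move.
+ */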
+
+static inline struct bnx2x_exeq_elem *bnx2x_exe_queue_alloc_elem(
+	struct bnx2x *bp)
+{
+	DP(BNX2X_MSG_SP, "Allocating a new exe_queue element\n");
+	return kzalloc(sizeof(struct bnx2x_exeq_elem), GFP_ATOMIC);
+}
+
+/************************ raw_obj functions ***********************************/
+static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o)
+{
+	return !!test_bit(o->state, o->pstate);
+}
+
+static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o)
+{
+	smp_mb__before_clear_bit();
+	clear_bit(o->state, o->pstate);
+	smp_mb__after_clear_bit();
+}
+
+static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o)
+{
+	smp_mb__before_clear_bit();
+	set_bit(o->state, o->pstate);
+	smp_mb__after_clear_bit();
+}
+
+/**
+ * bnx2x_state_wait - wait until the given bit(state) is cleared
+ *
+ * @bp:		device handle
+ * @state:	state which is to be cleared
+ * @state_p:	state buffer
+ *
+ */
+static inline int bnx2x_state_wait(struct bnx2x *bp, int state,
+				   unsigned long *pstate)
+{
+	/* can take a while if any port is running */
+	int cnt = 5000;
+
+	if (CHIP_REV_IS_EMUL(bp))
+		cnt *= 20;
+
+	DP(BNX2X_MSG_SP, "waiting for state to become %d\n", state);
+
+	might_sleep();
+	while (cnt--) {
+		if (!test_bit(state, pstate)) {
+#ifdef BNX2X_STOP_ON_ERROR
+			DP(BNX2X_MSG_SP, "exit  (cnt %d)\n", 5000 - cnt);
+#endif
+			return 0;
+		}
+
+		usleep_range(1000, 1000);
+
+		if (bp->panic)
+			return -EIO;
+	}
+
+	/* timeout! */
+	BNX2X_ERR("timeout waiting for state %d\n", state);
+#ifdef BNX2X_STOP_ON_ERROR
+	bnx2x_panic();
+#endif
+
+	return -EBUSY;
+}
+
+static int bnx2x_raw_wait(struct bnx2x *bp, struct bnx2x_raw_obj *raw)
+{
+	return bnx2x_state_wait(bp, raw->state, raw->pstate);
+}
+
+/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
+/* credit handling callbacks */
+static bool bnx2x_get_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int *offset)
+{
+	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
+
+	WARN_ON(!mp);
+
+	return mp->get_entry(mp, offset);
+}
+
+static bool bnx2x_get_credit_mac(struct bnx2x_vlan_mac_obj *o)
+{
+	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
+
+	WARN_ON(!mp);
+
+	return mp->get(mp, 1);
+}
+
+static bool bnx2x_get_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int *offset)
+{
+	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
+
+	WARN_ON(!vp);
+
+	return vp->get_entry(vp, offset);
+}
+
+static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
+{
+	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
+
+	WARN_ON(!vp);
+
+	return vp->get(vp, 1);
+}
+
+static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
+{
+	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
+	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
+
+	if (!mp->get(mp, 1))
+		return false;
+
+	if (!vp->get(vp, 1)) {
+		mp->put(mp, 1);
+		return false;
+	}
+
+	return true;
+}
+
+static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
+{
+	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
+
+	return mp->put_entry(mp, offset);
+}
+
+static bool bnx2x_put_credit_mac(struct bnx2x_vlan_mac_obj *o)
+{
+	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
+
+	return mp->put(mp, 1);
+}
+
+static bool bnx2x_put_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int offset)
+{
+	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
+
+	return vp->put_entry(vp, offset);
+}
+
+static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)
+{
+	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
+
+	return vp->put(vp, 1);
+}
+
+static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
+{
+	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
+	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
+
+	if (!mp->put(mp, 1))
+		return false;
+
+	if (!vp->put(vp, 1)) {
+		mp->get(mp, 1);
+		return false;
+	}
+
+	return true;
+}
+
+/* check_add() callbacks */
+static int bnx2x_check_mac_add(struct bnx2x_vlan_mac_obj *o,
+			       union bnx2x_classification_ramrod_data *data)
+{
+	struct bnx2x_vlan_mac_registry_elem *pos;
+
+	if (!is_valid_ether_addr(data->mac.mac))
+		return -EINVAL;
+
+	/* Check if a requested MAC already exists */
+	list_for_each_entry(pos, &o->head, link)
+		if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
+			return -EEXIST;
+
+	return 0;
+}
+
+static int bnx2x_check_vlan_add(struct bnx2x_vlan_mac_obj *o,
+				union bnx2x_classification_ramrod_data *data)
+{
+	struct bnx2x_vlan_mac_registry_elem *pos;
+
+	list_for_each_entry(pos, &o->head, link)
+		if (data->vlan.vlan == pos->u.vlan.vlan)
+			return -EEXIST;
+
+	return 0;
+}
+
+static int bnx2x_check_vlan_mac_add(struct bnx2x_vlan_mac_obj *o,
+				   union bnx2x_classification_ramrod_data *data)
+{
+	struct bnx2x_vlan_mac_registry_elem *pos;
+
+	list_for_each_entry(pos, &o->head, link)
+		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
+		    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
+			     ETH_ALEN)))
+			return -EEXIST;
+
+	return 0;
+}
+
+/* check_del() callbacks */
+static struct bnx2x_vlan_mac_registry_elem *
+	bnx2x_check_mac_del(struct bnx2x_vlan_mac_obj *o,
+			    union bnx2x_classification_ramrod_data *data)
+{
+	struct bnx2x_vlan_mac_registry_elem *pos;
+
+	list_for_each_entry(pos, &o->head, link)
+		if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
+			return pos;
+
+	return NULL;
+}
+
+static struct bnx2x_vlan_mac_registry_elem *
+	bnx2x_check_vlan_del(struct bnx2x_vlan_mac_obj *o,
+			     union bnx2x_classification_ramrod_data *data)
+{
+	struct bnx2x_vlan_mac_registry_elem *pos;
+
+	list_for_each_entry(pos, &o->head, link)
+		if (data->vlan.vlan == pos->u.vlan.vlan)
+			return pos;
+
+	return NULL;
+}
+
+static struct bnx2x_vlan_mac_registry_elem *
+	bnx2x_check_vlan_mac_del(struct bnx2x_vlan_mac_obj *o,
+				 union bnx2x_classification_ramrod_data *data)
+{
+	struct bnx2x_vlan_mac_registry_elem *pos;
+
+	list_for_each_entry(pos, &o->head, link)
+		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
+		    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
+			     ETH_ALEN)))
+			return pos;
+
+	return NULL;
+}
+
+/* check_move() callback */
+static bool bnx2x_check_move(struct bnx2x_vlan_mac_obj *src_o,
+			     struct bnx2x_vlan_mac_obj *dst_o,
+			     union bnx2x_classification_ramrod_data *data)
+{
+	struct bnx2x_vlan_mac_registry_elem *pos;
+	int rc;
+
+	/* Check if we can delete the requested configuration from the first
+	 * object.
+	 */
+	pos = src_o->check_del(src_o, data);
+
+	/*  check if configuration can be added */
+	rc = dst_o->check_add(dst_o, data);
+
+	/* If this classification can not be added (is already set)
+	 * or can't be deleted - return an error.
+	 */
+	if (rc || !pos)
+		return false;
+
+	return true;
+}
+
+static bool bnx2x_check_move_always_err(
+	struct bnx2x_vlan_mac_obj *src_o,
+	struct bnx2x_vlan_mac_obj *dst_o,
+	union bnx2x_classification_ramrod_data *data)
+{
+	return false;
+}
+
+static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
+{
+	struct bnx2x_raw_obj *raw = &o->raw;
+	u8 rx_tx_flag = 0;
+
+	if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
+	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
+		rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;
+
+	if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
+	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
+		rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;
+
+	return rx_tx_flag;
+}
+
+/* LLH CAM line allocations */
+enum {
+	LLH_CAM_ISCSI_ETH_LINE = 0,
+	LLH_CAM_ETH_LINE,
+	LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2
+};
+
+static inline void bnx2x_set_mac_in_nig(struct bnx2x *bp,
+				 bool add, unsigned char *dev_addr, int index)
+{
+	u32 wb_data[2];
+	u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
+			 NIG_REG_LLH0_FUNC_MEM;
+
+	if (!IS_MF_SI(bp) || index > LLH_CAM_MAX_PF_LINE)
+		return;
+
+	DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n",
+			 (add ? "ADD" : "DELETE"), index);
+
+	if (add) {
+		/* LLH_FUNC_MEM is a u64 WB register */
+		reg_offset += 8*index;
+
+		wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
+			      (dev_addr[4] <<  8) |  dev_addr[5]);
+		wb_data[1] = ((dev_addr[0] <<  8) |  dev_addr[1]);
+
+		REG_WR_DMAE(bp, reg_offset, wb_data, 2);
+	}
+
+	REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
+				  NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add);
+}
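+
+/*
+ * Byte-packing sketch for the write above (illustrative): for a MAC address
+ * 00:11:22:33:44:55, dev_addr[] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55},
+ * so wb_data[0] = 0x22334455 (lower four bytes) and wb_data[1] = 0x00000011
+ * (upper two bytes).
+ */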
+
+/**
+ * bnx2x_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
+ *
+ * @bp:		device handle
+ * @o:		queue for which we want to configure this rule
+ * @add:	if true the command is an ADD command, DEL otherwise
+ * @opcode:	CLASSIFY_RULE_OPCODE_XXX
+ * @hdr:	pointer to a header to setup
+ *
+ */
+static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp,
+	struct bnx2x_vlan_mac_obj *o, bool add, int opcode,
+	struct eth_classify_cmd_header *hdr)
+{
+	struct bnx2x_raw_obj *raw = &o->raw;
+
+	hdr->client_id = raw->cl_id;
+	hdr->func_id = raw->func_id;
+
+	/* Rx or/and Tx (internal switching) configuration ? */
+	hdr->cmd_general_data |=
+		bnx2x_vlan_mac_get_rx_tx_flag(o);
+
+	if (add)
+		hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;
+
+	hdr->cmd_general_data |=
+		(opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
+}
+
+/**
+ * bnx2x_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
+ *
+ * @cid:	connection id
+ * @type:	BNX2X_FILTER_XXX_PENDING
+ * @hdr:	pointer to header to setup
+ * @rule_cnt:	number of rules in the ramrod data buffer
+ *
+ * Currently we always configure one rule and set the echo field to contain
+ * a CID and an opcode type.
+ */
+static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type,
+				struct eth_classify_header *hdr, int rule_cnt)
+{
+	hdr->echo = (cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT);
+	hdr->rule_cnt = (u8)rule_cnt;
+}
+
+/* hw_config() callbacks */
+static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
+				 struct bnx2x_vlan_mac_obj *o,
+				 struct bnx2x_exeq_elem *elem, int rule_idx,
+				 int cam_offset)
+{
+	struct bnx2x_raw_obj *raw = &o->raw;
+	struct eth_classify_rules_ramrod_data *data =
+		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
+	int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
+	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
+	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
+	unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
+	u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac;
+
+	/*
+	 * Set LLH CAM entry: currently only iSCSI and ETH macs are
+	 * relevant. In addition, current implementation is tuned for a
+	 * single ETH MAC.
+	 *
+	 * When a PF must be configured with multiple unicast ETH MACs
+	 * in switch independent mode (NetQ, multiple netdev MACs,
+	 * etc.), consider better utilisation of the 8 per-function MAC
+	 * entries in the LLH register. There are also
+	 * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that bring the
+	 * total number of CAM entries to 16.
+	 *
+	 * Currently we won't configure NIG for MACs other than a primary ETH
+	 * MAC and iSCSI L2 MAC.
+	 *
+	 * If this MAC is moving from one Queue to another, no need to change
+	 * NIG configuration.
+	 */
+	if (cmd != BNX2X_VLAN_MAC_MOVE) {
+		if (test_bit(BNX2X_ISCSI_ETH_MAC, vlan_mac_flags))
+			bnx2x_set_mac_in_nig(bp, add, mac,
+					     LLH_CAM_ISCSI_ETH_LINE);
+		else if (test_bit(BNX2X_ETH_MAC, vlan_mac_flags))
+			bnx2x_set_mac_in_nig(bp, add, mac, LLH_CAM_ETH_LINE);
+	}
+
+	/* Reset the ramrod data buffer for the first rule */
+	if (rule_idx == 0)
+		memset(data, 0, sizeof(*data));
+
+	/* Setup a command header */
+	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_MAC,
+				      &rule_entry->mac.header);
+
+	DP(BNX2X_MSG_SP, "About to %s MAC "BNX2X_MAC_FMT" for "
+			 "Queue %d\n", (add ? "add" : "delete"),
+			 BNX2X_MAC_PRN_LIST(mac), raw->cl_id);
+
+	/* Set a MAC itself */
+	bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
+			      &rule_entry->mac.mac_mid,
+			      &rule_entry->mac.mac_lsb, mac);
+
+	/* MOVE: Add a rule that will add this MAC to the target Queue */
+	if (cmd == BNX2X_VLAN_MAC_MOVE) {
+		rule_entry++;
+		rule_cnt++;
+
+		/* Setup ramrod data */
+		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
+					elem->cmd_data.vlan_mac.target_obj,
+					      true, CLASSIFY_RULE_OPCODE_MAC,
+					      &rule_entry->mac.header);
+
+		/* Set a MAC itself */
+		bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
+				      &rule_entry->mac.mac_mid,
+				      &rule_entry->mac.mac_lsb, mac);
+	}
+
+	/* Set the ramrod data header */
+	/* TODO: take this to the higher level in order to prevent multiple
+		 writing */
+	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
+					rule_cnt);
+}
+
+/**
+ * bnx2x_vlan_mac_set_rdata_hdr_e1x - set a header in a single MAC configuration ramrod
+ *
+ * @bp:		device handle
+ * @o:		queue
+ * @type:	BNX2X_FILTER_XXX_PENDING
+ * @cam_offset:	offset in cam memory
+ * @hdr:	pointer to a header to setup
+ *
+ * E1/E1H
+ */
+static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp,
+	struct bnx2x_vlan_mac_obj *o, int type, int cam_offset,
+	struct mac_configuration_hdr *hdr)
+{
+	struct bnx2x_raw_obj *r = &o->raw;
+
+	hdr->length = 1;
+	hdr->offset = (u8)cam_offset;
+	hdr->client_id = 0xff;
+	hdr->echo = ((r->cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT));
+}
+
+static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp,
+	struct bnx2x_vlan_mac_obj *o, bool add, int opcode, u8 *mac,
+	u16 vlan_id, struct mac_configuration_entry *cfg_entry)
+{
+	struct bnx2x_raw_obj *r = &o->raw;
+	u32 cl_bit_vec = (1 << r->cl_id);
+
+	cfg_entry->clients_bit_vector = cpu_to_le32(cl_bit_vec);
+	cfg_entry->pf_id = r->func_id;
+	cfg_entry->vlan_id = cpu_to_le16(vlan_id);
+
+	if (add) {
+		SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
+			 T_ETH_MAC_COMMAND_SET);
+		SET_FLAG(cfg_entry->flags,
+			 MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, opcode);
+
+		/* Set a MAC in a ramrod data */
+		bnx2x_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
+				      &cfg_entry->middle_mac_addr,
+				      &cfg_entry->lsb_mac_addr, mac);
+	} else
+		SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
+			 T_ETH_MAC_COMMAND_INVALIDATE);
+}
+
+static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp,
+	struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, bool add,
+	u8 *mac, u16 vlan_id, int opcode, struct mac_configuration_cmd *config)
+{
+	struct mac_configuration_entry *cfg_entry = &config->config_table[0];
+	struct bnx2x_raw_obj *raw = &o->raw;
+
+	bnx2x_vlan_mac_set_rdata_hdr_e1x(bp, o, type, cam_offset,
+					 &config->hdr);
+	bnx2x_vlan_mac_set_cfg_entry_e1x(bp, o, add, opcode, mac, vlan_id,
+					 cfg_entry);
+
+	DP(BNX2X_MSG_SP, "%s MAC "BNX2X_MAC_FMT" CLID %d CAM offset %d\n",
+			 (add ? "setting" : "clearing"),
+			 BNX2X_MAC_PRN_LIST(mac), raw->cl_id, cam_offset);
+}
+
+/**
+ * bnx2x_set_one_mac_e1x - fill a single MAC rule ramrod data
+ *
+ * @bp:		device handle
+ * @o:		bnx2x_vlan_mac_obj
+ * @elem:	bnx2x_exeq_elem
+ * @rule_idx:	rule_idx
+ * @cam_offset: cam_offset
+ */
+static void bnx2x_set_one_mac_e1x(struct bnx2x *bp,
+				  struct bnx2x_vlan_mac_obj *o,
+				  struct bnx2x_exeq_elem *elem, int rule_idx,
+				  int cam_offset)
+{
+	struct bnx2x_raw_obj *raw = &o->raw;
+	struct mac_configuration_cmd *config =
+		(struct mac_configuration_cmd *)(raw->rdata);
+	/*
+	 * 57710 and 57711 do not support MOVE command,
+	 * so it's either ADD or DEL
+	 */
+	bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
+		true : false;
+
+	/* Reset the ramrod data buffer */
+	memset(config, 0, sizeof(*config));
+
+	bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_MAC_PENDING,
+				     cam_offset, add,
+				     elem->cmd_data.vlan_mac.u.mac.mac, 0,
+				     ETH_VLAN_FILTER_ANY_VLAN, config);
+}
+
+static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
+				  struct bnx2x_vlan_mac_obj *o,
+				  struct bnx2x_exeq_elem *elem, int rule_idx,
+				  int cam_offset)
+{
+	struct bnx2x_raw_obj *raw = &o->raw;
+	struct eth_classify_rules_ramrod_data *data =
+		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
+	int rule_cnt = rule_idx + 1;
+	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
+	int cmd = elem->cmd_data.vlan_mac.cmd;
+	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
+	u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;
+
+	/* Reset the ramrod data buffer for the first rule */
+	if (rule_idx == 0)
+		memset(data, 0, sizeof(*data));
+
+	/* Set a rule header */
+	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_VLAN,
+				      &rule_entry->vlan.header);
+
+	DP(BNX2X_MSG_SP, "About to %s VLAN %d\n", (add ? "add" : "delete"),
+			 vlan);
+
+	/* Set a VLAN itself */
+	rule_entry->vlan.vlan = cpu_to_le16(vlan);
+
+	/* MOVE: Add a rule that will add this MAC to the target Queue */
+	if (cmd == BNX2X_VLAN_MAC_MOVE) {
+		rule_entry++;
+		rule_cnt++;
+
+		/* Setup ramrod data */
+		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
+					elem->cmd_data.vlan_mac.target_obj,
+					      true, CLASSIFY_RULE_OPCODE_VLAN,
+					      &rule_entry->vlan.header);
+
+		/* Set a VLAN itself */
+		rule_entry->vlan.vlan = cpu_to_le16(vlan);
+	}
+
+	/* Set the ramrod data header */
+	/* TODO: take this to the higher level in order to prevent multiple
+		 writing */
+	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
+					rule_cnt);
+}
+
+static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
+				      struct bnx2x_vlan_mac_obj *o,
+				      struct bnx2x_exeq_elem *elem,
+				      int rule_idx, int cam_offset)
+{
+	struct bnx2x_raw_obj *raw = &o->raw;
+	struct eth_classify_rules_ramrod_data *data =
+		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
+	int rule_cnt = rule_idx + 1;
+	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
+	int cmd = elem->cmd_data.vlan_mac.cmd;
+	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
+	u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
+	u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;
+
+	/* Reset the ramrod data buffer for the first rule */
+	if (rule_idx == 0)
+		memset(data, 0, sizeof(*data));
+
+	/* Set a rule header */
+	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
+				      &rule_entry->pair.header);
+
+	/* Set VLAN and MAC themselves */
+	rule_entry->pair.vlan = cpu_to_le16(vlan);
+	bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
+			      &rule_entry->pair.mac_mid,
+			      &rule_entry->pair.mac_lsb, mac);
+
+	/* MOVE: Add a rule that will add this MAC to the target Queue */
+	if (cmd == BNX2X_VLAN_MAC_MOVE) {
+		rule_entry++;
+		rule_cnt++;
+
+		/* Setup ramrod data */
+		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
+					elem->cmd_data.vlan_mac.target_obj,
+					      true, CLASSIFY_RULE_OPCODE_PAIR,
+					      &rule_entry->pair.header);
+
+		/* Set a VLAN itself */
+		rule_entry->pair.vlan = cpu_to_le16(vlan);
+		bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
+				      &rule_entry->pair.mac_mid,
+				      &rule_entry->pair.mac_lsb, mac);
+	}
+
+	/* Set the ramrod data header */
+	/* TODO: take this to the higher level in order to prevent multiple
+		 writing */
+	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
+					rule_cnt);
+}
+
+/**
+ * bnx2x_set_one_vlan_mac_e1h - fill a single VLAN-MAC rule ramrod data
+ *
+ * @bp:		device handle
+ * @o:		bnx2x_vlan_mac_obj
+ * @elem:	bnx2x_exeq_elem
+ * @rule_idx:	rule_idx
+ * @cam_offset:	cam_offset
+ */
+static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
+				       struct bnx2x_vlan_mac_obj *o,
+				       struct bnx2x_exeq_elem *elem,
+				       int rule_idx, int cam_offset)
+{
+	struct bnx2x_raw_obj *raw = &o->raw;
+	struct mac_configuration_cmd *config =
+		(struct mac_configuration_cmd *)(raw->rdata);
+	/*
+	 * 57710 and 57711 do not support MOVE command,
+	 * so it's either ADD or DEL
+	 */
+	bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
+		true : false;
+
+	/* Reset the ramrod data buffer */
+	memset(config, 0, sizeof(*config));
+
+	bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
+				     cam_offset, add,
+				     elem->cmd_data.vlan_mac.u.vlan_mac.mac,
+				     elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
+				     ETH_VLAN_FILTER_CLASSIFY, config);
+}
+
+#define list_next_entry(pos, member) \
+	list_entry((pos)->member.next, typeof(*(pos)), member)
+
+/**
+ * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
+ *
+ * @bp:		device handle
+ * @p:		command parameters
+ * @ppos:	pointer to the cookie
+ *
+ * Reconfigures the next MAC/VLAN/VLAN-MAC element from the
+ * previously configured elements list.
+ *
+ * From the command parameters only the RAMROD_COMP_WAIT bit in ramrod_flags
+ * is taken into account.
+ *
+ * The cookie should be given back in the next call to make the function
+ * handle the next element. If *ppos is set to NULL it will restart the
+ * iterator. If the returned *ppos == NULL, the last element has been
+ * handled.
+ *
+ */
+static int bnx2x_vlan_mac_restore(struct bnx2x *bp,
+			   struct bnx2x_vlan_mac_ramrod_params *p,
+			   struct bnx2x_vlan_mac_registry_elem **ppos)
+{
+	struct bnx2x_vlan_mac_registry_elem *pos;
+	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
+
+	/* If list is empty - there is nothing to do here */
+	if (list_empty(&o->head)) {
+		*ppos = NULL;
+		return 0;
+	}
+
+	/* make a step... */
+	if (*ppos == NULL)
+		*ppos = list_first_entry(&o->head,
+					 struct bnx2x_vlan_mac_registry_elem,
+					 link);
+	else
+		*ppos = list_next_entry(*ppos, link);
+
+	pos = *ppos;
+
+	/* If it's the last step - return NULL */
+	if (list_is_last(&pos->link, &o->head))
+		*ppos = NULL;
+
+	/* Prepare a 'user_req' */
+	memcpy(&p->user_req.u, &pos->u, sizeof(pos->u));
+
+	/* Set the command */
+	p->user_req.cmd = BNX2X_VLAN_MAC_ADD;
+
+	/* Set vlan_mac_flags */
+	p->user_req.vlan_mac_flags = pos->vlan_mac_flags;
+
+	/* Set a restore bit */
+	__set_bit(RAMROD_RESTORE, &p->ramrod_flags);
+
+	return bnx2x_config_vlan_mac(bp, p);
+}
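+
+/*
+ * Illustrative usage sketch (not part of the driver): a caller walks the
+ * whole registry by re-running the function until the cookie comes back
+ * NULL (p being the caller's prepared ramrod parameters):
+ *
+ *	struct bnx2x_vlan_mac_registry_elem *ppos = NULL;
+ *	int rc;
+ *
+ *	do {
+ *		rc = bnx2x_vlan_mac_restore(bp, &p, &ppos);
+ *		if (rc < 0)
+ *			break;
+ *	} while (ppos);
+ */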
+
+/*
+ * bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a
+ * pointer to the element matching the given criteria, or NULL if no such
+ * element has been found.
+ */
+static struct bnx2x_exeq_elem *bnx2x_exeq_get_mac(
+	struct bnx2x_exe_queue_obj *o,
+	struct bnx2x_exeq_elem *elem)
+{
+	struct bnx2x_exeq_elem *pos;
+	struct bnx2x_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;
+
+	/* Check pending for execution commands */
+	list_for_each_entry(pos, &o->exe_queue, link)
+		if (!memcmp(&pos->cmd_data.vlan_mac.u.mac, data,
+			      sizeof(*data)) &&
+		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
+			return pos;
+
+	return NULL;
+}
+
+static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan(
+	struct bnx2x_exe_queue_obj *o,
+	struct bnx2x_exeq_elem *elem)
+{
+	struct bnx2x_exeq_elem *pos;
+	struct bnx2x_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan;
+
+	/* Check pending for execution commands */
+	list_for_each_entry(pos, &o->exe_queue, link)
+		if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan, data,
+			      sizeof(*data)) &&
+		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
+			return pos;
+
+	return NULL;
+}
+
+static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac(
+	struct bnx2x_exe_queue_obj *o,
+	struct bnx2x_exeq_elem *elem)
+{
+	struct bnx2x_exeq_elem *pos;
+	struct bnx2x_vlan_mac_ramrod_data *data =
+		&elem->cmd_data.vlan_mac.u.vlan_mac;
+
+	/* Check pending for execution commands */
+	list_for_each_entry(pos, &o->exe_queue, link)
+		if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
+			      sizeof(*data)) &&
+		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
+			return pos;
+
+	return NULL;
+}
+
+/**
+ * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed
+ *
+ * @bp:		device handle
+ * @qo:		bnx2x_qable_obj
+ * @elem:	bnx2x_exeq_elem
+ *
+ * Checks that the requested configuration can be added. If yes and if
+ * requested, consumes a CAM credit.
+ *
+ * The 'validate' is run after the 'optimize'.
+ *
+ */
+static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp,
+					      union bnx2x_qable_obj *qo,
+					      struct bnx2x_exeq_elem *elem)
+{
+	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
+	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
+	int rc;
+
+	/* Check the registry */
+	rc = o->check_add(o, &elem->cmd_data.vlan_mac.u);
+	if (rc) {
+		DP(BNX2X_MSG_SP, "ADD command is not allowed considering "
+				 "current registry state\n");
+		return rc;
+	}
+
+	/*
+	 * Check if there is a pending ADD command for this
+	 * MAC/VLAN/VLAN-MAC. Return an error if there is.
+	 */
+	if (exeq->get(exeq, elem)) {
+		DP(BNX2X_MSG_SP, "There is a pending ADD command already\n");
+		return -EEXIST;
+	}
+
+	/*
+	 * TODO: Check the pending MOVE from other objects where this
+	 * object is a destination object.
+	 */
+
+	/* Consume the credit, unless explicitly requested not to */
+	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
+		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
+	    o->get_credit(o)))
+		return -EINVAL;
+
+	return 0;
+}
+
+/**
+ * bnx2x_validate_vlan_mac_del - check if the DEL command can be executed
+ *
+ * @bp:		device handle
+ * @qo:		qable object to check
+ * @elem:	element that needs to be deleted
+ *
+ * Checks that the requested configuration can be deleted. If yes and if
+ * requested, returns a CAM credit.
+ *
+ * The 'validate' is run after the 'optimize'.
+ */
+static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp,
+					      union bnx2x_qable_obj *qo,
+					      struct bnx2x_exeq_elem *elem)
+{
+	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
+	struct bnx2x_vlan_mac_registry_elem *pos;
+	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
+	struct bnx2x_exeq_elem query_elem;
+
+	/* If this classification can not be deleted (doesn't exist)
+	 * - return -EEXIST.
+	 */
+	pos = o->check_del(o, &elem->cmd_data.vlan_mac.u);
+	if (!pos) {
+		DP(BNX2X_MSG_SP, "DEL command is not allowed considering "
+				 "current registry state\n");
+		return -EEXIST;
+	}
+
+	/*
+	 * Check if there are pending DEL or MOVE commands for this
+	 * MAC/VLAN/VLAN-MAC. Return an error if so.
+	 */
+	memcpy(&query_elem, elem, sizeof(query_elem));
+
+	/* Check for MOVE commands */
+	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_MOVE;
+	if (exeq->get(exeq, &query_elem)) {
+		BNX2X_ERR("There is a pending MOVE command already\n");
+		return -EINVAL;
+	}
+
+	/* Check for DEL commands */
+	if (exeq->get(exeq, elem)) {
+		DP(BNX2X_MSG_SP, "There is a pending DEL command already\n");
+		return -EEXIST;
+	}
+
+	/* Return the credit to the pool, unless explicitly requested not to */
+	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
+		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
+	    o->put_credit(o))) {
+		BNX2X_ERR("Failed to return a credit\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * bnx2x_validate_vlan_mac_move - check if the MOVE command can be executed
+ *
+ * @bp:		device handle
+ * @qo:		qable object to check (source)
+ * @elem:	element that needs to be moved
+ *
+ * Checks that the requested configuration can be moved. If yes and if
+ * requested, updates the source and destination CAM credits accordingly.
+ *
+ * The 'validate' is run after the 'optimize'.
+ */
+static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
+					       union bnx2x_qable_obj *qo,
+					       struct bnx2x_exeq_elem *elem)
+{
+	struct bnx2x_vlan_mac_obj *src_o = &qo->vlan_mac;
+	struct bnx2x_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
+	struct bnx2x_exeq_elem query_elem;
+	struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue;
+	struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue;
+
+	/*
+	 * Check if we can perform this operation based on the current registry
+	 * state.
+	 */
+	if (!src_o->check_move(src_o, dest_o, &elem->cmd_data.vlan_mac.u)) {
+		DP(BNX2X_MSG_SP, "MOVE command is not allowed considering "
+				 "current registry state\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Check if there is an already pending DEL or MOVE command for the
+	 * source object or ADD command for a destination object. Return an
+	 * error if so.
+	 */
+	memcpy(&query_elem, elem, sizeof(query_elem));
+
+	/* Check DEL on source */
+	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
+	if (src_exeq->get(src_exeq, &query_elem)) {
+		BNX2X_ERR("There is a pending DEL command on the source "
+			  "queue already\n");
+		return -EINVAL;
+	}
+
+	/* Check MOVE on source */
+	if (src_exeq->get(src_exeq, elem)) {
+		DP(BNX2X_MSG_SP, "There is a pending MOVE command already\n");
+		return -EEXIST;
+	}
+
+	/* Check ADD on destination */
+	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
+	if (dest_exeq->get(dest_exeq, &query_elem)) {
+		BNX2X_ERR("There is a pending ADD command on the "
+			  "destination queue already\n");
+		return -EINVAL;
+	}
+
+	/* Consume the credit, unless explicitly requested not to */
+	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
+		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
+	    dest_o->get_credit(dest_o)))
+		return -EINVAL;
+
+	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
+		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
+	    src_o->put_credit(src_o))) {
+		/* return the credit taken from dest... */
+		dest_o->put_credit(dest_o);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int bnx2x_validate_vlan_mac(struct bnx2x *bp,
+				   union bnx2x_qable_obj *qo,
+				   struct bnx2x_exeq_elem *elem)
+{
+	switch (elem->cmd_data.vlan_mac.cmd) {
+	case BNX2X_VLAN_MAC_ADD:
+		return bnx2x_validate_vlan_mac_add(bp, qo, elem);
+	case BNX2X_VLAN_MAC_DEL:
+		return bnx2x_validate_vlan_mac_del(bp, qo, elem);
+	case BNX2X_VLAN_MAC_MOVE:
+		return bnx2x_validate_vlan_mac_move(bp, qo, elem);
+	default:
+		return -EINVAL;
+	}
+}
+
+/**
+ * bnx2x_wait_vlan_mac - passively wait for 5 seconds until all work completes.
+ *
+ * @bp:		device handle
+ * @o:		bnx2x_vlan_mac_obj
+ *
+ */
+static int bnx2x_wait_vlan_mac(struct bnx2x *bp,
+			       struct bnx2x_vlan_mac_obj *o)
+{
+	int cnt = 5000, rc;
+	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
+	struct bnx2x_raw_obj *raw = &o->raw;
+
+	while (cnt--) {
+		/* Wait for the current command to complete */
+		rc = raw->wait_comp(bp, raw);
+		if (rc)
+			return rc;
+
+		/* Wait until there are no pending commands */
+		if (!bnx2x_exe_queue_empty(exeq))
+			usleep_range(1000, 1000);
+		else
+			return 0;
+	}
+
+	return -EBUSY;
+}
+
+/**
+ * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod
+ *
+ * @bp:		device handle
+ * @o:		bnx2x_vlan_mac_obj
+ * @cqe:		completion element
+ * @ramrod_flags:	if RAMROD_CONT is set, the next execution chunk is
+ *			scheduled
+ *
+ */
+static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
+				   struct bnx2x_vlan_mac_obj *o,
+				   union event_ring_elem *cqe,
+				   unsigned long *ramrod_flags)
+{
+	struct bnx2x_raw_obj *r = &o->raw;
+	int rc;
+
+	/* Reset pending list */
+	bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);
+
+	/* Clear pending */
+	r->clear_pending(r);
+
+	/* If ramrod failed this is most likely a SW bug */
+	if (cqe->message.error)
+		return -EINVAL;
+
+	/* Run the next bulk of pending commands if requested */
+	if (test_bit(RAMROD_CONT, ramrod_flags)) {
+		rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
+		if (rc < 0)
+			return rc;
+	}
+
+	/* If there is more work to do return PENDING */
+	if (!bnx2x_exe_queue_empty(&o->exe_queue))
+		return 1;
+
+	return 0;
+}
+
+/**
+ * bnx2x_optimize_vlan_mac - optimize ADD and DEL commands.
+ *
+ * @bp:		device handle
+ * @qo:		bnx2x_qable_obj
+ * @elem:	bnx2x_exeq_elem
+ */
+static int bnx2x_optimize_vlan_mac(struct bnx2x *bp,
+				   union bnx2x_qable_obj *qo,
+				   struct bnx2x_exeq_elem *elem)
+{
+	struct bnx2x_exeq_elem query, *pos;
+	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
+	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
+
+	memcpy(&query, elem, sizeof(query));
+
+	switch (elem->cmd_data.vlan_mac.cmd) {
+	case BNX2X_VLAN_MAC_ADD:
+		query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
+		break;
+	case BNX2X_VLAN_MAC_DEL:
+		query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
+		break;
+	default:
+		/* Don't handle anything other than ADD or DEL */
+		return 0;
+	}
+
+	/* If we found the appropriate element - delete it */
+	pos = exeq->get(exeq, &query);
+	if (pos) {
+
+		/* Return the credit of the optimized command */
+		if (!test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
+			      &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
+			if ((query.cmd_data.vlan_mac.cmd ==
+			     BNX2X_VLAN_MAC_ADD) && !o->put_credit(o)) {
+				BNX2X_ERR("Failed to return the credit for the "
+					  "optimized ADD command\n");
+				return -EINVAL;
+			} else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
+				BNX2X_ERR("Failed to recover the credit from "
+					  "the optimized DEL command\n");
+				return -EINVAL;
+			}
+		}
+
+		DP(BNX2X_MSG_SP, "Optimizing %s command\n",
+			   (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
+			   "ADD" : "DEL");
+
+		list_del(&pos->link);
+		bnx2x_exe_queue_free_elem(bp, pos);
+		return 1;
+	}
+
+	return 0;
+}
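+
+/*
+ * Optimization example (illustrative): if a DEL for some MAC is still
+ * queued when an ADD for the same MAC arrives, the pair cancels out - the
+ * queued DEL is removed, its credit accounting is undone, and the new ADD
+ * is freed by bnx2x_exe_queue_add() once optimize() returns a non-zero
+ * value.
+ */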
+
+/**
+ * bnx2x_vlan_mac_get_registry_elem - prepare a registry element
+ *
+ * @bp:	  device handle
+ * @o:		vlan_mac object
+ * @elem:	execution queue element holding the command
+ * @restore:	true if this is a restore flow
+ * @re:		returned pointer to the prepared registry element
+ *
+ * prepare a registry element according to the current command request.
+ */
+static inline int bnx2x_vlan_mac_get_registry_elem(
+	struct bnx2x *bp,
+	struct bnx2x_vlan_mac_obj *o,
+	struct bnx2x_exeq_elem *elem,
+	bool restore,
+	struct bnx2x_vlan_mac_registry_elem **re)
+{
+	int cmd = elem->cmd_data.vlan_mac.cmd;
+	struct bnx2x_vlan_mac_registry_elem *reg_elem;
+
+	/* Allocate a new registry element if needed. */
+	if (!restore &&
+	    ((cmd == BNX2X_VLAN_MAC_ADD) || (cmd == BNX2X_VLAN_MAC_MOVE))) {
+		reg_elem = kzalloc(sizeof(*reg_elem), GFP_ATOMIC);
+		if (!reg_elem)
+			return -ENOMEM;
+
+		/* Get a new CAM offset */
+		if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
+			/*
+			 * This should never happen, because we have checked the
+			 * CAM availability in the 'validate'.
+			 */
+			WARN_ON(1);
+			kfree(reg_elem);
+			return -EINVAL;
+		}
+
+		DP(BNX2X_MSG_SP, "Got cam offset %d\n", reg_elem->cam_offset);
+
+		/* Set a VLAN-MAC data */
+		memcpy(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
+			  sizeof(reg_elem->u));
+
+		/* Copy the flags (needed for DEL and RESTORE flows) */
+		reg_elem->vlan_mac_flags =
+			elem->cmd_data.vlan_mac.vlan_mac_flags;
+	} else /* DEL, RESTORE */
+		reg_elem = o->check_del(o, &elem->cmd_data.vlan_mac.u);
+
+	*re = reg_elem;
+	return 0;
+}
+
+/**
+ * bnx2x_execute_vlan_mac - execute vlan mac command
+ *
+ * @bp:			device handle
+ * @qo:			qable object holding the vlan_mac object
+ * @exe_chunk:		list of commands to execute
+ * @ramrod_flags:	execution flags
+ *
+ * go and send a ramrod!
+ */
+static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
+				  union bnx2x_qable_obj *qo,
+				  struct list_head *exe_chunk,
+				  unsigned long *ramrod_flags)
+{
+	struct bnx2x_exeq_elem *elem;
+	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
+	struct bnx2x_raw_obj *r = &o->raw;
+	int rc, idx = 0;
+	bool restore = test_bit(RAMROD_RESTORE, ramrod_flags);
+	bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags);
+	struct bnx2x_vlan_mac_registry_elem *reg_elem;
+	int cmd;
+
+	/*
+	 * If DRIVER_ONLY execution is requested, clean up the registry
+	 * and exit. Otherwise send a ramrod to the FW.
+	 */
+	if (!drv_only) {
+		WARN_ON(r->check_pending(r));
+
+		/* Set pending */
+		r->set_pending(r);
+
+		/* Fill the ramrod data */
+		list_for_each_entry(elem, exe_chunk, link) {
+			cmd = elem->cmd_data.vlan_mac.cmd;
+			/*
+			 * We will add to the target object in MOVE command, so
+			 * change the object for a CAM search.
+			 */
+			if (cmd == BNX2X_VLAN_MAC_MOVE)
+				cam_obj = elem->cmd_data.vlan_mac.target_obj;
+			else
+				cam_obj = o;
+
+			rc = bnx2x_vlan_mac_get_registry_elem(bp, cam_obj,
+							      elem, restore,
+							      &reg_elem);
+			if (rc)
+				goto error_exit;
+
+			WARN_ON(!reg_elem);
+
+			/* Push a new entry into the registry */
+			if (!restore &&
+			    ((cmd == BNX2X_VLAN_MAC_ADD) ||
+			    (cmd == BNX2X_VLAN_MAC_MOVE)))
+				list_add(&reg_elem->link, &cam_obj->head);
+
+			/* Configure a single command in a ramrod data buffer */
+			o->set_one_rule(bp, o, elem, idx,
+					reg_elem->cam_offset);
+
+			/* MOVE command consumes 2 entries in the ramrod data */
+			if (cmd == BNX2X_VLAN_MAC_MOVE)
+				idx += 2;
+			else
+				idx++;
+		}
+
+		/* Commit the data writes towards the memory */
+		mb();
+
+		rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid,
+				   U64_HI(r->rdata_mapping),
+				   U64_LO(r->rdata_mapping),
+				   ETH_CONNECTION_TYPE);
+		if (rc)
+			goto error_exit;
+	}
+
+	/* Now, when we are done with the ramrod - clean up the registry */
+	list_for_each_entry(elem, exe_chunk, link) {
+		cmd = elem->cmd_data.vlan_mac.cmd;
+		if ((cmd == BNX2X_VLAN_MAC_DEL) ||
+		    (cmd == BNX2X_VLAN_MAC_MOVE)) {
+			reg_elem = o->check_del(o, &elem->cmd_data.vlan_mac.u);
+
+			WARN_ON(!reg_elem);
+
+			o->put_cam_offset(o, reg_elem->cam_offset);
+			list_del(&reg_elem->link);
+			kfree(reg_elem);
+		}
+	}
+
+	if (!drv_only)
+		return 1;
+	else
+		return 0;
+
+error_exit:
+	r->clear_pending(r);
+
+	/* Cleanup a registry in case of a failure */
+	list_for_each_entry(elem, exe_chunk, link) {
+		cmd = elem->cmd_data.vlan_mac.cmd;
+
+		if (cmd == BNX2X_VLAN_MAC_MOVE)
+			cam_obj = elem->cmd_data.vlan_mac.target_obj;
+		else
+			cam_obj = o;
+
+		/* Delete all entries newly added above */
+		if (!restore &&
+		    ((cmd == BNX2X_VLAN_MAC_ADD) ||
+		    (cmd == BNX2X_VLAN_MAC_MOVE))) {
+			reg_elem = o->check_del(cam_obj,
+						&elem->cmd_data.vlan_mac.u);
+			if (reg_elem) {
+				list_del(&reg_elem->link);
+				kfree(reg_elem);
+			}
+		}
+	}
+
+	return rc;
+}
+
+static inline int bnx2x_vlan_mac_push_new_cmd(
+	struct bnx2x *bp,
+	struct bnx2x_vlan_mac_ramrod_params *p)
+{
+	struct bnx2x_exeq_elem *elem;
+	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
+	bool restore = test_bit(RAMROD_RESTORE, &p->ramrod_flags);
+
+	/* Allocate the execution queue element */
+	elem = bnx2x_exe_queue_alloc_elem(bp);
+	if (!elem)
+		return -ENOMEM;
+
+	/* Set the command 'length' */
+	switch (p->user_req.cmd) {
+	case BNX2X_VLAN_MAC_MOVE:
+		elem->cmd_len = 2;
+		break;
+	default:
+		elem->cmd_len = 1;
+	}
+
+	/* Fill the object specific info */
+	memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req));
+
+	/* Try to add a new command to the pending list */
+	return bnx2x_exe_queue_add(bp, &o->exe_queue, elem, restore);
+}
+
+/**
+ * bnx2x_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
+ *
+ * @bp:	  device handle
+ * @p:	  command parameters
+ *
+ */
+int bnx2x_config_vlan_mac(
+	struct bnx2x *bp,
+	struct bnx2x_vlan_mac_ramrod_params *p)
+{
+	int rc = 0;
+	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
+	unsigned long *ramrod_flags = &p->ramrod_flags;
+	bool cont = test_bit(RAMROD_CONT, ramrod_flags);
+	struct bnx2x_raw_obj *raw = &o->raw;
+
+	/*
+	 * Add new elements to the execution list for commands that require it.
+	 */
+	if (!cont) {
+		rc = bnx2x_vlan_mac_push_new_cmd(bp, p);
+		if (rc)
+			return rc;
+	}
+
+	/*
+	 * If nothing more is going to be executed in this iteration, return
+	 * PENDING (1) when there are still pending commands.
+	 */
+	if (!bnx2x_exe_queue_empty(&o->exe_queue))
+		rc = 1;
+
+	/* Execute commands if required */
+	if (cont || test_bit(RAMROD_EXEC, ramrod_flags) ||
+	    test_bit(RAMROD_COMP_WAIT, ramrod_flags)) {
+		rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
+		if (rc < 0)
+			return rc;
+	}
+
+	/*
+	 * RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set,
+	 * the caller wants to wait until the last command is done.
+	 */
+	if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
+		/*
+		 * Wait for at most the current exe_queue length iterations,
+		 * plus one for the currently pending command.
+		 */
+		int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1;
+
+		while (!bnx2x_exe_queue_empty(&o->exe_queue) &&
+		       max_iterations--) {
+
+			/* Wait for the current command to complete */
+			rc = raw->wait_comp(bp, raw);
+			if (rc)
+				return rc;
+
+			/* Make a next step */
+			rc = bnx2x_exe_queue_step(bp, &o->exe_queue,
+						  ramrod_flags);
+			if (rc < 0)
+				return rc;
+		}
+
+		return 0;
+	}
+
+	return rc;
+}
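+
+/*
+ * Illustrative usage sketch (not part of the driver): adding a single MAC
+ * synchronously would look roughly like this, assuming addr holds the
+ * ETH_ALEN-byte address to configure:
+ *
+ *	struct bnx2x_vlan_mac_ramrod_params p;
+ *
+ *	memset(&p, 0, sizeof(p));
+ *	p.vlan_mac_obj = mac_obj;
+ *	p.user_req.cmd = BNX2X_VLAN_MAC_ADD;
+ *	memcpy(p.user_req.u.mac.mac, addr, ETH_ALEN);
+ *	__set_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
+ *	rc = bnx2x_config_vlan_mac(bp, &p);
+ */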
+
+/**
+ * bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
+ *
+ * @bp:			device handle
+ * @o:			vlan_mac object
+ * @vlan_mac_flags:	vlan_mac_flags spec of the elements to delete
+ * @ramrod_flags:	execution flags to be used for this deletion
+ *
+ * Returns 0 if the last operation has completed successfully and there are
+ * no more elements left, a positive value if the last operation has
+ * completed successfully and there are more previously configured elements,
+ * and a negative value if the current operation has failed.
+ */
+static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
+				  struct bnx2x_vlan_mac_obj *o,
+				  unsigned long *vlan_mac_flags,
+				  unsigned long *ramrod_flags)
+{
+	struct bnx2x_vlan_mac_registry_elem *pos = NULL;
+	int rc = 0;
+	struct bnx2x_vlan_mac_ramrod_params p;
+	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
+	struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
+
+	/* Clear pending commands first */
+
+	spin_lock_bh(&exeq->lock);
+
+	list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
+		if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
+		    *vlan_mac_flags)
+			list_del(&exeq_pos->link);
+	}
+
+	spin_unlock_bh(&exeq->lock);
+
+	/* Prepare a command request */
+	memset(&p, 0, sizeof(p));
+	p.vlan_mac_obj = o;
+	p.ramrod_flags = *ramrod_flags;
+	p.user_req.cmd = BNX2X_VLAN_MAC_DEL;
+
+	/*
+	 * Add all the VLAN-MAC entries to the execution queue without actually
+	 * executing anything.
+	 */
+	__clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
+	__clear_bit(RAMROD_EXEC, &p.ramrod_flags);
+	__clear_bit(RAMROD_CONT, &p.ramrod_flags);
+
+	list_for_each_entry(pos, &o->head, link) {
+		if (pos->vlan_mac_flags == *vlan_mac_flags) {
+			p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
+			memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
+			rc = bnx2x_config_vlan_mac(bp, &p);
+			if (rc < 0) {
+				BNX2X_ERR("Failed to add a new DEL command\n");
+				return rc;
+			}
+		}
+	}
+
+	p.ramrod_flags = *ramrod_flags;
+	__set_bit(RAMROD_CONT, &p.ramrod_flags);
+
+	return bnx2x_config_vlan_mac(bp, &p);
+}
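+
+/*
+ * Illustrative usage sketch (not part of the driver): flushing all entries
+ * configured with the BNX2X_ETH_MAC flag would look roughly like:
+ *
+ *	unsigned long vlan_mac_flags = 0, ramrod_flags = 0;
+ *
+ *	__set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
+ *	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ *	rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, &ramrod_flags);
+ */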
+
+static inline void bnx2x_init_raw_obj(struct bnx2x_raw_obj *raw, u8 cl_id,
+	u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, int state,
+	unsigned long *pstate, bnx2x_obj_type type)
+{
+	raw->func_id = func_id;
+	raw->cid = cid;
+	raw->cl_id = cl_id;
+	raw->rdata = rdata;
+	raw->rdata_mapping = rdata_mapping;
+	raw->state = state;
+	raw->pstate = pstate;
+	raw->obj_type = type;
+	raw->check_pending = bnx2x_raw_check_pending;
+	raw->clear_pending = bnx2x_raw_clear_pending;
+	raw->set_pending = bnx2x_raw_set_pending;
+	raw->wait_comp = bnx2x_raw_wait;
+}
+
+static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o,
+	u8 cl_id, u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping,
+	int state, unsigned long *pstate, bnx2x_obj_type type,
+	struct bnx2x_credit_pool_obj *macs_pool,
+	struct bnx2x_credit_pool_obj *vlans_pool)
+{
+	INIT_LIST_HEAD(&o->head);
+
+	o->macs_pool = macs_pool;
+	o->vlans_pool = vlans_pool;
+
+	o->delete_all = bnx2x_vlan_mac_del_all;
+	o->restore = bnx2x_vlan_mac_restore;
+	o->complete = bnx2x_complete_vlan_mac;
+	o->wait = bnx2x_wait_vlan_mac;
+
+	bnx2x_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
+			   state, pstate, type);
+}
+
+void bnx2x_init_mac_obj(struct bnx2x *bp,
+			struct bnx2x_vlan_mac_obj *mac_obj,
+			u8 cl_id, u32 cid, u8 func_id, void *rdata,
+			dma_addr_t rdata_mapping, int state,
+			unsigned long *pstate, bnx2x_obj_type type,
+			struct bnx2x_credit_pool_obj *macs_pool)
+{
+	union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)mac_obj;
+
+	bnx2x_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
+				   rdata_mapping, state, pstate, type,
+				   macs_pool, NULL);
+
+	/* CAM credit pool handling */
+	mac_obj->get_credit = bnx2x_get_credit_mac;
+	mac_obj->put_credit = bnx2x_put_credit_mac;
+	mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
+	mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
+
+	if (CHIP_IS_E1x(bp)) {
+		mac_obj->set_one_rule      = bnx2x_set_one_mac_e1x;
+		mac_obj->check_del         = bnx2x_check_mac_del;
+		mac_obj->check_add         = bnx2x_check_mac_add;
+		mac_obj->check_move        = bnx2x_check_move_always_err;
+		mac_obj->ramrod_cmd        = RAMROD_CMD_ID_ETH_SET_MAC;
+
+		/* Exe Queue */
+		bnx2x_exe_queue_init(bp,
+				     &mac_obj->exe_queue, 1, qable_obj,
+				     bnx2x_validate_vlan_mac,
+				     bnx2x_optimize_vlan_mac,
+				     bnx2x_execute_vlan_mac,
+				     bnx2x_exeq_get_mac);
+	} else {
+		mac_obj->set_one_rule      = bnx2x_set_one_mac_e2;
+		mac_obj->check_del         = bnx2x_check_mac_del;
+		mac_obj->check_add         = bnx2x_check_mac_add;
+		mac_obj->check_move        = bnx2x_check_move;
+		mac_obj->ramrod_cmd        =
+			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
+
+		/* Exe Queue */
+		bnx2x_exe_queue_init(bp,
+				     &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
+				     qable_obj, bnx2x_validate_vlan_mac,
+				     bnx2x_optimize_vlan_mac,
+				     bnx2x_execute_vlan_mac,
+				     bnx2x_exeq_get_mac);
+	}
+}
+
+void bnx2x_init_vlan_obj(struct bnx2x *bp,
+			 struct bnx2x_vlan_mac_obj *vlan_obj,
+			 u8 cl_id, u32 cid, u8 func_id, void *rdata,
+			 dma_addr_t rdata_mapping, int state,
+			 unsigned long *pstate, bnx2x_obj_type type,
+			 struct bnx2x_credit_pool_obj *vlans_pool)
+{
+	union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)vlan_obj;
+
+	bnx2x_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata,
+				   rdata_mapping, state, pstate, type, NULL,
+				   vlans_pool);
+
+	vlan_obj->get_credit = bnx2x_get_credit_vlan;
+	vlan_obj->put_credit = bnx2x_put_credit_vlan;
+	vlan_obj->get_cam_offset = bnx2x_get_cam_offset_vlan;
+	vlan_obj->put_cam_offset = bnx2x_put_cam_offset_vlan;
+
+	if (CHIP_IS_E1x(bp)) {
+		BNX2X_ERR("Do not support chips others than E2 and newer\n");
+		BUG();
+	} else {
+		vlan_obj->set_one_rule      = bnx2x_set_one_vlan_e2;
+		vlan_obj->check_del         = bnx2x_check_vlan_del;
+		vlan_obj->check_add         = bnx2x_check_vlan_add;
+		vlan_obj->check_move        = bnx2x_check_move;
+		vlan_obj->ramrod_cmd        =
+			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
+
+		/* Exe Queue */
+		bnx2x_exe_queue_init(bp,
+				     &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
+				     qable_obj, bnx2x_validate_vlan_mac,
+				     bnx2x_optimize_vlan_mac,
+				     bnx2x_execute_vlan_mac,
+				     bnx2x_exeq_get_vlan);
+	}
+}
+
+void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
+			     struct bnx2x_vlan_mac_obj *vlan_mac_obj,
+			     u8 cl_id, u32 cid, u8 func_id, void *rdata,
+			     dma_addr_t rdata_mapping, int state,
+			     unsigned long *pstate, bnx2x_obj_type type,
+			     struct bnx2x_credit_pool_obj *macs_pool,
+			     struct bnx2x_credit_pool_obj *vlans_pool)
+{
+	union bnx2x_qable_obj *qable_obj =
+		(union bnx2x_qable_obj *)vlan_mac_obj;
+
+	bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
+				   rdata_mapping, state, pstate, type,
+				   macs_pool, vlans_pool);
+
+	/* CAM pool handling */
+	vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
+	vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac;
+	/*
+	 * CAM offset is relevant for 57710 and 57711 chips only which have a
+	 * single CAM for both MACs and VLAN-MAC pairs. So the offset
+	 * will be taken from MACs' pool object only.
+	 */
+	vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
+	vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
+
+	if (CHIP_IS_E1(bp)) {
+		BNX2X_ERR("Do not support chips others than E2\n");
+		BUG();
+	} else if (CHIP_IS_E1H(bp)) {
+		vlan_mac_obj->set_one_rule      = bnx2x_set_one_vlan_mac_e1h;
+		vlan_mac_obj->check_del         = bnx2x_check_vlan_mac_del;
+		vlan_mac_obj->check_add         = bnx2x_check_vlan_mac_add;
+		vlan_mac_obj->check_move        = bnx2x_check_move_always_err;
+		vlan_mac_obj->ramrod_cmd        = RAMROD_CMD_ID_ETH_SET_MAC;
+
+		/* Exe Queue */
+		bnx2x_exe_queue_init(bp,
+				     &vlan_mac_obj->exe_queue, 1, qable_obj,
+				     bnx2x_validate_vlan_mac,
+				     bnx2x_optimize_vlan_mac,
+				     bnx2x_execute_vlan_mac,
+				     bnx2x_exeq_get_vlan_mac);
+	} else {
+		vlan_mac_obj->set_one_rule      = bnx2x_set_one_vlan_mac_e2;
+		vlan_mac_obj->check_del         = bnx2x_check_vlan_mac_del;
+		vlan_mac_obj->check_add         = bnx2x_check_vlan_mac_add;
+		vlan_mac_obj->check_move        = bnx2x_check_move;
+		vlan_mac_obj->ramrod_cmd        =
+			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
+
+		/* Exe Queue */
+		bnx2x_exe_queue_init(bp,
+				     &vlan_mac_obj->exe_queue,
+				     CLASSIFY_RULES_COUNT,
+				     qable_obj, bnx2x_validate_vlan_mac,
+				     bnx2x_optimize_vlan_mac,
+				     bnx2x_execute_vlan_mac,
+				     bnx2x_exeq_get_vlan_mac);
+	}
+}
+
+/* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
+static inline void __storm_memset_mac_filters(struct bnx2x *bp,
+			struct tstorm_eth_mac_filter_config *mac_filters,
+			u16 pf_id)
+{
+	size_t size = sizeof(struct tstorm_eth_mac_filter_config);
+
+	u32 addr = BAR_TSTRORM_INTMEM +
+			TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);
+
+	__storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
+}
+
+static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
+				 struct bnx2x_rx_mode_ramrod_params *p)
+{
+	/* update the bp MAC filter structure */
+	u32 mask = (1 << p->cl_id);
+
+	struct tstorm_eth_mac_filter_config *mac_filters =
+		(struct tstorm_eth_mac_filter_config *)p->rdata;
+
+	/* initial setting is drop-all */
+	u8 drop_all_ucast = 1, drop_all_mcast = 1;
+	u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
+	u8 unmatched_unicast = 0;
+
+	/* In E1x only the Rx accept flags are taken into account since Tx
+	 * switching isn't enabled.
+	 */
+	if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags))
+		/* accept matched ucast */
+		drop_all_ucast = 0;
+
+	if (test_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags))
+		/* accept matched mcast */
+		drop_all_mcast = 0;
+
+	if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
+		/* accept all ucast */
+		drop_all_ucast = 0;
+		accp_all_ucast = 1;
+	}
+	if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
+		/* accept all mcast */
+		drop_all_mcast = 0;
+		accp_all_mcast = 1;
+	}
+	if (test_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags))
+		/* accept (all) bcast */
+		accp_all_bcast = 1;
+	if (test_bit(BNX2X_ACCEPT_UNMATCHED, &p->rx_accept_flags))
+		/* accept unmatched unicasts */
+		unmatched_unicast = 1;
+
+	mac_filters->ucast_drop_all = drop_all_ucast ?
+		mac_filters->ucast_drop_all | mask :
+		mac_filters->ucast_drop_all & ~mask;
+
+	mac_filters->mcast_drop_all = drop_all_mcast ?
+		mac_filters->mcast_drop_all | mask :
+		mac_filters->mcast_drop_all & ~mask;
+
+	mac_filters->ucast_accept_all = accp_all_ucast ?
+		mac_filters->ucast_accept_all | mask :
+		mac_filters->ucast_accept_all & ~mask;
+
+	mac_filters->mcast_accept_all = accp_all_mcast ?
+		mac_filters->mcast_accept_all | mask :
+		mac_filters->mcast_accept_all & ~mask;
+
+	mac_filters->bcast_accept_all = accp_all_bcast ?
+		mac_filters->bcast_accept_all | mask :
+		mac_filters->bcast_accept_all & ~mask;
+
+	mac_filters->unmatched_unicast = unmatched_unicast ?
+		mac_filters->unmatched_unicast | mask :
+		mac_filters->unmatched_unicast & ~mask;
+
+	DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\n accp_ucast 0x%x\n"
+			 "accp_mcast 0x%x\naccp_bcast 0x%x\n",
+			 mac_filters->ucast_drop_all,
+			 mac_filters->mcast_drop_all,
+			 mac_filters->ucast_accept_all,
+			 mac_filters->mcast_accept_all,
+			 mac_filters->bcast_accept_all);
+
+	/* write the MAC filter structure */
+	__storm_memset_mac_filters(bp, mac_filters, p->func_id);
+
+	/* The operation is completed */
+	clear_bit(p->state, p->pstate);
+	smp_mb__after_clear_bit();
+
+	return 0;
+}
+
+/* Setup ramrod data */
+static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid,
+				struct eth_classify_header *hdr,
+				u8 rule_cnt)
+{
+	hdr->echo = cid;
+	hdr->rule_cnt = rule_cnt;
+}
+
+static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp,
+				unsigned long accept_flags,
+				struct eth_filter_rules_cmd *cmd,
+				bool clear_accept_all)
+{
+	u16 state;
+
+	/* start with 'drop-all' */
+	state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
+		ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
+
+	if (accept_flags) {
+		if (test_bit(BNX2X_ACCEPT_UNICAST, &accept_flags))
+			state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
+
+		if (test_bit(BNX2X_ACCEPT_MULTICAST, &accept_flags))
+			state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
+
+		if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept_flags)) {
+			state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
+			state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
+		}
+
+		if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags)) {
+			state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
+			state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
+		}
+		if (test_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags))
+			state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
+
+		if (test_bit(BNX2X_ACCEPT_UNMATCHED, &accept_flags)) {
+			state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
+			state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
+		}
+		if (test_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags))
+			state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
+	}
+
+	/* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
+	if (clear_accept_all) {
+		state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
+		state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
+		state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
+		state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
+	}
+
+	cmd->state = cpu_to_le16(state);
+}
+
+static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
+				struct bnx2x_rx_mode_ramrod_params *p)
+{
+	struct eth_filter_rules_ramrod_data *data = p->rdata;
+	int rc;
+	u8 rule_idx = 0;
+
+	/* Reset the ramrod data buffer */
+	memset(data, 0, sizeof(*data));
+
+	/* Setup ramrod data */
+
+	/* Tx (internal switching) */
+	if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
+		data->rules[rule_idx].client_id = p->cl_id;
+		data->rules[rule_idx].func_id = p->func_id;
+
+		data->rules[rule_idx].cmd_general_data =
+			ETH_FILTER_RULES_CMD_TX_CMD;
+
+		bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags,
+			&(data->rules[rule_idx++]), false);
+	}
+
+	/* Rx */
+	if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
+		data->rules[rule_idx].client_id = p->cl_id;
+		data->rules[rule_idx].func_id = p->func_id;
+
+		data->rules[rule_idx].cmd_general_data =
+			ETH_FILTER_RULES_CMD_RX_CMD;
+
+		bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags,
+			&(data->rules[rule_idx++]), false);
+	}
+
+	/*
+	 * If FCoE Queue configuration has been requested, configure the Rx
+	 * and internal switching modes for this queue in separate rules.
+	 *
+	 * The FCoE queue shall never be set to ACCEPT_ALL packets of any
+	 * sort: MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
+	 */
+	if (test_bit(BNX2X_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
+		/*  Tx (internal switching) */
+		if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
+			data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
+			data->rules[rule_idx].func_id = p->func_id;
+
+			data->rules[rule_idx].cmd_general_data =
+						ETH_FILTER_RULES_CMD_TX_CMD;
+
+			bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags,
+						     &(data->rules[rule_idx++]),
+						       true);
+		}
+
+		/* Rx */
+		if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
+			data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
+			data->rules[rule_idx].func_id = p->func_id;
+
+			data->rules[rule_idx].cmd_general_data =
+						ETH_FILTER_RULES_CMD_RX_CMD;
+
+			bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags,
+						     &(data->rules[rule_idx++]),
+						       true);
+		}
+	}
+
+	/*
+	 * Set the ramrod header (most importantly - number of rules to
+	 * configure).
+	 */
+	bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
+
+	DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, "
+			 "tx_accept_flags 0x%lx\n",
+			 data->header.rule_cnt, p->rx_accept_flags,
+			 p->tx_accept_flags);
+
+	/* Commit writes towards the memory before sending a ramrod */
+	mb();
+
+	/* Send a ramrod */
+	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_FILTER_RULES, p->cid,
+			   U64_HI(p->rdata_mapping),
+			   U64_LO(p->rdata_mapping),
+			   ETH_CONNECTION_TYPE);
+	if (rc)
+		return rc;
+
+	/* Ramrod completion is pending */
+	return 1;
+}
+
+static int bnx2x_wait_rx_mode_comp_e2(struct bnx2x *bp,
+				      struct bnx2x_rx_mode_ramrod_params *p)
+{
+	return bnx2x_state_wait(bp, p->state, p->pstate);
+}
+
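+/* On E1x the rx_mode configuration is a direct write into the TSTORM
+ * internal memory (see bnx2x_set_rx_mode_e1x()) that completes
+ * synchronously, so there is nothing to wait for.
+ */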
+static int bnx2x_empty_rx_mode_wait(struct bnx2x *bp,
+				    struct bnx2x_rx_mode_ramrod_params *p)
+{
+	/* Do nothing */
+	return 0;
+}
+
+int bnx2x_config_rx_mode(struct bnx2x *bp,
+			 struct bnx2x_rx_mode_ramrod_params *p)
+{
+	int rc;
+
+	/* Configure the new classification in the chip */
+	rc = p->rx_mode_obj->config_rx_mode(bp, p);
+	if (rc < 0)
+		return rc;
+
+	/* Wait for a ramrod completion if was requested */
+	if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
+		rc = p->rx_mode_obj->wait_comp(bp, p);
+		if (rc)
+			return rc;
+	}
+
+	return rc;
+}
+
+void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
+			    struct bnx2x_rx_mode_obj *o)
+{
+	if (CHIP_IS_E1x(bp)) {
+		o->wait_comp      = bnx2x_empty_rx_mode_wait;
+		o->config_rx_mode = bnx2x_set_rx_mode_e1x;
+	} else {
+		o->wait_comp      = bnx2x_wait_rx_mode_comp_e2;
+		o->config_rx_mode = bnx2x_set_rx_mode_e2;
+	}
+}
+
+/********************* Multicast verbs: SET, CLEAR ****************************/
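+/* Multicast MACs are matched approximately: each MAC is hashed (the top
+ * byte of its CRC32c) into one of 256 bins, and the registry tracks the
+ * currently set bins in a 256-bit vector (registry.aprox_match.vec).
+ */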
+static inline u8 bnx2x_mcast_bin_from_mac(u8 *mac)
+{
+	return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff;
+}
+
+struct bnx2x_mcast_mac_elem {
+	struct list_head link;
+	u8 mac[ETH_ALEN];
+	u8 pad[2]; /* For a natural alignment of the following buffer */
+};
+
+struct bnx2x_pending_mcast_cmd {
+	struct list_head link;
+	int type; /* BNX2X_MCAST_CMD_X */
+	union {
+		struct list_head macs_head;
+		u32 macs_num; /* Needed for DEL command */
+		int next_bin; /* Needed for RESTORE flow with aprox match */
+	} data;
+
+	bool done; /* set to true when the command has been handled. It is
+		    * practically used in 57712 handling only, where one pending
+		    * command may be handled in a few operations. Since for
+		    * other chips every command is completed in a single
+		    * ramrod, there is no need to utilize this field there.
+		    */
+};
+
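+/* Wait until both the SCHEDULED bit (more commands pending) and the
+ * ramrod pending bit are cleared.
+ */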
+static int bnx2x_mcast_wait(struct bnx2x *bp,
+			    struct bnx2x_mcast_obj *o)
+{
+	if (bnx2x_state_wait(bp, o->sched_state, o->raw.pstate) ||
+			o->raw.wait_comp(bp, &o->raw))
+		return -EBUSY;
+
+	return 0;
+}
+
+static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
+				   struct bnx2x_mcast_obj *o,
+				   struct bnx2x_mcast_ramrod_params *p,
+				   int cmd)
+{
+	int total_sz;
+	struct bnx2x_pending_mcast_cmd *new_cmd;
+	struct bnx2x_mcast_mac_elem *cur_mac = NULL;
+	struct bnx2x_mcast_list_elem *pos;
+	int macs_list_len = ((cmd == BNX2X_MCAST_CMD_ADD) ?
+			     p->mcast_list_len : 0);
+
+	/* If the command is empty ("handle pending commands only"), break */
+	if (!p->mcast_list_len)
+		return 0;
+
+	total_sz = sizeof(*new_cmd) +
+		macs_list_len * sizeof(struct bnx2x_mcast_mac_elem);
+
+	/* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
+	new_cmd = kzalloc(total_sz, GFP_ATOMIC);
+
+	if (!new_cmd)
+		return -ENOMEM;
+
+	DP(BNX2X_MSG_SP, "About to enqueue a new %d command. "
+			 "macs_list_len=%d\n", cmd, macs_list_len);
+
+	INIT_LIST_HEAD(&new_cmd->data.macs_head);
+
+	new_cmd->type = cmd;
+	new_cmd->done = false;
+
+	switch (cmd) {
+	case BNX2X_MCAST_CMD_ADD:
+		cur_mac = (struct bnx2x_mcast_mac_elem *)
+			  ((u8 *)new_cmd + sizeof(*new_cmd));
+
+		/* Push the MACs of the current command into the pending command
+		 * MACs list: FIFO
+		 */
+		list_for_each_entry(pos, &p->mcast_list, link) {
+			memcpy(cur_mac->mac, pos->mac, ETH_ALEN);
+			list_add_tail(&cur_mac->link, &new_cmd->data.macs_head);
+			cur_mac++;
+		}
+
+		break;
+
+	case BNX2X_MCAST_CMD_DEL:
+		new_cmd->data.macs_num = p->mcast_list_len;
+		break;
+
+	case BNX2X_MCAST_CMD_RESTORE:
+		new_cmd->data.next_bin = 0;
+		break;
+
+	default:
+		BNX2X_ERR("Unknown command: %d\n", cmd);
+		return -EINVAL;
+	}
+
+	/* Push the new pending command to the tail of the pending list: FIFO */
+	list_add_tail(&new_cmd->link, &o->pending_cmds_head);
+
+	o->set_sched(o);
+
+	return 1;
+}
+
+/**
+ * bnx2x_mcast_get_next_bin - get the next set bin (index)
+ *
+ * @o:		multicast object
+ * @last:	index to start looking from (including)
+ *
+ * returns the next found (set) bin or a negative value if none is found.
+ */
+static inline int bnx2x_mcast_get_next_bin(struct bnx2x_mcast_obj *o, int last)
+{
+	int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
+
+	for (i = last / BIT_VEC64_ELEM_SZ; i < BNX2X_MCAST_VEC_SZ; i++) {
+		if (o->registry.aprox_match.vec[i])
+			for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
+				int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
+				if (BIT_VEC64_TEST_BIT(o->registry.aprox_match.
+						       vec, cur_bit)) {
+					return cur_bit;
+				}
+			}
+		inner_start = 0;
+	}
+
+	/* None found */
+	return -1;
+}
+
+/**
+ * bnx2x_mcast_clear_first_bin - find the first set bin and clear it
+ *
+ * @o:		multicast object
+ *
+ * returns the index of the found bin or -1 if none is found
+ */
+static inline int bnx2x_mcast_clear_first_bin(struct bnx2x_mcast_obj *o)
+{
+	int cur_bit = bnx2x_mcast_get_next_bin(o, 0);
+
+	if (cur_bit >= 0)
+		BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);
+
+	return cur_bit;
+}
+
+static inline u8 bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o)
+{
+	struct bnx2x_raw_obj *raw = &o->raw;
+	u8 rx_tx_flag = 0;
+
+	if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
+	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
+		rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
+
+	if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
+	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
+		rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;
+
+	return rx_tx_flag;
+}
+
+static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp,
+					struct bnx2x_mcast_obj *o, int idx,
+					union bnx2x_mcast_config_data *cfg_data,
+					int cmd)
+{
+	struct bnx2x_raw_obj *r = &o->raw;
+	struct eth_multicast_rules_ramrod_data *data =
+		(struct eth_multicast_rules_ramrod_data *)(r->rdata);
+	u8 func_id = r->func_id;
+	u8 rx_tx_add_flag = bnx2x_mcast_get_rx_tx_flag(o);
+	int bin;
+
+	if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE))
+		rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;
+
+	data->rules[idx].cmd_general_data |= rx_tx_add_flag;
+
+	/* Get a bin and update a bins' vector */
+	switch (cmd) {
+	case BNX2X_MCAST_CMD_ADD:
+		bin = bnx2x_mcast_bin_from_mac(cfg_data->mac);
+		BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
+		break;
+
+	case BNX2X_MCAST_CMD_DEL:
+		/* If there were no more bins to clear
+		 * (bnx2x_mcast_clear_first_bin() returns -1) then we would
+		 * clear any (0xff) bin.
+		 * See bnx2x_mcast_validate_e2() for explanation when it may
+		 * happen.
+		 */
+		bin = bnx2x_mcast_clear_first_bin(o);
+		break;
+
+	case BNX2X_MCAST_CMD_RESTORE:
+		bin = cfg_data->bin;
+		break;
+
+	default:
+		BNX2X_ERR("Unknown command: %d\n", cmd);
+		return;
+	}
+
+	DP(BNX2X_MSG_SP, "%s bin %d\n",
+			 ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
+			 "Setting"  : "Clearing"), bin);
+
+	data->rules[idx].bin_id    = (u8)bin;
+	data->rules[idx].func_id   = func_id;
+	data->rules[idx].engine_id = o->engine_id;
+}
+
+/**
+ * bnx2x_mcast_handle_restore_cmd_e2 - restore configuration from the registry
+ *
+ * @bp:		device handle
+ * @o:		multicast object
+ * @start_bin:	index in the registry to start from (including)
+ * @rdata_idx:	index in the ramrod data to start from
+ *
+ * returns last handled bin index or -1 if all bins have been handled
+ */
+static inline int bnx2x_mcast_handle_restore_cmd_e2(
+	struct bnx2x *bp, struct bnx2x_mcast_obj *o, int start_bin,
+	int *rdata_idx)
+{
+	int cur_bin, cnt = *rdata_idx;
+	union bnx2x_mcast_config_data cfg_data = {0};
+
+	/* go through the registry and configure the bins from it */
+	for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
+	    cur_bin = bnx2x_mcast_get_next_bin(o, cur_bin + 1)) {
+
+		cfg_data.bin = (u8)cur_bin;
+		o->set_one_rule(bp, o, cnt, &cfg_data,
+				BNX2X_MCAST_CMD_RESTORE);
+
+		cnt++;
+
+		DP(BNX2X_MSG_SP, "About to configure a bin %d\n", cur_bin);
+
+		/* Break if we reached the maximum number
+		 * of rules.
+		 */
+		if (cnt >= o->max_cmd_len)
+			break;
+	}
+
+	*rdata_idx = cnt;
+
+	return cur_bin;
+}
+
+static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp,
+	struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
+	int *line_idx)
+{
+	struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n;
+	int cnt = *line_idx;
+	union bnx2x_mcast_config_data cfg_data = {0};
+
+	list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head,
+				 link) {
+
+		cfg_data.mac = &pmac_pos->mac[0];
+		o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
+
+		cnt++;
+
+		DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT
+				 " mcast MAC\n",
+				 BNX2X_MAC_PRN_LIST(pmac_pos->mac));
+
+		list_del(&pmac_pos->link);
+
+		/* Break if we reached the maximum number
+		 * of rules.
+		 */
+		if (cnt >= o->max_cmd_len)
+			break;
+	}
+
+	*line_idx = cnt;
+
+	/* if no more MACs to configure - we are done */
+	if (list_empty(&cmd_pos->data.macs_head))
+		cmd_pos->done = true;
+}
+
+static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x *bp,
+	struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
+	int *line_idx)
+{
+	int cnt = *line_idx;
+
+	while (cmd_pos->data.macs_num) {
+		o->set_one_rule(bp, o, cnt, NULL, cmd_pos->type);
+
+		cnt++;
+
+		cmd_pos->data.macs_num--;
+
+		  DP(BNX2X_MSG_SP, "Deleting MAC. %d left,cnt is %d\n",
+				   cmd_pos->data.macs_num, cnt);
+
+		/* Break if we reached the maximum
+		 * number of rules.
+		 */
+		if (cnt >= o->max_cmd_len)
+			break;
+	}
+
+	*line_idx = cnt;
+
+	/* If we cleared all bins - we are done */
+	if (!cmd_pos->data.macs_num)
+		cmd_pos->done = true;
+}
+
+static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x *bp,
+	struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
+	int *line_idx)
+{
+	cmd_pos->data.next_bin = o->hdl_restore(bp, o, cmd_pos->data.next_bin,
+						line_idx);
+
+	if (cmd_pos->data.next_bin < 0)
+		/* If o->hdl_restore returned -1 we are done */
+		cmd_pos->done = true;
+	else
+		/* Start from the next bin next time */
+		cmd_pos->data.next_bin++;
+}
+
+static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp,
+				struct bnx2x_mcast_ramrod_params *p)
+{
+	struct bnx2x_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
+	int cnt = 0;
+	struct bnx2x_mcast_obj *o = p->mcast_obj;
+
+	list_for_each_entry_safe(cmd_pos, cmd_pos_n, &o->pending_cmds_head,
+				 link) {
+		switch (cmd_pos->type) {
+		case BNX2X_MCAST_CMD_ADD:
+			bnx2x_mcast_hdl_pending_add_e2(bp, o, cmd_pos, &cnt);
+			break;
+
+		case BNX2X_MCAST_CMD_DEL:
+			bnx2x_mcast_hdl_pending_del_e2(bp, o, cmd_pos, &cnt);
+			break;
+
+		case BNX2X_MCAST_CMD_RESTORE:
+			bnx2x_mcast_hdl_pending_restore_e2(bp, o, cmd_pos,
+							   &cnt);
+			break;
+
+		default:
+			BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
+			return -EINVAL;
+		}
+
+		/* If the command has been completed - remove it from the list
+		 * and free the memory
+		 */
+		if (cmd_pos->done) {
+			list_del(&cmd_pos->link);
+			kfree(cmd_pos);
+		}
+
+		/* Break if we reached the maximum number of rules */
+		if (cnt >= o->max_cmd_len)
+			break;
+	}
+
+	return cnt;
+}
+
+static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp,
+	struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
+	int *line_idx)
+{
+	struct bnx2x_mcast_list_elem *mlist_pos;
+	union bnx2x_mcast_config_data cfg_data = {0};
+	int cnt = *line_idx;
+
+	list_for_each_entry(mlist_pos, &p->mcast_list, link) {
+		cfg_data.mac = mlist_pos->mac;
+		o->set_one_rule(bp, o, cnt, &cfg_data, BNX2X_MCAST_CMD_ADD);
+
+		cnt++;
+
+		DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT
+				 " mcast MAC\n",
+				 BNX2X_MAC_PRN_LIST(mlist_pos->mac));
+	}
+
+	*line_idx = cnt;
+}
+
+static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp,
+	struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
+	int *line_idx)
+{
+	int cnt = *line_idx, i;
+
+	for (i = 0; i < p->mcast_list_len; i++) {
+		o->set_one_rule(bp, o, cnt, NULL, BNX2X_MCAST_CMD_DEL);
+
+		cnt++;
+
+		DP(BNX2X_MSG_SP, "Deleting MAC. %d left\n",
+				 p->mcast_list_len - i - 1);
+	}
+
+	*line_idx = cnt;
+}
+
+/**
+ * bnx2x_mcast_handle_current_cmd - write the current command into ramrod data
+ *
+ * @bp:		device handle
+ * @p:		multicast ramrod parameters
+ * @cmd:	command to handle (BNX2X_MCAST_CMD_X)
+ * @start_cnt:	first line in the ramrod data that may be used
+ *
+ * This function is called iff there is enough room for the current command in
+ * the ramrod data.
+ * Returns number of lines filled in the ramrod data in total.
+ */
+static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp,
+			struct bnx2x_mcast_ramrod_params *p, int cmd,
+			int start_cnt)
+{
+	struct bnx2x_mcast_obj *o = p->mcast_obj;
+	int cnt = start_cnt;
+
+	DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
+
+	switch (cmd) {
+	case BNX2X_MCAST_CMD_ADD:
+		bnx2x_mcast_hdl_add(bp, o, p, &cnt);
+		break;
+
+	case BNX2X_MCAST_CMD_DEL:
+		bnx2x_mcast_hdl_del(bp, o, p, &cnt);
+		break;
+
+	case BNX2X_MCAST_CMD_RESTORE:
+		o->hdl_restore(bp, o, 0, &cnt);
+		break;
+
+	default:
+		BNX2X_ERR("Unknown command: %d\n", cmd);
+		return -EINVAL;
+	}
+
+	/* The current command has been handled */
+	p->mcast_list_len = 0;
+
+	return cnt;
+}
+
+static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
+				   struct bnx2x_mcast_ramrod_params *p,
+				   int cmd)
+{
+	struct bnx2x_mcast_obj *o = p->mcast_obj;
+	int reg_sz = o->get_registry_size(o);
+
+	switch (cmd) {
+	/* DEL command deletes all currently configured MACs */
+	case BNX2X_MCAST_CMD_DEL:
+		o->set_registry_size(o, 0);
+		/* Don't break */
+
+	/* RESTORE command will restore the entire multicast configuration */
+	case BNX2X_MCAST_CMD_RESTORE:
+		/* Here we set the approximate amount of work to do, which may
+		 * in fact be less: some MACs in postponed ADD command(s)
+		 * scheduled before this command may fall into the same bin,
+		 * so the actual number of bins set in the registry would be
+		 * less than estimated here. See
+		 * bnx2x_mcast_set_one_rule_e2() for further details.
+		 */
+		p->mcast_list_len = reg_sz;
+		break;
+
+	case BNX2X_MCAST_CMD_ADD:
+	case BNX2X_MCAST_CMD_CONT:
+		/* Here we assume that all new MACs will fall into new bins.
+		 * However we will correct the real registry size after we
+		 * handle all pending commands.
+		 */
+		o->set_registry_size(o, reg_sz + p->mcast_list_len);
+		break;
+
+	default:
+		BNX2X_ERR("Unknown command: %d\n", cmd);
+		return -EINVAL;
+
+	}
+
+	/* Increase the total number of MACs pending to be configured */
+	o->total_pending_num += p->mcast_list_len;
+
+	return 0;
+}
+
+static void bnx2x_mcast_revert_e2(struct bnx2x *bp,
+				      struct bnx2x_mcast_ramrod_params *p,
+				      int old_num_bins)
+{
+	struct bnx2x_mcast_obj *o = p->mcast_obj;
+
+	o->set_registry_size(o, old_num_bins);
+	o->total_pending_num -= p->mcast_list_len;
+}
+
+/**
+ * bnx2x_mcast_set_rdata_hdr_e2 - sets the header values
+ *
+ * @bp:		device handle
+ * @p:		multicast ramrod parameters
+ * @len:	number of rules to handle
+ */
+static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp,
+					struct bnx2x_mcast_ramrod_params *p,
+					u8 len)
+{
+	struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
+	struct eth_multicast_rules_ramrod_data *data =
+		(struct eth_multicast_rules_ramrod_data *)(r->rdata);
+
+	data->header.echo = ((r->cid & BNX2X_SWCID_MASK) |
+			  (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT));
+	data->header.rule_cnt = len;
+}
+
+/**
+ * bnx2x_mcast_refresh_registry_e2 - recalculate the actual number of set bins
+ *
+ * @bp:		device handle
+ * @o:		multicast object
+ *
+ * Recalculate the actual number of set bins in the registry using Brian
+ * Kernighan's algorithm: its execution complexity is proportional to the
+ * number of set bins.
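+ * For example, elem = 0xb0 clears as 0xb0 -> 0xa0 -> 0x80 -> 0, i.e. three
+ * iterations for three set bits.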
+ *
+ * returns 0 for the compliance with bnx2x_mcast_refresh_registry_e1().
+ */
+static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp,
+						  struct bnx2x_mcast_obj *o)
+{
+	int i, cnt = 0;
+	u64 elem;
+
+	for (i = 0; i < BNX2X_MCAST_VEC_SZ; i++) {
+		elem = o->registry.aprox_match.vec[i];
+		for (; elem; cnt++)
+			elem &= elem - 1;
+	}
+
+	o->set_registry_size(o, cnt);
+
+	return 0;
+}
+
+static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
+				struct bnx2x_mcast_ramrod_params *p,
+				int cmd)
+{
+	struct bnx2x_raw_obj *raw = &p->mcast_obj->raw;
+	struct bnx2x_mcast_obj *o = p->mcast_obj;
+	struct eth_multicast_rules_ramrod_data *data =
+		(struct eth_multicast_rules_ramrod_data *)(raw->rdata);
+	int cnt = 0, rc;
+
+	/* Reset the ramrod data buffer */
+	memset(data, 0, sizeof(*data));
+
+	cnt = bnx2x_mcast_handle_pending_cmds_e2(bp, p);
+
+	/* If there are no more pending commands - clear SCHEDULED state */
+	if (list_empty(&o->pending_cmds_head))
+		o->clear_sched(o);
+
+	/* The below may be true iff there was enough room in ramrod
+	 * data for all pending commands and for the current
+	 * command. Otherwise the current command would have been added
+	 * to the pending commands and p->mcast_list_len would have been
+	 * zeroed.
+	 */
+	if (p->mcast_list_len > 0)
+		cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, cnt);
+
+	/* We've pulled out some MACs - update the total number of
+	 * outstanding.
+	 */
+	o->total_pending_num -= cnt;
+
+	/* send a ramrod */
+	WARN_ON(o->total_pending_num < 0);
+	WARN_ON(cnt > o->max_cmd_len);
+
+	bnx2x_mcast_set_rdata_hdr_e2(bp, p, (u8)cnt);
+
+	/* Update a registry size if there are no more pending operations.
+	 *
+	 * We don't want to change the value of the registry size if there are
+	 * pending operations because we want it to always be equal to the
+	 * exact or the approximate number (see bnx2x_mcast_validate_e2()) of
+	 * set bins after the last requested operation in order to properly
+	 * evaluate the size of the next DEL/RESTORE operation.
+	 *
+	 * Note that we update the registry itself during command(s) handling
+	 * - see bnx2x_mcast_set_one_rule_e2(). That's because for 57712 we
+	 * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
+	 * with a limited amount of update commands (per MAC/bin) and we don't
+	 * know in this scope what the actual state of bins configuration is
+	 * going to be after this ramrod.
+	 */
+	if (!o->total_pending_num)
+		bnx2x_mcast_refresh_registry_e2(bp, o);
+
+	/* Commit writes towards the memory before sending a ramrod */
+	mb();
+
+	/* If CLEAR_ONLY was requested - don't send a ramrod and clear
+	 * RAMROD_PENDING status immediately.
+	 */
+	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
+		raw->clear_pending(raw);
+		return 0;
+	} else {
+		/* Send a ramrod */
+		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_MULTICAST_RULES,
+				   raw->cid, U64_HI(raw->rdata_mapping),
+				   U64_LO(raw->rdata_mapping),
+				   ETH_CONNECTION_TYPE);
+		if (rc)
+			return rc;
+
+		/* Ramrod completion is pending */
+		return 1;
+	}
+}
+
+static int bnx2x_mcast_validate_e1h(struct bnx2x *bp,
+				    struct bnx2x_mcast_ramrod_params *p,
+				    int cmd)
+{
+	/* Mark, that there is a work to do */
+	if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE))
+		p->mcast_list_len = 1;
+
+	return 0;
+}
+
+static void bnx2x_mcast_revert_e1h(struct bnx2x *bp,
+				       struct bnx2x_mcast_ramrod_params *p,
+				       int old_num_bins)
+{
+	/* Do nothing */
+}
+
+#define BNX2X_57711_SET_MC_FILTER(filter, bit) \
+do { \
+	(filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
+} while (0)
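+/* The 57711 approximate match table is a flat bit vector spread over
+ * MC_HASH_SIZE u32 words: the macro above sets bit (bit & 0x1f) of word
+ * (bit >> 5).
+ */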
+
+static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp,
+					   struct bnx2x_mcast_obj *o,
+					   struct bnx2x_mcast_ramrod_params *p,
+					   u32 *mc_filter)
+{
+	struct bnx2x_mcast_list_elem *mlist_pos;
+	int bit;
+
+	list_for_each_entry(mlist_pos, &p->mcast_list, link) {
+		bit = bnx2x_mcast_bin_from_mac(mlist_pos->mac);
+		BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
+
+		DP(BNX2X_MSG_SP, "About to configure "
+				 BNX2X_MAC_FMT" mcast MAC, bin %d\n",
+				 BNX2X_MAC_PRN_LIST(mlist_pos->mac), bit);
+
+		/* bookkeeping... */
+		BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
+				  bit);
+	}
+}
+
+static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp,
+	struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
+	u32 *mc_filter)
+{
+	int bit;
+
+	for (bit = bnx2x_mcast_get_next_bin(o, 0);
+	     bit >= 0;
+	     bit = bnx2x_mcast_get_next_bin(o, bit + 1)) {
+		BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
+		DP(BNX2X_MSG_SP, "About to set bin %d\n", bit);
+	}
+}
+
+/* On 57711 we write the multicast MACs' approximate match
+ * table directly into the TSTORM's internal RAM, so no extra
+ * tricks are needed to make it work.
+ */
+static int bnx2x_mcast_setup_e1h(struct bnx2x *bp,
+				 struct bnx2x_mcast_ramrod_params *p,
+				 int cmd)
+{
+	int i;
+	struct bnx2x_mcast_obj *o = p->mcast_obj;
+	struct bnx2x_raw_obj *r = &o->raw;
+
+	/* If CLEAR_ONLY has been requested - just clear the registry;
+	 * otherwise build the new filter and write it into the internal
+	 * memory.
+	 */
+	if (!test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
+		u32 mc_filter[MC_HASH_SIZE] = {0};
+
+		/* Set the multicast filter bits before writing it into
+		 * the internal memory.
+		 */
+		switch (cmd) {
+		case BNX2X_MCAST_CMD_ADD:
+			bnx2x_mcast_hdl_add_e1h(bp, o, p, mc_filter);
+			break;
+
+		case BNX2X_MCAST_CMD_DEL:
+			DP(BNX2X_MSG_SP, "Invalidating multicast "
+					 "MACs configuration\n");
+
+			/* clear the registry */
+			memset(o->registry.aprox_match.vec, 0,
+			       sizeof(o->registry.aprox_match.vec));
+			break;
+
+		case BNX2X_MCAST_CMD_RESTORE:
+			bnx2x_mcast_hdl_restore_e1h(bp, o, p, mc_filter);
+			break;
+
+		default:
+			BNX2X_ERR("Unknown command: %d\n", cmd);
+			return -EINVAL;
+		}
+
+		/* Set the mcast filter in the internal memory */
+		for (i = 0; i < MC_HASH_SIZE; i++)
+			REG_WR(bp, MC_HASH_OFFSET(bp, i), mc_filter[i]);
+	} else
+		/* clear the registry */
+		memset(o->registry.aprox_match.vec, 0,
+		       sizeof(o->registry.aprox_match.vec));
+
+	/* We are done */
+	r->clear_pending(r);
+
+	return 0;
+}
+
+static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
+				   struct bnx2x_mcast_ramrod_params *p,
+				   int cmd)
+{
+	struct bnx2x_mcast_obj *o = p->mcast_obj;
+	int reg_sz = o->get_registry_size(o);
+
+	switch (cmd) {
+	/* DEL command deletes all currently configured MACs */
+	case BNX2X_MCAST_CMD_DEL:
+		o->set_registry_size(o, 0);
+		/* Don't break */
+
+	/* RESTORE command will restore the entire multicast configuration */
+	case BNX2X_MCAST_CMD_RESTORE:
+		p->mcast_list_len = reg_sz;
+		  DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n",
+				   cmd, p->mcast_list_len);
+		break;
+
+	case BNX2X_MCAST_CMD_ADD:
+	case BNX2X_MCAST_CMD_CONT:
+		/* Multicast MACs on 57710 are configured as unicast MACs and
+		 * there is only a limited number of CAM entries for that
+		 * matter.
+		 */
+		if (p->mcast_list_len > o->max_cmd_len) {
+			BNX2X_ERR("Can't configure more than %d multicast MACs"
+				   "on 57710\n", o->max_cmd_len);
+			return -EINVAL;
+		}
+		/* Every configured MAC should be cleared if DEL command is
+		 * called. Only the last ADD command is relevant since every
+		 * ADD command overrides the previous configuration.
+		 */
+		DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
+		if (p->mcast_list_len > 0)
+			o->set_registry_size(o, p->mcast_list_len);
+
+		break;
+
+	default:
+		BNX2X_ERR("Unknown command: %d\n", cmd);
+		return -EINVAL;
+
+	}
+
+	/* We want to ensure that commands are executed one by one for 57710.
+	 * Therefore each non-empty command will consume o->max_cmd_len.
+	 */
+	if (p->mcast_list_len)
+		o->total_pending_num += o->max_cmd_len;
+
+	return 0;
+}
+
+static void bnx2x_mcast_revert_e1(struct bnx2x *bp,
+				      struct bnx2x_mcast_ramrod_params *p,
+				      int old_num_macs)
+{
+	struct bnx2x_mcast_obj *o = p->mcast_obj;
+
+	o->set_registry_size(o, old_num_macs);
+
+	/* If the current command hasn't been handled yet, reaching this
+	 * point means it's meant to be dropped and we have to update
+	 * the number of outstanding MACs accordingly.
+	 */
+	if (p->mcast_list_len)
+		o->total_pending_num -= o->max_cmd_len;
+}
+
+static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp,
+					struct bnx2x_mcast_obj *o, int idx,
+					union bnx2x_mcast_config_data *cfg_data,
+					int cmd)
+{
+	struct bnx2x_raw_obj *r = &o->raw;
+	struct mac_configuration_cmd *data =
+		(struct mac_configuration_cmd *)(r->rdata);
+
+	/* copy mac */
+	if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) {
+		bnx2x_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr,
+				      &data->config_table[idx].middle_mac_addr,
+				      &data->config_table[idx].lsb_mac_addr,
+				      cfg_data->mac);
+
+		data->config_table[idx].vlan_id = 0;
+		data->config_table[idx].pf_id = r->func_id;
+		data->config_table[idx].clients_bit_vector =
+			cpu_to_le32(1 << r->cl_id);
+
+		SET_FLAG(data->config_table[idx].flags,
+			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
+			 T_ETH_MAC_COMMAND_SET);
+	}
+}
+
+/**
+ * bnx2x_mcast_set_rdata_hdr_e1 - set header values in mac_configuration_cmd
+ *
+ * @bp:		device handle
+ * @p:		multicast ramrod parameters
+ * @len:	number of rules to handle
+ */
+static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp,
+					struct bnx2x_mcast_ramrod_params *p,
+					u8 len)
+{
+	struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
+	struct mac_configuration_cmd *data =
+		(struct mac_configuration_cmd *)(r->rdata);
+
+	u8 offset = (CHIP_REV_IS_SLOW(bp) ?
+		     BNX2X_MAX_EMUL_MULTI*(1 + r->func_id) :
+		     BNX2X_MAX_MULTICAST*(1 + r->func_id));
+
+	data->hdr.offset = offset;
+	data->hdr.client_id = 0xff;
+	data->hdr.echo = ((r->cid & BNX2X_SWCID_MASK) |
+			  (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT));
+	data->hdr.length = len;
+}
+
+/**
+ * bnx2x_mcast_handle_restore_cmd_e1 - restore command for 57710
+ *
+ * @bp:		device handle
+ * @o:		multicast object
+ * @start_idx:	index in the registry to start from
+ * @rdata_idx:	index in the ramrod data to start from
+ *
+ * The restore command for 57710 is like all other commands - always a
+ * stand-alone command - so start_idx and rdata_idx will always be 0. This
+ * function always succeeds.
+ * Returns -1 to comply with the 57712 variant.
+ */
+static inline int bnx2x_mcast_handle_restore_cmd_e1(
+	struct bnx2x *bp, struct bnx2x_mcast_obj *o, int start_idx,
+	int *rdata_idx)
+{
+	struct bnx2x_mcast_mac_elem *elem;
+	int i = 0;
+	union bnx2x_mcast_config_data cfg_data = {0};
+
+	/* go through the registry and configure the MACs from it. */
+	list_for_each_entry(elem, &o->registry.exact_match.macs, link) {
+		cfg_data.mac = &elem->mac[0];
+		o->set_one_rule(bp, o, i, &cfg_data, BNX2X_MCAST_CMD_RESTORE);
+
+		i++;
+
+		  DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT
+				   " mcast MAC\n",
+				   BNX2X_MAC_PRN_LIST(cfg_data.mac));
+	}
+
+	*rdata_idx = i;
+
+	return -1;
+}
+
+static inline int bnx2x_mcast_handle_pending_cmds_e1(
+	struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p)
+{
+	struct bnx2x_pending_mcast_cmd *cmd_pos;
+	struct bnx2x_mcast_mac_elem *pmac_pos;
+	struct bnx2x_mcast_obj *o = p->mcast_obj;
+	union bnx2x_mcast_config_data cfg_data = {0};
+	int cnt = 0;
+
+	/* If nothing to be done - return */
+	if (list_empty(&o->pending_cmds_head))
+		return 0;
+
+	/* Handle the first command */
+	cmd_pos = list_first_entry(&o->pending_cmds_head,
+				   struct bnx2x_pending_mcast_cmd, link);
+
+	switch (cmd_pos->type) {
+	case BNX2X_MCAST_CMD_ADD:
+		list_for_each_entry(pmac_pos, &cmd_pos->data.macs_head, link) {
+			cfg_data.mac = &pmac_pos->mac[0];
+			o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
+
+			cnt++;
+
+			DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT
+					 " mcast MAC\n",
+					 BNX2X_MAC_PRN_LIST(pmac_pos->mac));
+		}
+		break;
+
+	case BNX2X_MCAST_CMD_DEL:
+		cnt = cmd_pos->data.macs_num;
+		DP(BNX2X_MSG_SP, "About to delete %d multicast MACs\n", cnt);
+		break;
+
+	case BNX2X_MCAST_CMD_RESTORE:
+		o->hdl_restore(bp, o, 0, &cnt);
+		break;
+
+	default:
+		BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
+		return -EINVAL;
+	}
+
+	list_del(&cmd_pos->link);
+	kfree(cmd_pos);
+
+	return cnt;
+}
+
+/**
+ * bnx2x_get_fw_mac_addr - the inverse of bnx2x_set_fw_mac_addr().
+ *
+ * @fw_hi:	two high MAC address bytes in FW (byte-swapped u16) format
+ * @fw_mid:	two middle MAC address bytes in FW format
+ * @fw_lo:	two low MAC address bytes in FW format
+ * @mac:	output buffer for the MAC address
+ */
+static inline void bnx2x_get_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
+					 __le16 *fw_lo, u8 *mac)
+{
+	mac[1] = ((u8 *)fw_hi)[0];
+	mac[0] = ((u8 *)fw_hi)[1];
+	mac[3] = ((u8 *)fw_mid)[0];
+	mac[2] = ((u8 *)fw_mid)[1];
+	mac[5] = ((u8 *)fw_lo)[0];
+	mac[4] = ((u8 *)fw_lo)[1];
+}
+
+/**
+ * bnx2x_mcast_refresh_registry_e1 - update the exact match mcast registry
+ *
+ * @bp:		device handle
+ * @o:		multicast object
+ *
+ * Check the ramrod data first entry flag to see if it's a DELETE or ADD command
+ * and update the registry correspondingly: if ADD - allocate memory and add
+ * the entries to the registry (list), if DELETE - clear the registry and free
+ * the memory.
+ */
+static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp,
+						  struct bnx2x_mcast_obj *o)
+{
+	struct bnx2x_raw_obj *raw = &o->raw;
+	struct bnx2x_mcast_mac_elem *elem;
+	struct mac_configuration_cmd *data =
+			(struct mac_configuration_cmd *)(raw->rdata);
+
+	/* If first entry contains a SET bit - the command was ADD,
+	 * otherwise - DEL_ALL
+	 */
+	if (GET_FLAG(data->config_table[0].flags,
+			MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) {
+		int i, len = data->hdr.length;
+
+		/* Break if it was a RESTORE command */
+		if (!list_empty(&o->registry.exact_match.macs))
+			return 0;
+
+		elem = kzalloc(sizeof(*elem)*len, GFP_ATOMIC);
+		if (!elem) {
+			BNX2X_ERR("Failed to allocate registry memory\n");
+			return -ENOMEM;
+		}
+
+		for (i = 0; i < len; i++, elem++) {
+			bnx2x_get_fw_mac_addr(
+				&data->config_table[i].msb_mac_addr,
+				&data->config_table[i].middle_mac_addr,
+				&data->config_table[i].lsb_mac_addr,
+				elem->mac);
+			DP(BNX2X_MSG_SP, "Adding registry entry for ["
+					 BNX2X_MAC_FMT"]\n",
+				   BNX2X_MAC_PRN_LIST(elem->mac));
+			list_add_tail(&elem->link,
+				      &o->registry.exact_match.macs);
+		}
+	} else {
+		elem = list_first_entry(&o->registry.exact_match.macs,
+					struct bnx2x_mcast_mac_elem, link);
+		DP(BNX2X_MSG_SP, "Deleting a registry\n");
+		kfree(elem);
+		INIT_LIST_HEAD(&o->registry.exact_match.macs);
+	}
+
+	return 0;
+}
+
+static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
+				struct bnx2x_mcast_ramrod_params *p,
+				int cmd)
+{
+	struct bnx2x_mcast_obj *o = p->mcast_obj;
+	struct bnx2x_raw_obj *raw = &o->raw;
+	struct mac_configuration_cmd *data =
+		(struct mac_configuration_cmd *)(raw->rdata);
+	int cnt = 0, i, rc;
+
+	/* Reset the ramrod data buffer */
+	memset(data, 0, sizeof(*data));
+
+	/* First set all entries as invalid */
+	for (i = 0; i < o->max_cmd_len; i++)
+		SET_FLAG(data->config_table[i].flags,
+			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
+			 T_ETH_MAC_COMMAND_INVALIDATE);
+
+	/* Handle pending commands first */
+	cnt = bnx2x_mcast_handle_pending_cmds_e1(bp, p);
+
+	/* If there are no more pending commands - clear SCHEDULED state */
+	if (list_empty(&o->pending_cmds_head))
+		o->clear_sched(o);
+
+	/* The below may be true iff there were no pending commands */
+	if (!cnt)
+		cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, 0);
+
+	/* For 57710 every command has o->max_cmd_len length to ensure that
+	 * commands are done one at a time.
+	 */
+	o->total_pending_num -= o->max_cmd_len;
+
+	/* send a ramrod */
+
+	WARN_ON(cnt > o->max_cmd_len);
+
+	/* Set ramrod header (in particular, a number of entries to update) */
+	bnx2x_mcast_set_rdata_hdr_e1(bp, p, (u8)cnt);
+
+	/* update a registry: we need the registry contents to be always up
+	 * to date in order to be able to execute a RESTORE opcode. Here
+	 * we use the fact that for 57710 we sent one command at a time
+	 * hence we may take the registry update out of the command handling
+	 * and do it in a simpler way here.
+	 */
+	rc = bnx2x_mcast_refresh_registry_e1(bp, o);
+	if (rc)
+		return rc;
+
+	/* Commit writes towards the memory before sending a ramrod */
+	mb();
+
+	/* If CLEAR_ONLY was requested - don't send a ramrod and clear
+	 * RAMROD_PENDING status immediately.
+	 */
+	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
+		raw->clear_pending(raw);
+		return 0;
+	} else {
+		/* Send a ramrod */
+		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, raw->cid,
+				   U64_HI(raw->rdata_mapping),
+				   U64_LO(raw->rdata_mapping),
+				   ETH_CONNECTION_TYPE);
+		if (rc)
+			return rc;
+
+		/* Ramrod completion is pending */
+		return 1;
+	}
+}
+
+static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o)
+{
+	return o->registry.exact_match.num_macs_set;
+}
+
+static int bnx2x_mcast_get_registry_size_aprox(struct bnx2x_mcast_obj *o)
+{
+	return o->registry.aprox_match.num_bins_set;
+}
+
+static void bnx2x_mcast_set_registry_size_exact(struct bnx2x_mcast_obj *o,
+						int n)
+{
+	o->registry.exact_match.num_macs_set = n;
+}
+
+static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o,
+						int n)
+{
+	o->registry.aprox_match.num_bins_set = n;
+}
+
+int bnx2x_config_mcast(struct bnx2x *bp,
+		       struct bnx2x_mcast_ramrod_params *p,
+		       int cmd)
+{
+	struct bnx2x_mcast_obj *o = p->mcast_obj;
+	struct bnx2x_raw_obj *r = &o->raw;
+	int rc = 0, old_reg_size;
+
+	/* This is needed to recover number of currently configured mcast macs
+	 * in case of failure.
+	 */
+	old_reg_size = o->get_registry_size(o);
+
+	/* Do some calculations and checks */
+	rc = o->validate(bp, p, cmd);
+	if (rc)
+		return rc;
+
+	/* Return if there is no work to do */
+	if ((!p->mcast_list_len) && (!o->check_sched(o)))
+		return 0;
+
+	DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d "
+			 "o->max_cmd_len=%d\n", o->total_pending_num,
+			 p->mcast_list_len, o->max_cmd_len);
+
+	/* Enqueue the current command to the pending list if we can't complete
+	 * it in the current iteration
+	 */
+	if (r->check_pending(r) ||
+	    ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
+		rc = o->enqueue_cmd(bp, p->mcast_obj, p, cmd);
+		if (rc < 0)
+			goto error_exit1;
+
+		/* As long as the current command is in a command list we
+		 * don't need to handle it separately.
+		 */
+		p->mcast_list_len = 0;
+	}
+
+	if (!r->check_pending(r)) {
+
+		/* Set 'pending' state */
+		r->set_pending(r);
+
+		/* Configure the new classification in the chip */
+		rc = o->config_mcast(bp, p, cmd);
+		if (rc < 0)
+			goto error_exit2;
+
+		/* Wait for a ramrod completion if was requested */
+		if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
+			rc = o->wait_comp(bp, o);
+	}
+
+	return rc;
+
+error_exit2:
+	r->clear_pending(r);
+
+error_exit1:
+	o->revert(bp, p, old_reg_size);
+
+	return rc;
+}
+
+static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o)
+{
+	smp_mb__before_clear_bit();
+	clear_bit(o->sched_state, o->raw.pstate);
+	smp_mb__after_clear_bit();
+}
+
+static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o)
+{
+	smp_mb__before_clear_bit();
+	set_bit(o->sched_state, o->raw.pstate);
+	smp_mb__after_clear_bit();
+}
+
+static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o)
+{
+	return !!test_bit(o->sched_state, o->raw.pstate);
+}
+
+static bool bnx2x_mcast_check_pending(struct bnx2x_mcast_obj *o)
+{
+	return o->raw.check_pending(&o->raw) || o->check_sched(o);
+}
+
+void bnx2x_init_mcast_obj(struct bnx2x *bp,
+			  struct bnx2x_mcast_obj *mcast_obj,
+			  u8 mcast_cl_id, u32 mcast_cid, u8 func_id,
+			  u8 engine_id, void *rdata, dma_addr_t rdata_mapping,
+			  int state, unsigned long *pstate, bnx2x_obj_type type)
+{
+	memset(mcast_obj, 0, sizeof(*mcast_obj));
+
+	bnx2x_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
+			   rdata, rdata_mapping, state, pstate, type);
+
+	mcast_obj->engine_id = engine_id;
+
+	INIT_LIST_HEAD(&mcast_obj->pending_cmds_head);
+
+	mcast_obj->sched_state = BNX2X_FILTER_MCAST_SCHED;
+	mcast_obj->check_sched = bnx2x_mcast_check_sched;
+	mcast_obj->set_sched = bnx2x_mcast_set_sched;
+	mcast_obj->clear_sched = bnx2x_mcast_clear_sched;
+
+	if (CHIP_IS_E1(bp)) {
+		mcast_obj->config_mcast      = bnx2x_mcast_setup_e1;
+		mcast_obj->enqueue_cmd       = bnx2x_mcast_enqueue_cmd;
+		mcast_obj->hdl_restore       =
+			bnx2x_mcast_handle_restore_cmd_e1;
+		mcast_obj->check_pending     = bnx2x_mcast_check_pending;
+
+		if (CHIP_REV_IS_SLOW(bp))
+			mcast_obj->max_cmd_len = BNX2X_MAX_EMUL_MULTI;
+		else
+			mcast_obj->max_cmd_len = BNX2X_MAX_MULTICAST;
+
+		mcast_obj->wait_comp         = bnx2x_mcast_wait;
+		mcast_obj->set_one_rule      = bnx2x_mcast_set_one_rule_e1;
+		mcast_obj->validate          = bnx2x_mcast_validate_e1;
+		mcast_obj->revert            = bnx2x_mcast_revert_e1;
+		mcast_obj->get_registry_size =
+			bnx2x_mcast_get_registry_size_exact;
+		mcast_obj->set_registry_size =
+			bnx2x_mcast_set_registry_size_exact;
+
+		/* 57710 is the only chip that uses the exact match for mcast
+		 * at the moment.
+		 */
+		INIT_LIST_HEAD(&mcast_obj->registry.exact_match.macs);
+
+	} else if (CHIP_IS_E1H(bp)) {
+		mcast_obj->config_mcast  = bnx2x_mcast_setup_e1h;
+		mcast_obj->enqueue_cmd   = NULL;
+		mcast_obj->hdl_restore   = NULL;
+		mcast_obj->check_pending = bnx2x_mcast_check_pending;
+
+		/* 57711 doesn't send a ramrod, so it has unlimited credit
+		 * for one command.
+		 */
+		mcast_obj->max_cmd_len       = -1;
+		mcast_obj->wait_comp         = bnx2x_mcast_wait;
+		mcast_obj->set_one_rule      = NULL;
+		mcast_obj->validate          = bnx2x_mcast_validate_e1h;
+		mcast_obj->revert            = bnx2x_mcast_revert_e1h;
+		mcast_obj->get_registry_size =
+			bnx2x_mcast_get_registry_size_aprox;
+		mcast_obj->set_registry_size =
+			bnx2x_mcast_set_registry_size_aprox;
+	} else {
+		mcast_obj->config_mcast      = bnx2x_mcast_setup_e2;
+		mcast_obj->enqueue_cmd       = bnx2x_mcast_enqueue_cmd;
+		mcast_obj->hdl_restore       =
+			bnx2x_mcast_handle_restore_cmd_e2;
+		mcast_obj->check_pending     = bnx2x_mcast_check_pending;
+		/* TODO: There should be a proper HSI define for this number!!!
+		 */
+		mcast_obj->max_cmd_len       = 16;
+		mcast_obj->wait_comp         = bnx2x_mcast_wait;
+		mcast_obj->set_one_rule      = bnx2x_mcast_set_one_rule_e2;
+		mcast_obj->validate          = bnx2x_mcast_validate_e2;
+		mcast_obj->revert            = bnx2x_mcast_revert_e2;
+		mcast_obj->get_registry_size =
+			bnx2x_mcast_get_registry_size_aprox;
+		mcast_obj->set_registry_size =
+			bnx2x_mcast_set_registry_size_aprox;
+	}
+}
+
+/*************************** Credit handling **********************************/
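+/* A credit pool guards a shared HW resource (e.g. CAM entries): "credit"
+ * counts the currently free units and "pool_sz" the total. get()/put()
+ * consume and return credit atomically, while get_entry()/put_entry()
+ * additionally allocate/release a concrete entry offset via a bit-vector
+ * mirror of the pool.
+ */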
+
+/**
+ * __atomic_add_ifless - add if the result is less than a given value.
+ *
+ * @v:	pointer of type atomic_t
+ * @a:	the amount to add to v...
+ * @u:	...if (v + a) is less than u.
+ *
+ * returns true if (v + a) was less than u, and false otherwise.
+ *
+ */
+static inline bool __atomic_add_ifless(atomic_t *v, int a, int u)
+{
+	int c, old;
+
+	c = atomic_read(v);
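+	/* Classic lock-free loop: if another CPU changed the counter between
+	 * our read and the cmpxchg, retry with the freshly returned value.
+	 */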
+	for (;;) {
+		if (unlikely(c + a >= u))
+			return false;
+
+		old = atomic_cmpxchg((v), c, c + a);
+		if (likely(old == c))
+			break;
+		c = old;
+	}
+
+	return true;
+}
+
+/**
+ * __atomic_dec_ifmoe - dec if the result is more than or equal to a given value.
+ *
+ * @v:	pointer of type atomic_t
+ * @a:	the amount to dec from v...
+ * @u:	...if (v - a) is more than or equal to u.
+ *
+ * returns true if (v - a) was more than or equal to u, and false
+ * otherwise.
+ */
+static inline bool __atomic_dec_ifmoe(atomic_t *v, int a, int u)
+{
+	int c, old;
+
+	c = atomic_read(v);
+	for (;;) {
+		if (unlikely(c - a < u))
+			return false;
+
+		old = atomic_cmpxchg((v), c, c - a);
+		if (likely(old == c))
+			break;
+		c = old;
+	}
+
+	return true;
+}
+
+static bool bnx2x_credit_pool_get(struct bnx2x_credit_pool_obj *o, int cnt)
+{
+	bool rc;
+
+	smp_mb();
+	rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
+	smp_mb();
+
+	return rc;
+}
+
+static bool bnx2x_credit_pool_put(struct bnx2x_credit_pool_obj *o, int cnt)
+{
+	bool rc;
+
+	smp_mb();
+
+	/* Don't allow a refill if credit + cnt > pool_sz */
+	rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);
+
+	smp_mb();
+
+	return rc;
+}
+
+static int bnx2x_credit_pool_check(struct bnx2x_credit_pool_obj *o)
+{
+	int cur_credit;
+
+	smp_mb();
+	cur_credit = atomic_read(&o->credit);
+
+	return cur_credit;
+}
+
+static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o,
+					  int cnt)
+{
+	return true;
+}
+
+static bool bnx2x_credit_pool_get_entry(
+	struct bnx2x_credit_pool_obj *o,
+	int *offset)
+{
+	int idx, vec, i;
+
+	*offset = -1;
+
+	/* Find "internal cam-offset" then add to base for this object... */
+	for (vec = 0; vec < BNX2X_POOL_VEC_SIZE; vec++) {
+
+		/* Skip the current vector if there are no free entries in it */
+		if (!o->pool_mirror[vec])
+			continue;
+
+		/* If we've got here we are going to find a free entry */
+		for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
+		      i < BIT_VEC64_ELEM_SZ; idx++, i++)
+
+			if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
+				/* Got one!! */
+				BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
+				*offset = o->base_pool_offset + idx;
+				return true;
+			}
+	}
+
+	return false;
+}
+
+static bool bnx2x_credit_pool_put_entry(
+	struct bnx2x_credit_pool_obj *o,
+	int offset)
+{
+	if (offset < o->base_pool_offset)
+		return false;
+
+	offset -= o->base_pool_offset;
+
+	if (offset >= o->pool_sz)
+		return false;
+
+	/* Return the entry to the pool */
+	BIT_VEC64_SET_BIT(o->pool_mirror, offset);
+
+	return true;
+}
+
+static bool bnx2x_credit_pool_put_entry_always_true(
+	struct bnx2x_credit_pool_obj *o,
+	int offset)
+{
+	return true;
+}
+
+static bool bnx2x_credit_pool_get_entry_always_true(
+	struct bnx2x_credit_pool_obj *o,
+	int *offset)
+{
+	*offset = -1;
+	return true;
+}
+
+/**
+ * bnx2x_init_credit_pool - initialize credit pool internals.
+ *
+ * @p:		credit pool object
+ * @base:	Base entry in the CAM to use.
+ * @credit:	pool size.
+ *
+ * If base is negative no CAM entries handling will be performed.
+ * If credit is negative pool operations will always succeed (unlimited pool).
+ *
+ */
+static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
+					  int base, int credit)
+{
+	/* Zero the object first */
+	memset(p, 0, sizeof(*p));
+
+	/* Set the table to all 1s */
+	memset(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));
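+	/* (a set bit in the mirror marks a free entry - see
+	 *  bnx2x_credit_pool_get_entry())
+	 */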
+
+	/* Init a pool as full */
+	atomic_set(&p->credit, credit);
+
+	/* The total pool size */
+	p->pool_sz = credit;
+
+	p->base_pool_offset = base;
+
+	/* Commit the change */
+	smp_mb();
+
+	p->check = bnx2x_credit_pool_check;
+
+	/* if pool credit is negative - disable the checks */
+	if (credit >= 0) {
+		p->put      = bnx2x_credit_pool_put;
+		p->get      = bnx2x_credit_pool_get;
+		p->put_entry = bnx2x_credit_pool_put_entry;
+		p->get_entry = bnx2x_credit_pool_get_entry;
+	} else {
+		p->put      = bnx2x_credit_pool_always_true;
+		p->get      = bnx2x_credit_pool_always_true;
+		p->put_entry = bnx2x_credit_pool_put_entry_always_true;
+		p->get_entry = bnx2x_credit_pool_get_entry_always_true;
+	}
+
+	/* If base is negative - disable entries handling */
+	if (base < 0) {
+		p->put_entry = bnx2x_credit_pool_put_entry_always_true;
+		p->get_entry = bnx2x_credit_pool_get_entry_always_true;
+	}
+}
+
+void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
+				struct bnx2x_credit_pool_obj *p, u8 func_id,
+				u8 func_num)
+{
+/* TODO: this will be defined in consts as well... */
+#define BNX2X_CAM_SIZE_EMUL 5
+
+	int cam_sz;
+
+	if (CHIP_IS_E1(bp)) {
+		/* In E1, multicast is saved in the CAM... */
+		if (!CHIP_REV_IS_SLOW(bp))
+			cam_sz = (MAX_MAC_CREDIT_E1 / 2) - BNX2X_MAX_MULTICAST;
+		else
+			cam_sz = BNX2X_CAM_SIZE_EMUL - BNX2X_MAX_EMUL_MULTI;
+
+		bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
+
+	} else if (CHIP_IS_E1H(bp)) {
+		/* CAM credit is equally divided between all active functions
+		 * on the PORT.
+		 */
+		if (func_num > 0) {
+			if (!CHIP_REV_IS_SLOW(bp))
+				cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num));
+			else
+				cam_sz = BNX2X_CAM_SIZE_EMUL;
+			bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
+		} else {
+			/* this should never happen! Block MAC operations. */
+			bnx2x_init_credit_pool(p, 0, 0);
+		}
+
+	} else {
+
+		/*
+		 * CAM credit is equally divided between all active functions
+		 * on the PATH.
+		 */
+		if (func_num > 0) {
+			if (!CHIP_REV_IS_SLOW(bp))
+				cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
+			else
+				cam_sz = BNX2X_CAM_SIZE_EMUL;
+
+			/*
+			 * No need for CAM entries handling for 57712 and
+			 * newer.
+			 */
+			bnx2x_init_credit_pool(p, -1, cam_sz);
+		} else {
+			/* this should never happen! Block MAC operations. */
+			bnx2x_init_credit_pool(p, 0, 0);
+		}
+
+	}
+}
+
+void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
+				 struct bnx2x_credit_pool_obj *p,
+				 u8 func_id,
+				 u8 func_num)
+{
+	if (CHIP_IS_E1x(bp)) {
+		/*
+		 * There is no VLAN credit in HW on 57710 and 57711; only
+		 * MAC / MAC-VLAN can be set.
+		 */
+		bnx2x_init_credit_pool(p, 0, -1);
+	} else {
+		/*
+		 * VLAN credit is equally divided between all active functions
+		 * on the PATH.
+		 */
+		if (func_num > 0) {
+			int credit = MAX_VLAN_CREDIT_E2 / func_num;
+			bnx2x_init_credit_pool(p, func_id * credit, credit);
+		} else
+			/* this should never happen! Block VLAN operations. */
+			bnx2x_init_credit_pool(p, 0, 0);
+	}
+}
+
+/****************** RSS Configuration ******************/
+/**
+ * bnx2x_debug_print_ind_table - prints the indirection table configuration.
+ *
+ * @bp:		driver handle
+ * @p:		pointer to rss configuration
+ *
+ * Prints it when NETIF_MSG_IFUP debug level is configured.
+ */
+static inline void bnx2x_debug_print_ind_table(struct bnx2x *bp,
+					struct bnx2x_config_rss_params *p)
+{
+	int i;
+
+	DP(BNX2X_MSG_SP, "Setting indirection table to:\n");
+	DP(BNX2X_MSG_SP, "0x0000: ");
+	for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
+		DP_CONT(BNX2X_MSG_SP, "0x%02x ", p->ind_table[i]);
+
+		/* Print 4 bytes in a line */
+		if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) &&
+		    (((i + 1) & 0x3) == 0)) {
+			DP_CONT(BNX2X_MSG_SP, "\n");
+			DP(BNX2X_MSG_SP, "0x%04x: ", i + 1);
+		}
+	}
+
+	DP_CONT(BNX2X_MSG_SP, "\n");
+}
+
+/**
+ * bnx2x_setup_rss - configure RSS
+ *
+ * @bp:		device handle
+ * @p:		rss configuration
+ *
+ * Sends an RSS_UPDATE ramrod for that matter.
+ */
+static int bnx2x_setup_rss(struct bnx2x *bp,
+			   struct bnx2x_config_rss_params *p)
+{
+	struct bnx2x_rss_config_obj *o = p->rss_obj;
+	struct bnx2x_raw_obj *r = &o->raw;
+	struct eth_rss_update_ramrod_data *data =
+		(struct eth_rss_update_ramrod_data *)(r->rdata);
+	u8 rss_mode = 0;
+	int rc;
+
+	memset(data, 0, sizeof(*data));
+
+	DP(BNX2X_MSG_SP, "Configuring RSS\n");
+
+	/* Set an echo field */
+	data->echo = (r->cid & BNX2X_SWCID_MASK) |
+		     (r->state << BNX2X_SWCID_SHIFT);
+
+	/* RSS mode */
+	if (test_bit(BNX2X_RSS_MODE_DISABLED, &p->rss_flags))
+		rss_mode = ETH_RSS_MODE_DISABLED;
+	else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags))
+		rss_mode = ETH_RSS_MODE_REGULAR;
+	else if (test_bit(BNX2X_RSS_MODE_VLAN_PRI, &p->rss_flags))
+		rss_mode = ETH_RSS_MODE_VLAN_PRI;
+	else if (test_bit(BNX2X_RSS_MODE_E1HOV_PRI, &p->rss_flags))
+		rss_mode = ETH_RSS_MODE_E1HOV_PRI;
+	else if (test_bit(BNX2X_RSS_MODE_IP_DSCP, &p->rss_flags))
+		rss_mode = ETH_RSS_MODE_IP_DSCP;
+
+	data->rss_mode = rss_mode;
+
+	DP(BNX2X_MSG_SP, "rss_mode=%d\n", rss_mode);
+
+	/* RSS capabilities */
+	if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags))
+		data->capabilities |=
+			ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
+
+	if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags))
+		data->capabilities |=
+			ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
+
+	if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags))
+		data->capabilities |=
+			ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
+
+	if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags))
+		data->capabilities |=
+			ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
+
+	/* Hashing mask */
+	data->rss_result_mask = p->rss_result_mask;
+
+	/* RSS engine ID */
+	data->rss_engine_id = o->engine_id;
+
+	DP(BNX2X_MSG_SP, "rss_engine_id=%d\n", data->rss_engine_id);
+
+	/* Indirection table */
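+	/* (T_ETH_INDIRECTION_TABLE_SIZE entries, each mapping an RSS hash
+	 *  result to a client/queue ID)
+	 */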
+	memcpy(data->indirection_table, p->ind_table,
+		  T_ETH_INDIRECTION_TABLE_SIZE);
+
+	/* Remember the last configuration */
+	memcpy(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
+
+	/* Print the indirection table */
+	if (netif_msg_ifup(bp))
+		bnx2x_debug_print_ind_table(bp, p);
+
+	/* RSS keys */
+	if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
+		memcpy(&data->rss_key[0], &p->rss_key[0],
+		       sizeof(data->rss_key));
+		data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
+	}
+
+	/* Commit writes towards the memory before sending a ramrod */
+	mb();
+
+	/* Send a ramrod */
+	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_RSS_UPDATE, r->cid,
+			   U64_HI(r->rdata_mapping),
+			   U64_LO(r->rdata_mapping),
+			   ETH_CONNECTION_TYPE);
+
+	if (rc < 0)
+		return rc;
+
+	return 1;
+}
+
+void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
+			     u8 *ind_table)
+{
+	memcpy(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table));
+}
+
+int bnx2x_config_rss(struct bnx2x *bp,
+		     struct bnx2x_config_rss_params *p)
+{
+	int rc;
+	struct bnx2x_rss_config_obj *o = p->rss_obj;
+	struct bnx2x_raw_obj *r = &o->raw;
+
+	/* Do nothing if only driver cleanup was requested */
+	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags))
+		return 0;
+
+	r->set_pending(r);
+
+	rc = o->config_rss(bp, p);
+	if (rc < 0) {
+		r->clear_pending(r);
+		return rc;
+	}
+
+	if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
+		rc = r->wait_comp(bp, r);
+
+	return rc;
+}
+
+void bnx2x_init_rss_config_obj(struct bnx2x *bp,
+			       struct bnx2x_rss_config_obj *rss_obj,
+			       u8 cl_id, u32 cid, u8 func_id, u8 engine_id,
+			       void *rdata, dma_addr_t rdata_mapping,
+			       int state, unsigned long *pstate,
+			       bnx2x_obj_type type)
+{
+	bnx2x_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
+			   rdata_mapping, state, pstate, type);
+
+	rss_obj->engine_id  = engine_id;
+	rss_obj->config_rss = bnx2x_setup_rss;
+}
+
+/********************** Queue state object ***********************************/
+
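+/* A queue object is a state machine: check_transition() validates each
+ * requested command against the current state, a bit in o->pending marks
+ * the ramrod in flight, and the completion moves the object to
+ * o->next_state (see bnx2x_queue_comp_cmd()).
+ */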
+/**
+ * bnx2x_queue_state_change - perform Queue state change transition
+ *
+ * @bp:		device handle
+ * @params:	parameters to perform the transition
+ *
+ * returns 0 in case of successfully completed transition, negative error
+ * code in case of failure, positive (EBUSY) value if there is a completion
+ * that is still pending (possible only if RAMROD_COMP_WAIT is
+ * not set in params->ramrod_flags for asynchronous commands).
+ *
+ */
+int bnx2x_queue_state_change(struct bnx2x *bp,
+			     struct bnx2x_queue_state_params *params)
+{
+	struct bnx2x_queue_sp_obj *o = params->q_obj;
+	int rc, pending_bit;
+	unsigned long *pending = &o->pending;
+
+	/* Check that the requested transition is legal */
+	if (o->check_transition(bp, o, params))
+		return -EINVAL;
+
+	/* Set "pending" bit */
+	pending_bit = o->set_pending(o, params);
+
+	/* Don't send a command if only driver cleanup was requested */
+	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
+		o->complete_cmd(bp, o, pending_bit);
+	else {
+		/* Send a ramrod */
+		rc = o->send_cmd(bp, params);
+		if (rc) {
+			o->next_state = BNX2X_Q_STATE_MAX;
+			clear_bit(pending_bit, pending);
+			smp_mb__after_clear_bit();
+			return rc;
+		}
+
+		if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
+			rc = o->wait_comp(bp, o, pending_bit);
+			if (rc)
+				return rc;
+
+			return 0;
+		}
+	}
+
+	return !!test_bit(pending_bit, pending);
+}
+
+static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj,
+				   struct bnx2x_queue_state_params *params)
+{
+	enum bnx2x_queue_cmd cmd = params->cmd, bit;
+
+	/* ACTIVATE and DEACTIVATE commands are implemented on top of
+	 * UPDATE command.
+	 */
+	if ((cmd == BNX2X_Q_CMD_ACTIVATE) ||
+	    (cmd == BNX2X_Q_CMD_DEACTIVATE))
+		bit = BNX2X_Q_CMD_UPDATE;
+	else
+		bit = cmd;
+
+	set_bit(bit, &obj->pending);
+	return bit;
+}
+
+static int bnx2x_queue_wait_comp(struct bnx2x *bp,
+				 struct bnx2x_queue_sp_obj *o,
+				 enum bnx2x_queue_cmd cmd)
+{
+	return bnx2x_state_wait(bp, cmd, &o->pending);
+}
+
+/**
+ * bnx2x_queue_comp_cmd - complete the state change command.
+ *
+ * @bp:		device handle
+ * @o:		queue state object
+ * @cmd:	command that has completed
+ *
+ * Checks that the arrived completion is expected.
+ */
+static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
+				struct bnx2x_queue_sp_obj *o,
+				enum bnx2x_queue_cmd cmd)
+{
+	unsigned long cur_pending = o->pending;
+
+	if (!test_and_clear_bit(cmd, &cur_pending)) {
+		BNX2X_ERR("Bad MC reply %d for queue %d in state %d "
+			  "pending 0x%lx, next_state %d\n", cmd, o->cid,
+			  o->state, cur_pending, o->next_state);
+		return -EINVAL;
+	}
+
+	DP(BNX2X_MSG_SP, "Completing command %d for queue %d, "
+			 "setting state to %d\n", cmd, o->cid, o->next_state);
+
+	o->state = o->next_state;
+	o->next_state = BNX2X_Q_STATE_MAX;
+
+	/* It's important that o->state and o->next_state are
+	 * updated before o->pending.
+	 */
+	wmb();
+
+	clear_bit(cmd, &o->pending);
+	smp_mb__after_clear_bit();
+
+	return 0;
+}
+
+static void bnx2x_q_fill_setup_data_e2(struct bnx2x *bp,
+				struct bnx2x_queue_state_params *cmd_params,
+				struct client_init_ramrod_data *data)
+{
+	struct bnx2x_queue_setup_params *params = &cmd_params->params.setup;
+
+	/* Rx data */
+
+	/* IPv6 TPA supported for E2 and above only */
+	data->rx.tpa_en |= test_bit(BNX2X_Q_FLG_TPA, &params->flags) *
+				CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
+}
+
+static void bnx2x_q_fill_setup_data_cmn(struct bnx2x *bp,
+				struct bnx2x_queue_state_params *cmd_params,
+				struct client_init_ramrod_data *data)
+{
+	struct bnx2x_queue_sp_obj *o = cmd_params->q_obj;
+	struct bnx2x_queue_setup_params *params = &cmd_params->params.setup;
+
+
+	/* general */
+	data->general.client_id = o->cl_id;
+
+	if (test_bit(BNX2X_Q_FLG_STATS, &params->flags)) {
+		data->general.statistics_counter_id =
+					params->gen_params.stat_id;
+		data->general.statistics_en_flg = 1;
+		data->general.statistics_zero_flg =
+			test_bit(BNX2X_Q_FLG_ZERO_STATS, &params->flags);
+	} else
+		data->general.statistics_counter_id =
+					DISABLE_STATISTIC_COUNTER_ID_VALUE;
+
+	data->general.is_fcoe_flg = test_bit(BNX2X_Q_FLG_FCOE, &params->flags);
+	data->general.activate_flg = test_bit(BNX2X_Q_FLG_ACTIVE,
+					      &params->flags);
+	data->general.sp_client_id = params->gen_params.spcl_id;
+	data->general.mtu = cpu_to_le16(params->gen_params.mtu);
+	data->general.func_id = o->func_id;
+
+
+	data->general.cos = params->txq_params.cos;
+
+	data->general.traffic_type =
+		test_bit(BNX2X_Q_FLG_FCOE, &params->flags) ?
+		LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
+
+	/* Rx data */
+	data->rx.tpa_en = test_bit(BNX2X_Q_FLG_TPA, &params->flags) *
+				CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
+	data->rx.vmqueue_mode_en_flg = 0;
+
+	data->rx.cache_line_alignment_log_size =
+		params->rxq_params.cache_line_log;
+	data->rx.enable_dynamic_hc =
+		test_bit(BNX2X_Q_FLG_DHC, &params->flags);
+	data->rx.max_sges_for_packet = params->rxq_params.max_sges_pkt;
+	data->rx.client_qzone_id = params->rxq_params.cl_qzone_id;
+	data->rx.max_agg_size = cpu_to_le16(params->rxq_params.tpa_agg_sz);
+
+	/* Always start in DROP_ALL mode */
+	data->rx.state = cpu_to_le16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
+				     CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);
+
+	/* We don't set drop flags */
+	data->rx.drop_ip_cs_err_flg = 0;
+	data->rx.drop_tcp_cs_err_flg = 0;
+	data->rx.drop_ttl0_flg = 0;
+	data->rx.drop_udp_cs_err_flg = 0;
+	data->rx.inner_vlan_removal_enable_flg =
+		test_bit(BNX2X_Q_FLG_VLAN, &params->flags);
+	data->rx.outer_vlan_removal_enable_flg =
+		test_bit(BNX2X_Q_FLG_OV, &params->flags);
+	data->rx.status_block_id = params->rxq_params.fw_sb_id;
+	data->rx.rx_sb_index_number = params->rxq_params.sb_cq_index;
+	data->rx.max_tpa_queues = params->rxq_params.max_tpa_queues;
+	data->rx.max_bytes_on_bd = cpu_to_le16(params->rxq_params.buf_sz);
+	data->rx.sge_buff_size = cpu_to_le16(params->rxq_params.sge_buf_sz);
+	data->rx.bd_page_base.lo =
+		cpu_to_le32(U64_LO(params->rxq_params.dscr_map));
+	data->rx.bd_page_base.hi =
+		cpu_to_le32(U64_HI(params->rxq_params.dscr_map));
+	data->rx.sge_page_base.lo =
+		cpu_to_le32(U64_LO(params->rxq_params.sge_map));
+	data->rx.sge_page_base.hi =
+		cpu_to_le32(U64_HI(params->rxq_params.sge_map));
+	data->rx.cqe_page_base.lo =
+		cpu_to_le32(U64_LO(params->rxq_params.rcq_map));
+	data->rx.cqe_page_base.hi =
+		cpu_to_le32(U64_HI(params->rxq_params.rcq_map));
+	data->rx.is_leading_rss = test_bit(BNX2X_Q_FLG_LEADING_RSS,
+					   &params->flags);
+
+	if (test_bit(BNX2X_Q_FLG_MCAST, &params->flags)) {
+		data->rx.approx_mcast_engine_id = o->func_id;
+		data->rx.is_approx_mcast = 1;
+	}
+
+	data->rx.rss_engine_id = params->rxq_params.rss_engine_id;
+
+	/* flow control data */
+	data->rx.cqe_pause_thr_low = cpu_to_le16(params->pause.rcq_th_lo);
+	data->rx.cqe_pause_thr_high = cpu_to_le16(params->pause.rcq_th_hi);
+	data->rx.bd_pause_thr_low = cpu_to_le16(params->pause.bd_th_lo);
+	data->rx.bd_pause_thr_high = cpu_to_le16(params->pause.bd_th_hi);
+	data->rx.sge_pause_thr_low = cpu_to_le16(params->pause.sge_th_lo);
+	data->rx.sge_pause_thr_high = cpu_to_le16(params->pause.sge_th_hi);
+	data->rx.rx_cos_mask = cpu_to_le16(params->pause.pri_map);
+
+	/* silent vlan removal */
+	data->rx.silent_vlan_removal_flg =
+		test_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, &params->flags);
+	data->rx.silent_vlan_value =
+		cpu_to_le16(params->rxq_params.silent_removal_value);
+	data->rx.silent_vlan_mask =
+		cpu_to_le16(params->rxq_params.silent_removal_mask);
+
+	/* Tx data */
+	data->tx.enforce_security_flg =
+		test_bit(BNX2X_Q_FLG_TX_SEC, &params->flags);
+	data->tx.default_vlan =
+		cpu_to_le16(params->txq_params.default_vlan);
+	data->tx.default_vlan_flg =
+		test_bit(BNX2X_Q_FLG_DEF_VLAN, &params->flags);
+	data->tx.tx_switching_flg =
+		test_bit(BNX2X_Q_FLG_TX_SWITCH, &params->flags);
+	data->tx.anti_spoofing_flg =
+		test_bit(BNX2X_Q_FLG_ANTI_SPOOF, &params->flags);
+	data->tx.tx_status_block_id = params->txq_params.fw_sb_id;
+	data->tx.tx_sb_index_number = params->txq_params.sb_cq_index;
+	data->tx.tss_leading_client_id = params->txq_params.tss_leading_cl_id;
+
+	data->tx.tx_bd_page_base.lo =
+		cpu_to_le32(U64_LO(params->txq_params.dscr_map));
+	data->tx.tx_bd_page_base.hi =
+		cpu_to_le32(U64_HI(params->txq_params.dscr_map));
+
+	/* Don't configure any Tx switching mode during queue SETUP */
+	data->tx.state = 0;
+}
+
+
+/**
+ * bnx2x_q_init - init HW/FW queue
+ *
+ * @bp:		device handle
+ * @params:	queue INIT parameters
+ *
+ * HW/FW initial Queue configuration:
+ *      - HC: Rx and Tx
+ *      - CDU context validation
+ *
+ */
+static inline int bnx2x_q_init(struct bnx2x *bp,
+			       struct bnx2x_queue_state_params *params)
+{
+	struct bnx2x_queue_sp_obj *o = params->q_obj;
+	struct bnx2x_queue_init_params *init = &params->params.init;
+	u16 hc_usec;
+
+	/* Tx HC configuration */
+	if (test_bit(BNX2X_Q_TYPE_HAS_TX, &o->type) &&
+	    test_bit(BNX2X_Q_FLG_HC, &init->tx.flags)) {
+		hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;
+
+		bnx2x_update_coalesce_sb_index(bp, init->tx.fw_sb_id,
+			init->tx.sb_cq_index,
+			!test_bit(BNX2X_Q_FLG_HC_EN, &init->tx.flags),
+			hc_usec);
+	}
+
+	/* Rx HC configuration */
+	if (test_bit(BNX2X_Q_TYPE_HAS_RX, &o->type) &&
+	    test_bit(BNX2X_Q_FLG_HC, &init->rx.flags)) {
+		hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;
+
+		bnx2x_update_coalesce_sb_index(bp, init->rx.fw_sb_id,
+			init->rx.sb_cq_index,
+			!test_bit(BNX2X_Q_FLG_HC_EN, &init->rx.flags),
+			hc_usec);
+	}
+
+	/* Set CDU context validation values */
+	bnx2x_set_ctx_validation(bp, init->cxt, o->cid);
+
+	/* As no ramrod is sent, complete the command immediately  */
+	o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT);
+
+	mmiowb();
+	smp_mb();
+
+	return 0;
+}
+
+static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
+					struct bnx2x_queue_state_params *params)
+{
+	struct bnx2x_queue_sp_obj *o = params->q_obj;
+	struct client_init_ramrod_data *rdata =
+		(struct client_init_ramrod_data *)o->rdata;
+	dma_addr_t data_mapping = o->rdata_mapping;
+	int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
+
+	/* Clear the ramrod data */
+	memset(rdata, 0, sizeof(*rdata));
+
+	/* Fill the ramrod data */
+	bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
+
+	mb();
+
+	return bnx2x_sp_post(bp, ramrod, o->cid, U64_HI(data_mapping),
+			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
+					struct bnx2x_queue_state_params *params)
+{
+	struct bnx2x_queue_sp_obj *o = params->q_obj;
+	struct client_init_ramrod_data *rdata =
+		(struct client_init_ramrod_data *)o->rdata;
+	dma_addr_t data_mapping = o->rdata_mapping;
+	int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
+
+	/* Clear the ramrod data */
+	memset(rdata, 0, sizeof(*rdata));
+
+	/* Fill the ramrod data */
+	bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
+	bnx2x_q_fill_setup_data_e2(bp, params, rdata);
+
+	mb();
+
+	return bnx2x_sp_post(bp, ramrod, o->cid, U64_HI(data_mapping),
+			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
+}
+
+static void bnx2x_q_fill_update_data(struct bnx2x *bp,
+				     struct bnx2x_queue_sp_obj *obj,
+				     struct bnx2x_queue_update_params *params,
+				     struct client_update_ramrod_data *data)
+{
+	/* Client ID of the client to update */
+	data->client_id = obj->cl_id;
+
+	/* Function ID of the client to update */
+	data->func_id = obj->func_id;
+
+	/* Default VLAN value */
+	data->default_vlan = cpu_to_le16(params->def_vlan);
+
+	/* Inner VLAN stripping */
+	data->inner_vlan_removal_enable_flg =
+		test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM, &params->update_flags);
+	data->inner_vlan_removal_change_flg =
+		test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
+			 &params->update_flags);
+
+	/* Outer VLAN stripping */
+	data->outer_vlan_removal_enable_flg =
+		test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, &params->update_flags);
+	data->outer_vlan_removal_change_flg =
+		test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG,
+			 &params->update_flags);
+
+	/* Drop packets that have source MAC that doesn't belong to this
+	 * Queue.
+	 */
+	data->anti_spoofing_enable_flg =
+		test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, &params->update_flags);
+	data->anti_spoofing_change_flg =
+		test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, &params->update_flags);
+
+	/* Activate/Deactivate */
+	data->activate_flg =
+		test_bit(BNX2X_Q_UPDATE_ACTIVATE, &params->update_flags);
+	data->activate_change_flg =
+		test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &params->update_flags);
+
+	/* Enable default VLAN */
+	data->default_vlan_enable_flg =
+		test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, &params->update_flags);
+	data->default_vlan_change_flg =
+		test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
+			 &params->update_flags);
+
+	/* silent vlan removal */
+	data->silent_vlan_change_flg =
+		test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
+			 &params->update_flags);
+	data->silent_vlan_removal_flg =
+		test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, &params->update_flags);
+	data->silent_vlan_value = cpu_to_le16(params->silent_removal_value);
+	data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask);
+}
+
+static inline int bnx2x_q_send_update(struct bnx2x *bp,
+				      struct bnx2x_queue_state_params *params)
+{
+	struct bnx2x_queue_sp_obj *o = params->q_obj;
+	struct client_update_ramrod_data *rdata =
+		(struct client_update_ramrod_data *)o->rdata;
+	dma_addr_t data_mapping = o->rdata_mapping;
+
+	/* Clear the ramrod data */
+	memset(rdata, 0, sizeof(*rdata));
+
+	/* Fill the ramrod data */
+	bnx2x_q_fill_update_data(bp, o, &params->params.update, rdata);
+
+	mb();
+
+	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE, o->cid,
+			     U64_HI(data_mapping),
+			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
+}
+
+/**
+ * bnx2x_q_send_deactivate - send DEACTIVATE command
+ *
+ * @bp:		device handle
+ * @params:	queue state parameters
+ *
+ * Implemented on top of the UPDATE command.
+ */
+static inline int bnx2x_q_send_deactivate(struct bnx2x *bp,
+					struct bnx2x_queue_state_params *params)
+{
+	struct bnx2x_queue_update_params *update = &params->params.update;
+
+	memset(update, 0, sizeof(*update));
+
+	__set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
+
+	return bnx2x_q_send_update(bp, params);
+}
+
+/**
+ * bnx2x_q_send_activate - send ACTIVATE command
+ *
+ * @bp:		device handle
+ * @params:	queue state parameters
+ *
+ * Implemented on top of the UPDATE command.
+ */
+static inline int bnx2x_q_send_activate(struct bnx2x *bp,
+					struct bnx2x_queue_state_params *params)
+{
+	struct bnx2x_queue_update_params *update = &params->params.update;
+
+	memset(update, 0, sizeof(*update));
+
+	__set_bit(BNX2X_Q_UPDATE_ACTIVATE, &update->update_flags);
+	__set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
+
+	return bnx2x_q_send_update(bp, params);
+}
+
+static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp,
+					struct bnx2x_queue_state_params *params)
+{
+	/* TODO: Not implemented yet. */
+	return -1;
+}
+
+static inline int bnx2x_q_send_halt(struct bnx2x *bp,
+				    struct bnx2x_queue_state_params *params)
+{
+	struct bnx2x_queue_sp_obj *o = params->q_obj;
+
+	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, o->cid, 0, o->cl_id,
+			     ETH_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_q_send_cfc_del(struct bnx2x *bp,
+				       struct bnx2x_queue_state_params *params)
+{
+	struct bnx2x_queue_sp_obj *o = params->q_obj;
+
+	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, o->cid, 0, 0,
+			     NONE_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_q_send_terminate(struct bnx2x *bp,
+					struct bnx2x_queue_state_params *params)
+{
+	struct bnx2x_queue_sp_obj *o = params->q_obj;
+
+	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, o->cid, 0, 0,
+			     ETH_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_q_send_empty(struct bnx2x *bp,
+				     struct bnx2x_queue_state_params *params)
+{
+	struct bnx2x_queue_sp_obj *o = params->q_obj;
+
+	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_EMPTY, o->cid, 0, 0,
+			     ETH_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_queue_send_cmd_cmn(struct bnx2x *bp,
+					struct bnx2x_queue_state_params *params)
+{
+	switch (params->cmd) {
+	case BNX2X_Q_CMD_INIT:
+		return bnx2x_q_init(bp, params);
+	case BNX2X_Q_CMD_DEACTIVATE:
+		return bnx2x_q_send_deactivate(bp, params);
+	case BNX2X_Q_CMD_ACTIVATE:
+		return bnx2x_q_send_activate(bp, params);
+	case BNX2X_Q_CMD_UPDATE:
+		return bnx2x_q_send_update(bp, params);
+	case BNX2X_Q_CMD_UPDATE_TPA:
+		return bnx2x_q_send_update_tpa(bp, params);
+	case BNX2X_Q_CMD_HALT:
+		return bnx2x_q_send_halt(bp, params);
+	case BNX2X_Q_CMD_CFC_DEL:
+		return bnx2x_q_send_cfc_del(bp, params);
+	case BNX2X_Q_CMD_TERMINATE:
+		return bnx2x_q_send_terminate(bp, params);
+	case BNX2X_Q_CMD_EMPTY:
+		return bnx2x_q_send_empty(bp, params);
+	default:
+		BNX2X_ERR("Unknown command: %d\n", params->cmd);
+		return -EINVAL;
+	}
+}
+
+static int bnx2x_queue_send_cmd_e1x(struct bnx2x *bp,
+				    struct bnx2x_queue_state_params *params)
+{
+	switch (params->cmd) {
+	case BNX2X_Q_CMD_SETUP:
+		return bnx2x_q_send_setup_e1x(bp, params);
+	case BNX2X_Q_CMD_INIT:
+	case BNX2X_Q_CMD_DEACTIVATE:
+	case BNX2X_Q_CMD_ACTIVATE:
+	case BNX2X_Q_CMD_UPDATE:
+	case BNX2X_Q_CMD_UPDATE_TPA:
+	case BNX2X_Q_CMD_HALT:
+	case BNX2X_Q_CMD_CFC_DEL:
+	case BNX2X_Q_CMD_TERMINATE:
+	case BNX2X_Q_CMD_EMPTY:
+		return bnx2x_queue_send_cmd_cmn(bp, params);
+	default:
+		BNX2X_ERR("Unknown command: %d\n", params->cmd);
+		return -EINVAL;
+	}
+}
+
+static int bnx2x_queue_send_cmd_e2(struct bnx2x *bp,
+				   struct bnx2x_queue_state_params *params)
+{
+	switch (params->cmd) {
+	case BNX2X_Q_CMD_SETUP:
+		return bnx2x_q_send_setup_e2(bp, params);
+	case BNX2X_Q_CMD_INIT:
+	case BNX2X_Q_CMD_DEACTIVATE:
+	case BNX2X_Q_CMD_ACTIVATE:
+	case BNX2X_Q_CMD_UPDATE:
+	case BNX2X_Q_CMD_UPDATE_TPA:
+	case BNX2X_Q_CMD_HALT:
+	case BNX2X_Q_CMD_CFC_DEL:
+	case BNX2X_Q_CMD_TERMINATE:
+	case BNX2X_Q_CMD_EMPTY:
+		return bnx2x_queue_send_cmd_cmn(bp, params);
+	default:
+		BNX2X_ERR("Unknown command: %d\n", params->cmd);
+		return -EINVAL;
+	}
+}
+
+/**
+ * bnx2x_queue_chk_transition - check the state machine of a regular
+ * (non-Forwarding) Queue
+ *
+ * @bp:		device handle
+ * @o:		queue state object
+ * @params:	queue state parameters
+ *
+ * Checks whether the requested command is legal in the current state
+ * and, if it is, sets a `next_state' in the object that will be used
+ * in the completion flow to set the `state' of the object.
+ *
+ * Returns 0 if the requested command is a legal transition,
+ *         -EINVAL otherwise.
+ */
+static int bnx2x_queue_chk_transition(struct bnx2x *bp,
+				      struct bnx2x_queue_sp_obj *o,
+				      struct bnx2x_queue_state_params *params)
+{
+	enum bnx2x_q_state state = o->state, next_state = BNX2X_Q_STATE_MAX;
+	enum bnx2x_queue_cmd cmd = params->cmd;
+
+	switch (state) {
+	case BNX2X_Q_STATE_RESET:
+		if (cmd == BNX2X_Q_CMD_INIT)
+			next_state = BNX2X_Q_STATE_INITIALIZED;
+
+		break;
+	case BNX2X_Q_STATE_INITIALIZED:
+		if (cmd == BNX2X_Q_CMD_SETUP) {
+			if (test_bit(BNX2X_Q_FLG_ACTIVE,
+				     &params->params.setup.flags))
+				next_state = BNX2X_Q_STATE_ACTIVE;
+			else
+				next_state = BNX2X_Q_STATE_INACTIVE;
+		}
+
+		break;
+	case BNX2X_Q_STATE_ACTIVE:
+		if (cmd == BNX2X_Q_CMD_DEACTIVATE)
+			next_state = BNX2X_Q_STATE_INACTIVE;
+
+		else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
+			 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
+			next_state = BNX2X_Q_STATE_ACTIVE;
+
+		else if (cmd == BNX2X_Q_CMD_HALT)
+			next_state = BNX2X_Q_STATE_STOPPED;
+
+		else if (cmd == BNX2X_Q_CMD_UPDATE) {
+			struct bnx2x_queue_update_params *update_params =
+				&params->params.update;
+
+			/* If "active" state change is requested, update the
+			 *  state accordingly.
+			 */
+			if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
+				     &update_params->update_flags) &&
+			    !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
+				      &update_params->update_flags))
+				next_state = BNX2X_Q_STATE_INACTIVE;
+			else
+				next_state = BNX2X_Q_STATE_ACTIVE;
+		}
+
+		break;
+	case BNX2X_Q_STATE_INACTIVE:
+		if (cmd == BNX2X_Q_CMD_ACTIVATE)
+			next_state = BNX2X_Q_STATE_ACTIVE;
+
+		else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
+			 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
+			next_state = BNX2X_Q_STATE_INACTIVE;
+
+		else if (cmd == BNX2X_Q_CMD_HALT)
+			next_state = BNX2X_Q_STATE_STOPPED;
+
+		else if (cmd == BNX2X_Q_CMD_UPDATE) {
+			struct bnx2x_queue_update_params *update_params =
+				&params->params.update;
+
+			/* If "active" state change is requested, update the
+			 * state accordingly.
+			 */
+			if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
+				     &update_params->update_flags) &&
+			    test_bit(BNX2X_Q_UPDATE_ACTIVATE,
+				     &update_params->update_flags))
+				next_state = BNX2X_Q_STATE_ACTIVE;
+			else
+				next_state = BNX2X_Q_STATE_INACTIVE;
+		}
+
+		break;
+	case BNX2X_Q_STATE_STOPPED:
+		if (cmd == BNX2X_Q_CMD_TERMINATE)
+			next_state = BNX2X_Q_STATE_TERMINATED;
+
+		break;
+	case BNX2X_Q_STATE_TERMINATED:
+		if (cmd == BNX2X_Q_CMD_CFC_DEL)
+			next_state = BNX2X_Q_STATE_RESET;
+
+		break;
+	default:
+		BNX2X_ERR("Illegal state: %d\n", state);
+	}
+
+	/* Transition is allowed */
+	if (next_state != BNX2X_Q_STATE_MAX) {
+		DP(BNX2X_MSG_SP, "Good state transition: %d(%d)->%d\n",
+				 state, cmd, next_state);
+		o->next_state = next_state;
+		return 0;
+	}
+
+	DP(BNX2X_MSG_SP, "Bad state transition request: %d %d\n", state, cmd);
+
+	return -EINVAL;
+}
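
Read together, the checks above define the following state diagram (a summary added for readability, derived directly from the switch statement):

/*
 * Regular Queue state machine, as enforced by the checks above:
 *
 *   RESET --INIT--> INITIALIZED --SETUP--> ACTIVE or INACTIVE
 *   ACTIVE  <--ACTIVATE/DEACTIVATE or UPDATE-->  INACTIVE
 *   ACTIVE/INACTIVE --EMPTY/UPDATE_TPA--> (state unchanged)
 *   ACTIVE/INACTIVE --HALT--> STOPPED --TERMINATE--> TERMINATED
 *   TERMINATED --CFC_DEL--> RESET
 */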
+
+void bnx2x_init_queue_obj(struct bnx2x *bp,
+			  struct bnx2x_queue_sp_obj *obj,
+			  u8 cl_id, u32 cid, u8 func_id, void *rdata,
+			  dma_addr_t rdata_mapping, unsigned long type)
+{
+	memset(obj, 0, sizeof(*obj));
+
+	obj->cid = cid;
+	obj->cl_id = cl_id;
+	obj->func_id = func_id;
+	obj->rdata = rdata;
+	obj->rdata_mapping = rdata_mapping;
+	obj->type = type;
+	obj->next_state = BNX2X_Q_STATE_MAX;
+
+	if (CHIP_IS_E1x(bp))
+		obj->send_cmd = bnx2x_queue_send_cmd_e1x;
+	else
+		obj->send_cmd = bnx2x_queue_send_cmd_e2;
+
+	obj->check_transition = bnx2x_queue_chk_transition;
+
+	obj->complete_cmd = bnx2x_queue_comp_cmd;
+	obj->wait_comp = bnx2x_queue_wait_comp;
+	obj->set_pending = bnx2x_queue_set_pending;
+}
+
+/********************** Function state object *********************************/
+
+static int bnx2x_func_wait_comp(struct bnx2x *bp,
+				struct bnx2x_func_sp_obj *o,
+				enum bnx2x_func_cmd cmd)
+{
+	return bnx2x_state_wait(bp, cmd, &o->pending);
+}
+
+/**
+ * bnx2x_func_state_change_comp - complete the state machine transition
+ *
+ * @bp:		device handle
+ * @o:		function state object
+ * @cmd:	command that has completed
+ *
+ * Called on state change transition. Completes the state
+ * machine transition only - no HW interaction.
+ */
+static inline int bnx2x_func_state_change_comp(struct bnx2x *bp,
+					       struct bnx2x_func_sp_obj *o,
+					       enum bnx2x_func_cmd cmd)
+{
+	unsigned long cur_pending = o->pending;
+
+	if (!test_and_clear_bit(cmd, &cur_pending)) {
+		BNX2X_ERR("Bad MC reply %d for func %d in state %d "
+			  "pending 0x%lx, next_state %d\n", cmd, BP_FUNC(bp),
+			  o->state, cur_pending, o->next_state);
+		return -EINVAL;
+	}
+
+	DP(BNX2X_MSG_SP, "Completing command %d for func %d, setting state to "
+			 "%d\n", cmd, BP_FUNC(bp), o->next_state);
+
+	o->state = o->next_state;
+	o->next_state = BNX2X_F_STATE_MAX;
+
+	/* It's important that o->state and o->next_state are
+	 * updated before o->pending.
+	 */
+	wmb();
+
+	clear_bit(cmd, &o->pending);
+	smp_mb__after_clear_bit();
+
+	return 0;
+}
+
+/**
+ * bnx2x_func_comp_cmd - complete the state change command
+ *
+ * @bp:		device handle
+ * @o:		function state object
+ * @cmd:	command that has completed
+ *
+ * Checks that the arrived completion is expected.
+ */
+static int bnx2x_func_comp_cmd(struct bnx2x *bp,
+			       struct bnx2x_func_sp_obj *o,
+			       enum bnx2x_func_cmd cmd)
+{
+	/* Complete the state machine part first, check if it's a
+	 * legal completion.
+	 */
+	int rc = bnx2x_func_state_change_comp(bp, o, cmd);
+	return rc;
+}
+
+/**
+ * bnx2x_func_chk_transition - perform function state machine transition
+ *
+ * @bp:		device handle
+ * @o:		function state object
+ * @params:	function state parameters
+ *
+ * Checks whether the requested command is legal in the current state
+ * and, if it is, sets a `next_state' in the object that will be used
+ * in the completion flow to set the `state' of the object.
+ *
+ * Returns 0 if the requested command is a legal transition,
+ *         -EINVAL otherwise.
+ */
+static int bnx2x_func_chk_transition(struct bnx2x *bp,
+				     struct bnx2x_func_sp_obj *o,
+				     struct bnx2x_func_state_params *params)
+{
+	enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX;
+	enum bnx2x_func_cmd cmd = params->cmd;
+
+	switch (state) {
+	case BNX2X_F_STATE_RESET:
+		if (cmd == BNX2X_F_CMD_HW_INIT)
+			next_state = BNX2X_F_STATE_INITIALIZED;
+
+		break;
+	case BNX2X_F_STATE_INITIALIZED:
+		if (cmd == BNX2X_F_CMD_START)
+			next_state = BNX2X_F_STATE_STARTED;
+
+		else if (cmd == BNX2X_F_CMD_HW_RESET)
+			next_state = BNX2X_F_STATE_RESET;
+
+		break;
+	case BNX2X_F_STATE_STARTED:
+		if (cmd == BNX2X_F_CMD_STOP)
+			next_state = BNX2X_F_STATE_INITIALIZED;
+
+		break;
+	default:
+		BNX2X_ERR("Unknown state: %d\n", state);
+	}
+
+	/* Transition is allowed */
+	if (next_state != BNX2X_F_STATE_MAX) {
+		DP(BNX2X_MSG_SP, "Good function state transition: %d(%d)->%d\n",
+				 state, cmd, next_state);
+		o->next_state = next_state;
+		return 0;
+	}
+
+	DP(BNX2X_MSG_SP, "Bad function state transition request: %d %d\n",
+			 state, cmd);
+
+	return -EINVAL;
+}
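
Again as a reading aid, the function state machine enforced above is simply:

/*
 * Function state machine, as enforced by the checks above:
 *
 *   RESET --HW_INIT--> INITIALIZED --START--> STARTED
 *   STARTED --STOP--> INITIALIZED --HW_RESET--> RESET
 */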
+
+/**
+ * bnx2x_func_init_func - performs HW init at function stage
+ *
+ * @bp:		device handle
+ * @drv:	driver specific operations
+ *
+ * Init HW when the current phase is
+ * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize FUNCTION-only
+ * HW blocks.
+ */
+static inline int bnx2x_func_init_func(struct bnx2x *bp,
+				       const struct bnx2x_func_sp_drv_ops *drv)
+{
+	return drv->init_hw_func(bp);
+}
+
+/**
+ * bnx2x_func_init_port - performs HW init at port stage
+ *
+ * @bp:		device handle
+ * @drv:	driver specific operations
+ *
+ * Init HW when the current phase is
+ * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
+ * FUNCTION-only HW blocks.
+ *
+ */
+static inline int bnx2x_func_init_port(struct bnx2x *bp,
+				       const struct bnx2x_func_sp_drv_ops *drv)
+{
+	int rc = drv->init_hw_port(bp);
+	if (rc)
+		return rc;
+
+	return bnx2x_func_init_func(bp, drv);
+}
+
+/**
+ * bnx2x_func_init_cmn_chip - performs HW init at chip-common stage
+ *
+ * @bp:		device handle
+ * @drv:	driver specific operations
+ *
+ * Init HW when the current phase is
+ * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
+ * PORT-only and FUNCTION-only HW blocks.
+ */
+static inline int bnx2x_func_init_cmn_chip(struct bnx2x *bp,
+					const struct bnx2x_func_sp_drv_ops *drv)
+{
+	int rc = drv->init_hw_cmn_chip(bp);
+	if (rc)
+		return rc;
+
+	return bnx2x_func_init_port(bp, drv);
+}
+
+/**
+ * bnx2x_func_init_cmn - performs HW init at common stage
+ *
+ * @bp:		device handle
+ * @drv:	driver specific operations
+ *
+ * Init HW when the current phase is
+ * FW_MSG_CODE_DRV_LOAD_COMMON: initialize COMMON,
+ * PORT-only and FUNCTION-only HW blocks.
+ */
+static inline int bnx2x_func_init_cmn(struct bnx2x *bp,
+				      const struct bnx2x_func_sp_drv_ops *drv)
+{
+	int rc = drv->init_hw_cmn(bp);
+	if (rc)
+		return rc;
+
+	return bnx2x_func_init_port(bp, drv);
+}
+
+static int bnx2x_func_hw_init(struct bnx2x *bp,
+			      struct bnx2x_func_state_params *params)
+{
+	u32 load_code = params->params.hw_init.load_phase;
+	struct bnx2x_func_sp_obj *o = params->f_obj;
+	const struct bnx2x_func_sp_drv_ops *drv = o->drv;
+	int rc = 0;
+
+	DP(BNX2X_MSG_SP, "function %d  load_code %x\n",
+			 BP_ABS_FUNC(bp), load_code);
+
+	/* Prepare buffers for unzipping the FW */
+	rc = drv->gunzip_init(bp);
+	if (rc)
+		return rc;
+
+	/* Prepare FW */
+	rc = drv->init_fw(bp);
+	if (rc) {
+		BNX2X_ERR("Error loading firmware\n");
+		goto fw_init_err;
+	}
+
+	/* Handle the beginning of COMMON_XXX phases separately... */
+	switch (load_code) {
+	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
+		rc = bnx2x_func_init_cmn_chip(bp, drv);
+		if (rc)
+			goto init_hw_err;
+
+		break;
+	case FW_MSG_CODE_DRV_LOAD_COMMON:
+		rc = bnx2x_func_init_cmn(bp, drv);
+		if (rc)
+			goto init_hw_err;
+
+		break;
+	case FW_MSG_CODE_DRV_LOAD_PORT:
+		rc = bnx2x_func_init_port(bp, drv);
+		if (rc)
+			goto init_hw_err;
+
+		break;
+	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
+		rc = bnx2x_func_init_func(bp, drv);
+		if (rc)
+			goto init_hw_err;
+
+		break;
+	default:
+		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
+		rc = -EINVAL;
+	}
+
+init_hw_err:
+	drv->release_fw(bp);
+
+fw_init_err:
+	drv->gunzip_end(bp);
+
+	/* In case of success, complete the command immediately: no ramrods
+	 * have been sent.
+	 */
+	if (!rc)
+		o->complete_cmd(bp, o, BNX2X_F_CMD_HW_INIT);
+
+	return rc;
+}
+
+/**
+ * bnx2x_func_reset_func - reset HW at function stage
+ *
+ * @bp:		device handle
+ * @drv:	driver specific operations
+ *
+ * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
+ * FUNCTION-only HW blocks.
+ */
+static inline void bnx2x_func_reset_func(struct bnx2x *bp,
+					const struct bnx2x_func_sp_drv_ops *drv)
+{
+	drv->reset_hw_func(bp);
+}
+
+/**
+ * bnx2x_func_reset_port - reset HW at port stage
+ *
+ * @bp:		device handle
+ * @drv:	driver specific operations
+ *
+ * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
+ * FUNCTION-only and PORT-only HW blocks.
+ *
+ *                 !!!IMPORTANT!!!
+ *
+ * It's important to call reset_port before reset_func() because the last
+ * thing reset_func() does is pf_disable(), which disables PGLUE_B and
+ * thus makes any further DMAE transactions impossible.
+ */
+static inline void bnx2x_func_reset_port(struct bnx2x *bp,
+					const struct bnx2x_func_sp_drv_ops *drv)
+{
+	drv->reset_hw_port(bp);
+	bnx2x_func_reset_func(bp, drv);
+}
+
+/**
+ * bnx2x_func_reset_cmn - reset HW at common stage
+ *
+ * @bp:		device handle
+ * @drv:	driver specific operations
+ *
+ * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
+ * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
+ * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
+ */
+static inline void bnx2x_func_reset_cmn(struct bnx2x *bp,
+					const struct bnx2x_func_sp_drv_ops *drv)
+{
+	bnx2x_func_reset_port(bp, drv);
+	drv->reset_hw_cmn(bp);
+}
+
+
+static inline int bnx2x_func_hw_reset(struct bnx2x *bp,
+				      struct bnx2x_func_state_params *params)
+{
+	u32 reset_phase = params->params.hw_reset.reset_phase;
+	struct bnx2x_func_sp_obj *o = params->f_obj;
+	const struct bnx2x_func_sp_drv_ops *drv = o->drv;
+
+	DP(BNX2X_MSG_SP, "function %d  reset_phase %x\n", BP_ABS_FUNC(bp),
+			 reset_phase);
+
+	switch (reset_phase) {
+	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
+		bnx2x_func_reset_cmn(bp, drv);
+		break;
+	case FW_MSG_CODE_DRV_UNLOAD_PORT:
+		bnx2x_func_reset_port(bp, drv);
+		break;
+	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
+		bnx2x_func_reset_func(bp, drv);
+		break;
+	default:
+		BNX2X_ERR("Unknown reset_phase (0x%x) from MCP\n",
+			   reset_phase);
+		break;
+	}
+
+	/* Complete the command immediately: no ramrods have been sent. */
+	o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET);
+
+	return 0;
+}
+
+static inline int bnx2x_func_send_start(struct bnx2x *bp,
+					struct bnx2x_func_state_params *params)
+{
+	struct bnx2x_func_sp_obj *o = params->f_obj;
+	struct function_start_data *rdata =
+		(struct function_start_data *)o->rdata;
+	dma_addr_t data_mapping = o->rdata_mapping;
+	struct bnx2x_func_start_params *start_params = &params->params.start;
+
+	memset(rdata, 0, sizeof(*rdata));
+
+	/* Fill the ramrod data with provided parameters */
+	rdata->function_mode = cpu_to_le16(start_params->mf_mode);
+	rdata->sd_vlan_tag   = start_params->sd_vlan_tag;
+	rdata->path_id       = BP_PATH(bp);
+	rdata->network_cos_mode = start_params->network_cos_mode;
+
+	mb();
+
+	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
+			     U64_HI(data_mapping),
+			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_func_send_stop(struct bnx2x *bp,
+				       struct bnx2x_func_state_params *params)
+{
+	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0,
+			     NONE_CONNECTION_TYPE);
+}
+
+static int bnx2x_func_send_cmd(struct bnx2x *bp,
+			       struct bnx2x_func_state_params *params)
+{
+	switch (params->cmd) {
+	case BNX2X_F_CMD_HW_INIT:
+		return bnx2x_func_hw_init(bp, params);
+	case BNX2X_F_CMD_START:
+		return bnx2x_func_send_start(bp, params);
+	case BNX2X_F_CMD_STOP:
+		return bnx2x_func_send_stop(bp, params);
+	case BNX2X_F_CMD_HW_RESET:
+		return bnx2x_func_hw_reset(bp, params);
+	default:
+		BNX2X_ERR("Unknown command: %d\n", params->cmd);
+		return -EINVAL;
+	}
+}
+
+void bnx2x_init_func_obj(struct bnx2x *bp,
+			 struct bnx2x_func_sp_obj *obj,
+			 void *rdata, dma_addr_t rdata_mapping,
+			 struct bnx2x_func_sp_drv_ops *drv_iface)
+{
+	memset(obj, 0, sizeof(*obj));
+
+	mutex_init(&obj->one_pending_mutex);
+
+	obj->rdata = rdata;
+	obj->rdata_mapping = rdata_mapping;
+
+	obj->send_cmd = bnx2x_func_send_cmd;
+	obj->check_transition = bnx2x_func_chk_transition;
+	obj->complete_cmd = bnx2x_func_comp_cmd;
+	obj->wait_comp = bnx2x_func_wait_comp;
+
+	obj->drv = drv_iface;
+}
+
+/**
+ * bnx2x_func_state_change - perform Function state change transition
+ *
+ * @bp:		device handle
+ * @params:	parameters to perform the transition
+ *
+ * Returns 0 in case of a successfully completed transition,
+ *         a negative error code in case of failure, or a positive
+ *         (EBUSY) value if there is a completion that is
+ *         still pending (possible only if RAMROD_COMP_WAIT is
+ *         not set in params->ramrod_flags for asynchronous
+ *         commands).
+ */
+int bnx2x_func_state_change(struct bnx2x *bp,
+			    struct bnx2x_func_state_params *params)
+{
+	struct bnx2x_func_sp_obj *o = params->f_obj;
+	int rc;
+	enum bnx2x_func_cmd cmd = params->cmd;
+	unsigned long *pending = &o->pending;
+
+	mutex_lock(&o->one_pending_mutex);
+
+	/* Check that the requested transition is legal */
+	if (o->check_transition(bp, o, params)) {
+		mutex_unlock(&o->one_pending_mutex);
+		return -EINVAL;
+	}
+
+	/* Set "pending" bit */
+	set_bit(cmd, pending);
+
+	/* Don't send a command if only driver cleanup was requested */
+	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
+		bnx2x_func_state_change_comp(bp, o, cmd);
+		mutex_unlock(&o->one_pending_mutex);
+	} else {
+		/* Send a ramrod */
+		rc = o->send_cmd(bp, params);
+
+		mutex_unlock(&o->one_pending_mutex);
+
+		if (rc) {
+			o->next_state = BNX2X_F_STATE_MAX;
+			clear_bit(cmd, pending);
+			smp_mb__after_clear_bit();
+			return rc;
+		}
+
+		if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
+			rc = o->wait_comp(bp, o, cmd);
+			if (rc)
+				return rc;
+
+			return 0;
+		}
+	}
+
+	return !!test_bit(cmd, pending);
+}
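
A caller-side sketch for the function state machine (illustrative only; the function name and the mf_mode source are assumptions): starting a function with synchronous completion mirrors the queue flow above.

/* Illustrative sketch, not part of the patch. */
static int example_func_start(struct bnx2x *bp,
			      struct bnx2x_func_sp_obj *f_obj,
			      u16 mf_mode)
{
	struct bnx2x_func_state_params params = {0};

	params.f_obj = f_obj;
	params.cmd = BNX2X_F_CMD_START;
	params.params.start.mf_mode = mf_mode;

	/* Block until the FUNCTION_START ramrod completes */
	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);

	return bnx2x_func_state_change(bp, &params);
}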
diff --git a/drivers/net/bnx2x/bnx2x_sp.h b/drivers/net/bnx2x/bnx2x_sp.h
new file mode 100644
index 0000000..86eaa80
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_sp.h
@@ -0,0 +1,1235 @@
+/* bnx2x_sp.h: Broadcom Everest network driver.
+ *
+ * Copyright 2011 Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a
+ * license other than the GPL, without Broadcom's express prior written
+ * consent.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Vladislav Zolotarov
+ *
+ */
+#ifndef BNX2X_SP_VERBS
+#define BNX2X_SP_VERBS
+
+struct bnx2x;
+struct eth_context;
+
+/* Bits representing general command's configuration */
+enum {
+	RAMROD_TX,
+	RAMROD_RX,
+	/* Wait until all pending commands complete */
+	RAMROD_COMP_WAIT,
+	/* Don't send a ramrod, only update a registry */
+	RAMROD_DRV_CLR_ONLY,
+	/* Configure HW according to the current object state */
+	RAMROD_RESTORE,
+	 /* Execute the next command now */
+	RAMROD_EXEC,
+	/*
+	 * Don't add a new command and continue execution of postponed
+	 * commands. If not set, a new command will be added to the
+	 * pending commands list.
+	 */
+	RAMROD_CONT,
+};
+
+typedef enum {
+	BNX2X_OBJ_TYPE_RX,
+	BNX2X_OBJ_TYPE_TX,
+	BNX2X_OBJ_TYPE_RX_TX,
+} bnx2x_obj_type;
+
+/* Filtering states */
+enum {
+	BNX2X_FILTER_MAC_PENDING,
+	BNX2X_FILTER_VLAN_PENDING,
+	BNX2X_FILTER_VLAN_MAC_PENDING,
+	BNX2X_FILTER_RX_MODE_PENDING,
+	BNX2X_FILTER_RX_MODE_SCHED,
+	BNX2X_FILTER_ISCSI_ETH_START_SCHED,
+	BNX2X_FILTER_ISCSI_ETH_STOP_SCHED,
+	BNX2X_FILTER_FCOE_ETH_START_SCHED,
+	BNX2X_FILTER_FCOE_ETH_STOP_SCHED,
+	BNX2X_FILTER_MCAST_PENDING,
+	BNX2X_FILTER_MCAST_SCHED,
+	BNX2X_FILTER_RSS_CONF_PENDING,
+};
+
+struct bnx2x_raw_obj {
+	u8		func_id;
+
+	/* Queue params */
+	u8		cl_id;
+	u32		cid;
+
+	/* Ramrod data buffer params */
+	void		*rdata;
+	dma_addr_t	rdata_mapping;
+
+	/* Ramrod state params */
+	int		state;   /* "ramrod is pending" state bit */
+	unsigned long	*pstate; /* pointer to state buffer */
+
+	bnx2x_obj_type	obj_type;
+
+	int (*wait_comp)(struct bnx2x *bp,
+			 struct bnx2x_raw_obj *o);
+
+	bool (*check_pending)(struct bnx2x_raw_obj *o);
+	void (*clear_pending)(struct bnx2x_raw_obj *o);
+	void (*set_pending)(struct bnx2x_raw_obj *o);
+};
+
+/************************* VLAN-MAC commands related parameters ***************/
+struct bnx2x_mac_ramrod_data {
+	u8 mac[ETH_ALEN];
+};
+
+struct bnx2x_vlan_ramrod_data {
+	u16 vlan;
+};
+
+struct bnx2x_vlan_mac_ramrod_data {
+	u8 mac[ETH_ALEN];
+	u16 vlan;
+};
+
+union bnx2x_classification_ramrod_data {
+	struct bnx2x_mac_ramrod_data mac;
+	struct bnx2x_vlan_ramrod_data vlan;
+	struct bnx2x_vlan_mac_ramrod_data vlan_mac;
+};
+
+/* VLAN_MAC commands */
+enum bnx2x_vlan_mac_cmd {
+	BNX2X_VLAN_MAC_ADD,
+	BNX2X_VLAN_MAC_DEL,
+	BNX2X_VLAN_MAC_MOVE,
+};
+
+struct bnx2x_vlan_mac_data {
+	/* Requested command: BNX2X_VLAN_MAC_XX */
+	enum bnx2x_vlan_mac_cmd cmd;
+	/*
+	 * Used to carry the data-related vlan_mac_flags bits from the
+	 * ramrod parameters.
+	 */
+	unsigned long vlan_mac_flags;
+
+	/* Needed for MOVE command */
+	struct bnx2x_vlan_mac_obj *target_obj;
+
+	union bnx2x_classification_ramrod_data u;
+};
+
+/*************************** Exe Queue obj ************************************/
+union bnx2x_exe_queue_cmd_data {
+	struct bnx2x_vlan_mac_data vlan_mac;
+
+	struct {
+		/* TODO */
+	} mcast;
+};
+
+struct bnx2x_exeq_elem {
+	struct list_head		link;
+
+	/* Length of this element in the exe_chunk. */
+	int				cmd_len;
+
+	union bnx2x_exe_queue_cmd_data	cmd_data;
+};
+
+union bnx2x_qable_obj;
+
+union bnx2x_exeq_comp_elem {
+	union event_ring_elem *elem;
+};
+
+struct bnx2x_exe_queue_obj;
+
+typedef int (*exe_q_validate)(struct bnx2x *bp,
+			      union bnx2x_qable_obj *o,
+			      struct bnx2x_exeq_elem *elem);
+
+/**
+ * @return positive if the entry was optimized, 0 if not, negative
+ *         in case of an error.
+ */
+typedef int (*exe_q_optimize)(struct bnx2x *bp,
+			      union bnx2x_qable_obj *o,
+			      struct bnx2x_exeq_elem *elem);
+typedef int (*exe_q_execute)(struct bnx2x *bp,
+			     union bnx2x_qable_obj *o,
+			     struct list_head *exe_chunk,
+			     unsigned long *ramrod_flags);
+typedef struct bnx2x_exeq_elem *
+			(*exe_q_get)(struct bnx2x_exe_queue_obj *o,
+				     struct bnx2x_exeq_elem *elem);
+
+struct bnx2x_exe_queue_obj {
+	/*
+	 * Commands pending execution.
+	 */
+	struct list_head	exe_queue;
+
+	/*
+	 * Commands pending completion.
+	 */
+	struct list_head	pending_comp;
+
+	spinlock_t		lock;
+
+	/* Maximum length of commands' list for one execution */
+	int			exe_chunk_len;
+
+	union bnx2x_qable_obj	*owner;
+
+	/****** Virtual functions ******/
+	/**
+	 * Called before commands execution for commands that are really
+	 * going to be executed (after 'optimize').
+	 *
+	 * Must run under exe_queue->lock
+	 */
+	exe_q_validate		validate;
+
+
+	/**
+	 * This will try to cancel the current pending commands list
+	 * considering the new command.
+	 *
+	 * Must run under exe_queue->lock
+	 */
+	exe_q_optimize		optimize;
+
+	/**
+	 * Run the next commands chunk (owner specific).
+	 */
+	exe_q_execute		execute;
+
+	/**
+	 * Return the exe_queue element containing the specific command
+	 * if any. Otherwise return NULL.
+	 */
+	exe_q_get		get;
+};
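
The intended flow through these callbacks (an assumption inferred from the comments above, not a verbatim copy of the .c implementation) is roughly:

/*
 * Sketch of the per-command flow, inferred from the documentation above:
 *
 *   spin_lock(&o->lock);
 *   o->optimize(bp, o->owner, elem);    -- try to cancel a pending command
 *   o->validate(bp, o->owner, elem);    -- check per-command feasibility
 *   list_add_tail(&elem->link, &o->exe_queue);
 *   ...
 *   o->execute(bp, o->owner, &exe_chunk, &ramrod_flags);
 *   spin_unlock(&o->lock);
 */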
+/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
+/*
+ * Element in the VLAN_MAC registry list holding all currently configured
+ * rules.
+ */
+struct bnx2x_vlan_mac_registry_elem {
+	struct list_head	link;
+
+	/*
+	 * Used to store the cam offset used for the mac/vlan/vlan-mac.
+	 * Relevant for 57710 and 57711 only. VLANs and MACs share the
+	 * same CAM for these chips.
+	 */
+	int			cam_offset;
+
+	/* Needed for DEL and RESTORE flows */
+	unsigned long		vlan_mac_flags;
+
+	union bnx2x_classification_ramrod_data u;
+};
+
+/* Bits representing VLAN_MAC commands specific flags */
+enum {
+	BNX2X_UC_LIST_MAC,
+	BNX2X_ETH_MAC,
+	BNX2X_ISCSI_ETH_MAC,
+	BNX2X_NETQ_ETH_MAC,
+	BNX2X_DONT_CONSUME_CAM_CREDIT,
+	BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
+};
+
+struct bnx2x_vlan_mac_ramrod_params {
+	/* Object to run the command from */
+	struct bnx2x_vlan_mac_obj *vlan_mac_obj;
+
+	/* General command flags: COMP_WAIT, etc. */
+	unsigned long ramrod_flags;
+
+	/* Command specific configuration request */
+	struct bnx2x_vlan_mac_data user_req;
+};
+
+struct bnx2x_vlan_mac_obj {
+	struct bnx2x_raw_obj raw;
+
+	/* Bookkeeping list: will prevent the addition of already existing
+	 * entries.
+	 */
+	struct list_head		head;
+
+	/* TODO: Add its initialization in the init functions */
+	struct bnx2x_exe_queue_obj	exe_queue;
+
+	/* MACs credit pool */
+	struct bnx2x_credit_pool_obj	*macs_pool;
+
+	/* VLANs credit pool */
+	struct bnx2x_credit_pool_obj	*vlans_pool;
+
+	/* RAMROD command to be used */
+	int				ramrod_cmd;
+
+	/**
+	 * Checks if ADD-ramrod with the given params may be performed.
+	 *
+	 * @return zero if the element may be added
+	 */
+
+	int (*check_add)(struct bnx2x_vlan_mac_obj *o,
+			 union bnx2x_classification_ramrod_data *data);
+
+	/**
+	 * Checks if DEL-ramrod with the given params may be performed.
+	 *
+	 * @return the matching registry element if it may be deleted,
+	 *	   NULL otherwise
+	 */
+	struct bnx2x_vlan_mac_registry_elem *
+		(*check_del)(struct bnx2x_vlan_mac_obj *o,
+			     union bnx2x_classification_ramrod_data *data);
+
+	/**
+	 * Checks if a MOVE-ramrod with the given params may be performed.
+	 *
+	 * @return true if the element may be moved
+	 */
+	bool (*check_move)(struct bnx2x_vlan_mac_obj *src_o,
+			   struct bnx2x_vlan_mac_obj *dst_o,
+			   union bnx2x_classification_ramrod_data *data);
+
+	/**
+	 *  Update the relevant credit object(s) (consume/return
+	 *  correspondingly).
+	 */
+	bool (*get_credit)(struct bnx2x_vlan_mac_obj *o);
+	bool (*put_credit)(struct bnx2x_vlan_mac_obj *o);
+	bool (*get_cam_offset)(struct bnx2x_vlan_mac_obj *o, int *offset);
+	bool (*put_cam_offset)(struct bnx2x_vlan_mac_obj *o, int offset);
+
+	/**
+	 * Configures one rule in the ramrod data buffer.
+	 */
+	void (*set_one_rule)(struct bnx2x *bp,
+			     struct bnx2x_vlan_mac_obj *o,
+			     struct bnx2x_exeq_elem *elem, int rule_idx,
+			     int cam_offset);
+
+	/**
+	 * Delete all configured elements having the given
+	 * vlan_mac_flags specification. Assumes there are no commands
+	 * pending execution. Will schedule all currently configured
+	 * MACs/VLANs/VLAN-MACs matching the vlan_mac_flags
+	 * specification for deletion and will use the given
+	 * ramrod_flags for the last DEL operation.
+	 *
+	 * @param bp
+	 * @param o
+	 * @param ramrod_flags RAMROD_XX flags
+	 *
+	 * @return 0 if the last operation has completed successfully
+	 *         and there are no more elements left, positive value
+	 *         if there are pending for completion commands,
+	 *         negative value in case of failure.
+	 */
+	int (*delete_all)(struct bnx2x *bp,
+			  struct bnx2x_vlan_mac_obj *o,
+			  unsigned long *vlan_mac_flags,
+			  unsigned long *ramrod_flags);
+
+	/**
+	 * Reconfigures the next MAC/VLAN/VLAN-MAC element from the previously
+	 * configured elements list.
+	 *
+	 * @param bp
+	 * @param p Command parameters (RAMROD_COMP_WAIT bit in
+	 *          ramrod_flags is only taken into an account)
+	 * @param ppos a pointer to the cookie that should be given back in the
+	 *        next call to make function handle the next element. If
+	 *        *ppos is set to NULL it will restart the iterator.
+	 *        If returned *ppos == NULL this means that the last
+	 *        element has been handled.
+	 *
+	 * @return int
+	 */
+	int (*restore)(struct bnx2x *bp,
+		       struct bnx2x_vlan_mac_ramrod_params *p,
+		       struct bnx2x_vlan_mac_registry_elem **ppos);
+
+	/**
+	 * Should be called on a completion arrival.
+	 *
+	 * @param bp
+	 * @param o
+	 * @param cqe Completion element we are handling
+	 * @param ramrod_flags if RAMROD_CONT is set the next bulk of
+	 *		       pending commands will be executed.
+	 *		       RAMROD_DRV_CLR_ONLY and RAMROD_RESTORE
+	 *		       may also be set if needed.
+	 *
+	 * @return 0 if there are neither pending nor waiting for
+	 *         completion commands. Positive value if there are
+	 *         pending for execution or for completion commands.
+	 *         Negative value in case of an error (including an
+	 *         error in the cqe).
+	 */
+	int (*complete)(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
+			union event_ring_elem *cqe,
+			unsigned long *ramrod_flags);
+
+	/**
+	 * Wait for completion of all commands. Don't schedule new ones,
+	 * just wait. It assumes that the completion code will schedule
+	 * for new commands.
+	 */
+	int (*wait)(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o);
+};
+
+/** RX_MODE verbs:DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
+
+/* RX_MODE ramrod special flags: set in rx_mode_flags field in
+ * a bnx2x_rx_mode_ramrod_params.
+ */
+enum {
+	BNX2X_RX_MODE_FCOE_ETH,
+	BNX2X_RX_MODE_ISCSI_ETH,
+};
+
+enum {
+	BNX2X_ACCEPT_UNICAST,
+	BNX2X_ACCEPT_MULTICAST,
+	BNX2X_ACCEPT_ALL_UNICAST,
+	BNX2X_ACCEPT_ALL_MULTICAST,
+	BNX2X_ACCEPT_BROADCAST,
+	BNX2X_ACCEPT_UNMATCHED,
+	BNX2X_ACCEPT_ANY_VLAN
+};
+
+struct bnx2x_rx_mode_ramrod_params {
+	struct bnx2x_rx_mode_obj *rx_mode_obj;
+	unsigned long *pstate;
+	int state;
+	u8 cl_id;
+	u32 cid;
+	u8 func_id;
+	unsigned long ramrod_flags;
+	unsigned long rx_mode_flags;
+
+	/*
+	 * rdata is either a pointer to an eth_filter_rules_ramrod_data (E2)
+	 * or to a tstorm_eth_mac_filter_config (E1x).
+	 */
+	void *rdata;
+	dma_addr_t rdata_mapping;
+
+	/* Rx mode settings */
+	unsigned long rx_accept_flags;
+
+	/* internal switching settings */
+	unsigned long tx_accept_flags;
+};
+
+struct bnx2x_rx_mode_obj {
+	int (*config_rx_mode)(struct bnx2x *bp,
+			      struct bnx2x_rx_mode_ramrod_params *p);
+
+	int (*wait_comp)(struct bnx2x *bp,
+			 struct bnx2x_rx_mode_ramrod_params *p);
+};
+
+/********************** Set multicast group ***********************************/
+
+struct bnx2x_mcast_list_elem {
+	struct list_head link;
+	u8 *mac;
+};
+
+union bnx2x_mcast_config_data {
+	u8 *mac;
+	u8 bin; /* used in a RESTORE flow */
+};
+
+struct bnx2x_mcast_ramrod_params {
+	struct bnx2x_mcast_obj *mcast_obj;
+
+	/* Relevant options are RAMROD_COMP_WAIT and RAMROD_DRV_CLR_ONLY */
+	unsigned long ramrod_flags;
+
+	struct list_head mcast_list; /* list of struct bnx2x_mcast_list_elem */
+	/** TODO:
+	 *      - rename it to macs_num.
+	 *      - Add a new command type for handling pending commands
+	 *        (remove "zero semantics").
+	 *
+	 *  Length of mcast_list. If zero together with an ADD_CONT
+	 *  command, pending commands will be posted.
+	 */
+	int mcast_list_len;
+};
+
+enum {
+	BNX2X_MCAST_CMD_ADD,
+	BNX2X_MCAST_CMD_CONT,
+	BNX2X_MCAST_CMD_DEL,
+	BNX2X_MCAST_CMD_RESTORE,
+};
+
+struct bnx2x_mcast_obj {
+	struct bnx2x_raw_obj raw;
+
+	union {
+		struct {
+		#define BNX2X_MCAST_BINS_NUM	256
+		#define BNX2X_MCAST_VEC_SZ	(BNX2X_MCAST_BINS_NUM / 64)
+			u64 vec[BNX2X_MCAST_VEC_SZ];
+
+			/** Number of BINs to clear. Should be updated
+			 *  immediately when a command arrives in order to
+			 *  properly create DEL commands.
+			 */
+			int num_bins_set;
+		} aprox_match;
+
+		struct {
+			struct list_head macs;
+			int num_macs_set;
+		} exact_match;
+	} registry;
+
+	/* Pending commands */
+	struct list_head pending_cmds_head;
+
+	/* A state that is set in raw.pstate, when there are pending commands */
+	int sched_state;
+
+	/* Maximum number of mcast MACs configured in one command */
+	int max_cmd_len;
+
+	/* Total number of currently pending MACs to configure: both
+	 * in the pending commands list and in the current command.
+	 */
+	int total_pending_num;
+
+	u8 engine_id;
+
+	/**
+	 * @param cmd command to execute (BNX2X_MCAST_CMD_X, see above)
+	 */
+	int (*config_mcast)(struct bnx2x *bp,
+				struct bnx2x_mcast_ramrod_params *p, int cmd);
+
+	/**
+	 * Fills the ramrod data during the RESTORE flow.
+	 *
+	 * @param bp
+	 * @param o
+	 * @param start_bin Registry bin to start from
+	 * @param rdata_idx Index in the ramrod data to start from
+	 *
+	 * @return -1 if the whole registry was handled, otherwise the
+	 *         index of the last handled registry element.
+	 */
+	int (*hdl_restore)(struct bnx2x *bp, struct bnx2x_mcast_obj *o,
+			   int start_bin, int *rdata_idx);
+
+	int (*enqueue_cmd)(struct bnx2x *bp, struct bnx2x_mcast_obj *o,
+			   struct bnx2x_mcast_ramrod_params *p, int cmd);
+
+	void (*set_one_rule)(struct bnx2x *bp,
+			     struct bnx2x_mcast_obj *o, int idx,
+			     union bnx2x_mcast_config_data *cfg_data, int cmd);
+
+	/** Checks if there are more mcast MACs to be set or a previous
+	 *  command is still pending.
+	 */
+	bool (*check_pending)(struct bnx2x_mcast_obj *o);
+
+	/**
+	 * Set/Clear/Check SCHEDULED state of the object
+	 */
+	void (*set_sched)(struct bnx2x_mcast_obj *o);
+	void (*clear_sched)(struct bnx2x_mcast_obj *o);
+	bool (*check_sched)(struct bnx2x_mcast_obj *o);
+
+	/* Wait until all pending commands complete */
+	int (*wait_comp)(struct bnx2x *bp, struct bnx2x_mcast_obj *o);
+
+	/**
+	 * Handle the internal object counters needed for proper
+	 * commands handling. Checks that the provided parameters are
+	 * feasible.
+	 */
+	int (*validate)(struct bnx2x *bp,
+			struct bnx2x_mcast_ramrod_params *p, int cmd);
+
+	/**
+	 * Restore the values of internal counters in case of a failure.
+	 */
+	void (*revert)(struct bnx2x *bp,
+		       struct bnx2x_mcast_ramrod_params *p,
+		       int old_num_bins);
+
+	int (*get_registry_size)(struct bnx2x_mcast_obj *o);
+	void (*set_registry_size)(struct bnx2x_mcast_obj *o, int n);
+};
+
+/*************************** Credit handling **********************************/
+struct bnx2x_credit_pool_obj {
+
+	/* Current amount of credit in the pool */
+	atomic_t	credit;
+
+	/* Maximum allowed credit. put() will check against it. */
+	int		pool_sz;
+
+	/*
+	 *  Allocate a pool table statically.
+	 *
+	 *  Currently the maximum allowed size is MAX_MAC_CREDIT_E2 (272).
+	 *
+	 *  A set bit in the table means that the entry is available.
+	 */
+#define BNX2X_POOL_VEC_SIZE	(MAX_MAC_CREDIT_E2 / 64)
+	u64		pool_mirror[BNX2X_POOL_VEC_SIZE];
+
+	/* Base pool offset (initialized differently) */
+	int		base_pool_offset;
+
+	/**
+	 * Get the next free pool entry.
+	 *
+	 * @return true if there was a free entry in the pool
+	 */
+	bool (*get_entry)(struct bnx2x_credit_pool_obj *o, int *entry);
+
+	/**
+	 * Return the entry back to the pool.
+	 *
+	 * @return true if entry is legal and has been successfully
+	 *         returned to the pool.
+	 */
+	bool (*put_entry)(struct bnx2x_credit_pool_obj *o, int entry);
+
+	/**
+	 * Get the requested amount of credit from the pool.
+	 *
+	 * @param cnt Amount of requested credit
+	 * @return true if the operation is successful
+	 */
+	bool (*get)(struct bnx2x_credit_pool_obj *o, int cnt);
+
+	/**
+	 * Returns the credit to the pool.
+	 *
+	 * @param cnt Amount of credit to return
+	 * @return true if the operation is successful
+	 */
+	bool (*put)(struct bnx2x_credit_pool_obj *o, int cnt);
+
+	/**
+	 * Reads the current amount of credit.
+	 */
+	int (*check)(struct bnx2x_credit_pool_obj *o);
+};
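
A minimal usage sketch (assuming an already initialized pool; not part of this patch): consume one unit of credit and one CAM entry, undoing the credit if no entry is free.

/* Illustrative sketch, not part of the patch. */
static bool example_take_mac_slot(struct bnx2x_credit_pool_obj *p,
				  int *cam_offset)
{
	/* Take one unit of credit; fail if the pool is exhausted */
	if (!p->get(p, 1))
		return false;

	/* Grab a free CAM entry; return the credit on failure */
	if (!p->get_entry(p, cam_offset)) {
		p->put(p, 1);
		return false;
	}

	return true;
}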
+
+/*************************** RSS configuration ********************************/
+enum {
+	/* RSS_MODE bits are mutually exclusive */
+	BNX2X_RSS_MODE_DISABLED,
+	BNX2X_RSS_MODE_REGULAR,
+	BNX2X_RSS_MODE_VLAN_PRI,
+	BNX2X_RSS_MODE_E1HOV_PRI,
+	BNX2X_RSS_MODE_IP_DSCP,
+
+	BNX2X_RSS_SET_SRCH, /* Setup searcher, E1x specific flag */
+
+	BNX2X_RSS_IPV4,
+	BNX2X_RSS_IPV4_TCP,
+	BNX2X_RSS_IPV6,
+	BNX2X_RSS_IPV6_TCP,
+};
+
+struct bnx2x_config_rss_params {
+	struct bnx2x_rss_config_obj *rss_obj;
+
+	/* may have RAMROD_COMP_WAIT set only */
+	unsigned long	ramrod_flags;
+
+	/* BNX2X_RSS_X bits */
+	unsigned long	rss_flags;
+
+	/* Number of hash bits to take into account */
+	u8		rss_result_mask;
+
+	/* Indirection table */
+	u8		ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
+
+	/* RSS hash values */
+	u32		rss_key[10];
+
+	/* valid iff BNX2X_RSS_UPDATE_TOE is set */
+	u16		toe_rss_bitmap;
+};
+
+struct bnx2x_rss_config_obj {
+	struct bnx2x_raw_obj	raw;
+
+	/* RSS engine to use */
+	u8			engine_id;
+
+	/* Last configured indirection table */
+	u8			ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
+
+	int (*config_rss)(struct bnx2x *bp,
+			  struct bnx2x_config_rss_params *p);
+};
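
Tying the two structures together, a hedged configuration sketch (illustrative flag and mask choices only, not part of this patch) could look like:

/* Illustrative sketch, not part of the patch. */
static int example_config_rss(struct bnx2x *bp,
			      struct bnx2x_rss_config_obj *rss_obj,
			      const u8 *ind_table)
{
	struct bnx2x_config_rss_params params = {0};

	params.rss_obj = rss_obj;
	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);

	/* Regular RSS over IPv4 and IPv4/TCP (illustrative choice) */
	__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);

	params.rss_result_mask = 7;	/* illustrative hash-bits value */
	memcpy(params.ind_table, ind_table, sizeof(params.ind_table));

	return bnx2x_config_rss(bp, &params);
}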
+
+/*********************** Queue state update ***********************************/
+
+/* UPDATE command options */
+enum {
+	BNX2X_Q_UPDATE_IN_VLAN_REM,
+	BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
+	BNX2X_Q_UPDATE_OUT_VLAN_REM,
+	BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG,
+	BNX2X_Q_UPDATE_ANTI_SPOOF,
+	BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG,
+	BNX2X_Q_UPDATE_ACTIVATE,
+	BNX2X_Q_UPDATE_ACTIVATE_CHNG,
+	BNX2X_Q_UPDATE_DEF_VLAN_EN,
+	BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
+	BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
+	BNX2X_Q_UPDATE_SILENT_VLAN_REM
+};
+
+/* Allowed Queue states */
+enum bnx2x_q_state {
+	BNX2X_Q_STATE_RESET,
+	BNX2X_Q_STATE_INITIALIZED,
+	BNX2X_Q_STATE_ACTIVE,
+	BNX2X_Q_STATE_INACTIVE,
+	BNX2X_Q_STATE_STOPPED,
+	BNX2X_Q_STATE_TERMINATED,
+	BNX2X_Q_STATE_FLRED,
+	BNX2X_Q_STATE_MAX,
+};
+
+/* Allowed commands */
+enum bnx2x_queue_cmd {
+	BNX2X_Q_CMD_INIT,
+	BNX2X_Q_CMD_SETUP,
+	BNX2X_Q_CMD_DEACTIVATE,
+	BNX2X_Q_CMD_ACTIVATE,
+	BNX2X_Q_CMD_UPDATE,
+	BNX2X_Q_CMD_UPDATE_TPA,
+	BNX2X_Q_CMD_HALT,
+	BNX2X_Q_CMD_CFC_DEL,
+	BNX2X_Q_CMD_TERMINATE,
+	BNX2X_Q_CMD_EMPTY,
+	BNX2X_Q_CMD_MAX,
+};
+
+/* queue SETUP + INIT flags */
+enum {
+	BNX2X_Q_FLG_TPA,
+	BNX2X_Q_FLG_STATS,
+	BNX2X_Q_FLG_ZERO_STATS,
+	BNX2X_Q_FLG_ACTIVE,
+	BNX2X_Q_FLG_OV,
+	BNX2X_Q_FLG_VLAN,
+	BNX2X_Q_FLG_COS,
+	BNX2X_Q_FLG_HC,
+	BNX2X_Q_FLG_HC_EN,
+	BNX2X_Q_FLG_DHC,
+	BNX2X_Q_FLG_FCOE,
+	BNX2X_Q_FLG_LEADING_RSS,
+	BNX2X_Q_FLG_MCAST,
+	BNX2X_Q_FLG_DEF_VLAN,
+	BNX2X_Q_FLG_TX_SWITCH,
+	BNX2X_Q_FLG_TX_SEC,
+	BNX2X_Q_FLG_ANTI_SPOOF,
+	BNX2X_Q_FLG_SILENT_VLAN_REM
+};
+
+/* Queue type options: queue type may be a combination of the flags below. */
+enum bnx2x_q_type {
+	/** TODO: Consider moving both these flags into the init()
+	 *        ramrod params.
+	 */
+	BNX2X_Q_TYPE_HAS_RX,
+	BNX2X_Q_TYPE_HAS_TX,
+};
+
+struct bnx2x_queue_init_params {
+	struct {
+		unsigned long	flags;
+		u16		hc_rate;
+		u8		fw_sb_id;
+		u8		sb_cq_index;
+	} tx;
+
+	struct {
+		unsigned long	flags;
+		u16		hc_rate;
+		u8		fw_sb_id;
+		u8		sb_cq_index;
+	} rx;
+
+	/* CID context in the host memory */
+	struct eth_context *cxt;
+};
+
+struct bnx2x_queue_update_params {
+	unsigned long	update_flags; /* BNX2X_Q_UPDATE_XX bits */
+	u16		def_vlan;
+	u16		silent_removal_value;
+	u16		silent_removal_mask;
+};
+
+struct rxq_pause_params {
+	u16		bd_th_lo;
+	u16		bd_th_hi;
+	u16		rcq_th_lo;
+	u16		rcq_th_hi;
+	u16		sge_th_lo; /* valid iff BNX2X_Q_FLG_TPA */
+	u16		sge_th_hi; /* valid iff BNX2X_Q_FLG_TPA */
+	u16		pri_map;
+};
+
+/* general */
+struct bnx2x_general_setup_params {
+	/* valid iff BNX2X_Q_FLG_STATS */
+	u8		stat_id;
+
+	u8		spcl_id;
+	u16		mtu;
+};
+
+struct bnx2x_rxq_setup_params {
+	/* dma */
+	dma_addr_t	dscr_map;
+	dma_addr_t	sge_map;
+	dma_addr_t	rcq_map;
+	dma_addr_t	rcq_np_map;
+
+	u16		drop_flags;
+	u16		buf_sz;
+	u8		fw_sb_id;
+	u8		cl_qzone_id;
+
+	/* valid iff BNX2X_Q_FLG_TPA */
+	u16		tpa_agg_sz;
+	u16		sge_buf_sz;
+	u8		max_sges_pkt;
+	u8		max_tpa_queues;
+	u8		rss_engine_id;
+
+	u8		cache_line_log;
+
+	u8		sb_cq_index;
+
+	/* valid iff BNX2X_Q_FLG_SILENT_VLAN_REM */
+	u16 silent_removal_value;
+	u16 silent_removal_mask;
+};
+
+struct bnx2x_txq_setup_params {
+	/* dma */
+	dma_addr_t	dscr_map;
+
+	u8		fw_sb_id;
+	u8		sb_cq_index;
+	u8		cos;		/* valid iff BNX2X_Q_FLG_COS */
+	u16		traffic_type;
+	/* equals the leading RSS client id, used for Tx classification */
+	u8		tss_leading_cl_id;
+
+	/* valid iff BNX2X_Q_FLG_DEF_VLAN */
+	u16		default_vlan;
+};
+
+struct bnx2x_queue_setup_params {
+	struct rxq_pause_params pause;
+	struct bnx2x_general_setup_params gen_params;
+	struct bnx2x_rxq_setup_params rxq_params;
+	struct bnx2x_txq_setup_params txq_params;
+	unsigned long flags;
+};
+
+
+struct bnx2x_queue_state_params {
+	struct bnx2x_queue_sp_obj *q_obj;
+
+	/* Current command */
+	enum bnx2x_queue_cmd cmd;
+
+	/* may have RAMROD_COMP_WAIT set only */
+	unsigned long	ramrod_flags;
+
+	/* Params according to the current command */
+	union {
+		struct bnx2x_queue_update_params update;
+		struct bnx2x_queue_setup_params   setup;
+		struct bnx2x_queue_init_params	  init;
+	} params;
+};
+
+struct bnx2x_queue_sp_obj {
+	u32		cid;
+	u8		cl_id;
+	u8		func_id;
+
+	enum bnx2x_q_state state, next_state;
+
+	/* bits from enum bnx2x_q_type */
+	unsigned long	type;
+
+	/* BNX2X_Q_CMD_XX bits. This object implements "one
+	 * pending" paradigm but for debug and tracing purposes it's
+	 * more convenient to have different bits for different
+	 * commands.
+	 */
+	unsigned long	pending;
+
+	/* Buffer to use as a ramrod data and its mapping */
+	void		*rdata;
+	dma_addr_t	rdata_mapping;
+
+	/**
+	 * Performs one state change according to the given parameters.
+	 *
+	 * @return 0 in case of success and negative value otherwise.
+	 */
+	int (*send_cmd)(struct bnx2x *bp,
+			struct bnx2x_queue_state_params *params);
+
+	/**
+	 * Sets the pending bit according to the requested transition.
+	 */
+	int (*set_pending)(struct bnx2x_queue_sp_obj *o,
+			   struct bnx2x_queue_state_params *params);
+
+	/**
+	 * Checks that the requested state transition is legal.
+	 */
+	int (*check_transition)(struct bnx2x *bp,
+				struct bnx2x_queue_sp_obj *o,
+				struct bnx2x_queue_state_params *params);
+
+	/**
+	 * Completes the pending command.
+	 */
+	int (*complete_cmd)(struct bnx2x *bp,
+			    struct bnx2x_queue_sp_obj *o,
+			    enum bnx2x_queue_cmd cmd);
+
+	int (*wait_comp)(struct bnx2x *bp,
+			 struct bnx2x_queue_sp_obj *o,
+			 enum bnx2x_queue_cmd cmd);
+};
+
+/********************** Function state update *********************************/
+/* Allowed Function states */
+enum bnx2x_func_state {
+	BNX2X_F_STATE_RESET,
+	BNX2X_F_STATE_INITIALIZED,
+	BNX2X_F_STATE_STARTED,
+	BNX2X_F_STATE_MAX,
+};
+
+/* Allowed Function commands */
+enum bnx2x_func_cmd {
+	BNX2X_F_CMD_HW_INIT,
+	BNX2X_F_CMD_START,
+	BNX2X_F_CMD_STOP,
+	BNX2X_F_CMD_HW_RESET,
+	BNX2X_F_CMD_MAX,
+};
+
+struct bnx2x_func_hw_init_params {
+	/* A load phase returned by MCP.
+	 *
+	 * May be:
+	 *		FW_MSG_CODE_DRV_LOAD_COMMON_CHIP
+	 *		FW_MSG_CODE_DRV_LOAD_COMMON
+	 *		FW_MSG_CODE_DRV_LOAD_PORT
+	 *		FW_MSG_CODE_DRV_LOAD_FUNCTION
+	 */
+	u32 load_phase;
+};
+
+struct bnx2x_func_hw_reset_params {
+	/* A load phase returned by MCP.
+	 *
+	 * May be:
+	 *		FW_MSG_CODE_DRV_LOAD_COMMON_CHIP
+	 *		FW_MSG_CODE_DRV_LOAD_COMMON
+	 *		FW_MSG_CODE_DRV_LOAD_PORT
+	 *		FW_MSG_CODE_DRV_LOAD_FUNCTION
+	 */
+	u32 reset_phase;
+};
+
+struct bnx2x_func_start_params {
+	/* Multi Function mode:
+	 *	- Single Function
+	 *	- Switch Dependent
+	 *	- Switch Independent
+	 */
+	u16 mf_mode;
+
+	/* Switch Dependent mode outer VLAN tag */
+	u16 sd_vlan_tag;
+
+	/* Function cos mode */
+	u8 network_cos_mode;
+};
+
+struct bnx2x_func_state_params {
+	struct bnx2x_func_sp_obj *f_obj;
+
+	/* Current command */
+	enum bnx2x_func_cmd cmd;
+
+	/* may have RAMROD_COMP_WAIT set only */
+	unsigned long	ramrod_flags;
+
+	/* Params according to the current command */
+	union {
+		struct bnx2x_func_hw_init_params hw_init;
+		struct bnx2x_func_hw_reset_params hw_reset;
+		struct bnx2x_func_start_params start;
+	} params;
+};
+
+struct bnx2x_func_sp_drv_ops {
+	/* Init tool + runtime initialization:
+	 *      - Common Chip
+	 *      - Common (per Path)
+	 *      - Port
+	 *      - Function phases
+	 */
+	int (*init_hw_cmn_chip)(struct bnx2x *bp);
+	int (*init_hw_cmn)(struct bnx2x *bp);
+	int (*init_hw_port)(struct bnx2x *bp);
+	int (*init_hw_func)(struct bnx2x *bp);
+
+	/* Reset Function HW: Common, Port, Function phases. */
+	void (*reset_hw_cmn)(struct bnx2x *bp);
+	void (*reset_hw_port)(struct bnx2x *bp);
+	void (*reset_hw_func)(struct bnx2x *bp);
+
+	/* Init/Free GUNZIP resources */
+	int (*gunzip_init)(struct bnx2x *bp);
+	void (*gunzip_end)(struct bnx2x *bp);
+
+	/* Prepare/Release FW resources */
+	int (*init_fw)(struct bnx2x *bp);
+	void (*release_fw)(struct bnx2x *bp);
+};
+
+struct bnx2x_func_sp_obj {
+	enum bnx2x_func_state	state, next_state;
+
+	/* BNX2X_FUNC_CMD_XX bits. This object implements the "one
+	 * pending" paradigm, but for debug and tracing purposes it's
+	 * more convenient to have different bits for different
+	 * commands.
+	 */
+	unsigned long		pending;
+
+	/* Buffer to use as a ramrod data and its mapping */
+	void			*rdata;
+	dma_addr_t		rdata_mapping;
+
+	/* This mutex ensures that when the pending flag is taken, the next
+	 * ramrod to be sent will be the one that set the pending bit.
+	 */
+	struct mutex		one_pending_mutex;
+
+	/* Driver interface */
+	struct bnx2x_func_sp_drv_ops	*drv;
+
+	/**
+	 * Performs one state change according to the given parameters.
+	 *
+	 * @return 0 in case of success and a negative value otherwise.
+	 */
+	int (*send_cmd)(struct bnx2x *bp,
+			struct bnx2x_func_state_params *params);
+
+	/**
+	 * Checks that the requested state transition is legal.
+	 */
+	int (*check_transition)(struct bnx2x *bp,
+				struct bnx2x_func_sp_obj *o,
+				struct bnx2x_func_state_params *params);
+
+	/**
+	 * Completes the pending command.
+	 */
+	int (*complete_cmd)(struct bnx2x *bp,
+			    struct bnx2x_func_sp_obj *o,
+			    enum bnx2x_func_cmd cmd);
+
+	int (*wait_comp)(struct bnx2x *bp, struct bnx2x_func_sp_obj *o,
+			 enum bnx2x_func_cmd cmd);
+};
+
+/********************** Interfaces ********************************************/
+/* Queueable objects set */
+union bnx2x_qable_obj {
+	struct bnx2x_vlan_mac_obj vlan_mac;
+};
+/************** Function state update *********/
+void bnx2x_init_func_obj(struct bnx2x *bp,
+			 struct bnx2x_func_sp_obj *obj,
+			 void *rdata, dma_addr_t rdata_mapping,
+			 struct bnx2x_func_sp_drv_ops *drv_iface);
+
+int bnx2x_func_state_change(struct bnx2x *bp,
+			    struct bnx2x_func_state_params *params);
+
+/******************* Queue State **************/
+void bnx2x_init_queue_obj(struct bnx2x *bp,
+			  struct bnx2x_queue_sp_obj *obj, u8 cl_id, u32 cid,
+			  u8 func_id, void *rdata, dma_addr_t rdata_mapping,
+			  unsigned long type);
+
+int bnx2x_queue_state_change(struct bnx2x *bp,
+			     struct bnx2x_queue_state_params *params);
+
+/********************* VLAN-MAC ****************/
+void bnx2x_init_mac_obj(struct bnx2x *bp,
+			struct bnx2x_vlan_mac_obj *mac_obj,
+			u8 cl_id, u32 cid, u8 func_id, void *rdata,
+			dma_addr_t rdata_mapping, int state,
+			unsigned long *pstate, bnx2x_obj_type type,
+			struct bnx2x_credit_pool_obj *macs_pool);
+
+void bnx2x_init_vlan_obj(struct bnx2x *bp,
+			 struct bnx2x_vlan_mac_obj *vlan_obj,
+			 u8 cl_id, u32 cid, u8 func_id, void *rdata,
+			 dma_addr_t rdata_mapping, int state,
+			 unsigned long *pstate, bnx2x_obj_type type,
+			 struct bnx2x_credit_pool_obj *vlans_pool);
+
+void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
+			     struct bnx2x_vlan_mac_obj *vlan_mac_obj,
+			     u8 cl_id, u32 cid, u8 func_id, void *rdata,
+			     dma_addr_t rdata_mapping, int state,
+			     unsigned long *pstate, bnx2x_obj_type type,
+			     struct bnx2x_credit_pool_obj *macs_pool,
+			     struct bnx2x_credit_pool_obj *vlans_pool);
+
+int bnx2x_config_vlan_mac(struct bnx2x *bp,
+			  struct bnx2x_vlan_mac_ramrod_params *p);
+
+int bnx2x_vlan_mac_move(struct bnx2x *bp,
+			struct bnx2x_vlan_mac_ramrod_params *p,
+			struct bnx2x_vlan_mac_obj *dest_o);
+
+/********************* RX MODE ****************/
+
+void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
+			    struct bnx2x_rx_mode_obj *o);
+
+/**
+ * Send an RX_MODE ramrod according to the provided parameters.
+ *
+ * @param bp
+ * @param p Command parameters
+ *
+ * @return 0 - if the operation was successful and there are no pending completions,
+ *         positive number - if there are pending completions,
+ *         negative - if there were errors
+ */
+int bnx2x_config_rx_mode(struct bnx2x *bp,
+			 struct bnx2x_rx_mode_ramrod_params *p);
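/* A sketch of a caller honoring the tri-state return convention
 * documented above; the wrapper name is illustrative only.
 */
static inline int example_set_rx_mode(struct bnx2x *bp,
				      struct bnx2x_rx_mode_ramrod_params *p)
{
	int rc = bnx2x_config_rx_mode(bp, p);

	if (rc < 0)
		return rc;	/* hard error */

	/* rc > 0 only means completions are still pending; callers that
	 * must block would set a completion-wait ramrod flag instead */
	return 0;
}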
+
+/****************** MULTICASTS ****************/
+
+void bnx2x_init_mcast_obj(struct bnx2x *bp,
+			  struct bnx2x_mcast_obj *mcast_obj,
+			  u8 mcast_cl_id, u32 mcast_cid, u8 func_id,
+			  u8 engine_id, void *rdata, dma_addr_t rdata_mapping,
+			  int state, unsigned long *pstate,
+			  bnx2x_obj_type type);
+
+/**
 * Configure the multicast MAC list. May configure a new list
 * provided in p->mcast_list (BNX2X_MCAST_CMD_ADD), clean up
 * (BNX2X_MCAST_CMD_DEL) or restore (BNX2X_MCAST_CMD_RESTORE) the current
 * configuration, or continue executing the pending commands
 * (BNX2X_MCAST_CMD_CONT).
 *
 * If the previous command is still pending or if the number of MACs to
 * configure exceeds the maximum number of MACs in one command,
 * the current command will be enqueued at the tail of the
 * pending commands list.
+ *
+ * @param bp
+ * @param p
+ * @param command to execute: BNX2X_MCAST_CMD_X
+ *
+ * @return 0 if the operation was successful and there are no pending completions,
+ *         negative if there were errors, positive if there are pending
+ *         completions.
+ */
+int bnx2x_config_mcast(struct bnx2x *bp,
+		       struct bnx2x_mcast_ramrod_params *p, int cmd);
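/* A sketch of the command semantics above: dropping the current
 * multicast configuration with BNX2X_MCAST_CMD_DEL.  Assumes the
 * ramrod params were prepared with an object initialized by
 * bnx2x_init_mcast_obj().
 */
static inline int example_mcast_flush(struct bnx2x *bp,
				      struct bnx2x_mcast_ramrod_params *p)
{
	int rc = bnx2x_config_mcast(bp, p, BNX2X_MCAST_CMD_DEL);

	/* a positive rc means the command was enqueued as pending */
	return (rc < 0) ? rc : 0;
}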
+
+/****************** CREDIT POOL ****************/
+void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
+				struct bnx2x_credit_pool_obj *p, u8 func_id,
+				u8 func_num);
+void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
+				 struct bnx2x_credit_pool_obj *p, u8 func_id,
+				 u8 func_num);
+
+
+/****************** RSS CONFIGURATION ****************/
+void bnx2x_init_rss_config_obj(struct bnx2x *bp,
+			       struct bnx2x_rss_config_obj *rss_obj,
+			       u8 cl_id, u32 cid, u8 func_id, u8 engine_id,
+			       void *rdata, dma_addr_t rdata_mapping,
+			       int state, unsigned long *pstate,
+			       bnx2x_obj_type type);
+
+/**
+ * Updates RSS configuration according to provided parameters.
+ *
+ * @param bp
+ * @param p
+ *
+ * @return 0 in case of success
+ */
+int bnx2x_config_rss(struct bnx2x *bp,
+		     struct bnx2x_config_rss_params *p);
+
+/**
+ * Return the current ind_table configuration.
+ *
+ * @param bp
+ * @param ind_table buffer to fill with the current indirection
+ *                  table content. Should be at least
+ *                  T_ETH_INDIRECTION_TABLE_SIZE bytes long.
+ */
+void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
+			     u8 *ind_table);
+
+#endif /* BNX2X_SP_VERBS */
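/* Tying the interfaces together: a hedged sketch of a function-level
 * HW init through bnx2x_func_state_change(), assuming f_obj was set
 * up with bnx2x_init_func_obj() and load_phase is one of the
 * FW_MSG_CODE_DRV_LOAD_* values listed earlier.
 */
static int example_func_hw_init(struct bnx2x *bp,
				struct bnx2x_func_sp_obj *f_obj,
				u32 load_phase)
{
	struct bnx2x_func_state_params func_params = {0};

	func_params.f_obj = f_obj;
	func_params.cmd = BNX2X_F_CMD_HW_INIT;
	func_params.params.hw_init.load_phase = load_phase;

	/* wait for the ramrod completion before returning */
	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);

	return bnx2x_func_state_change(bp, &func_params);
}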
diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c
index e535bfa..54c07f5 100644
--- a/drivers/net/bnx2x/bnx2x_stats.c
+++ b/drivers/net/bnx2x/bnx2x_stats.c
@@ -14,121 +14,12 @@
  * Statistics and Link management by Yitchak Gertner
  *
  */
-#include "bnx2x_cmn.h"
 #include "bnx2x_stats.h"
+#include "bnx2x_cmn.h"
 
 /* Statistics */
 
-/****************************************************************************
-* Macros
-****************************************************************************/
-
-/* sum[hi:lo] += add[hi:lo] */
-#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
-	do { \
-		s_lo += a_lo; \
-		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
-	} while (0)
-
-/* difference = minuend - subtrahend */
-#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
-	do { \
-		if (m_lo < s_lo) { \
-			/* underflow */ \
-			d_hi = m_hi - s_hi; \
-			if (d_hi > 0) { \
-				/* we can 'loan' 1 */ \
-				d_hi--; \
-				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
-			} else { \
-				/* m_hi <= s_hi */ \
-				d_hi = 0; \
-				d_lo = 0; \
-			} \
-		} else { \
-			/* m_lo >= s_lo */ \
-			if (m_hi < s_hi) { \
-				d_hi = 0; \
-				d_lo = 0; \
-			} else { \
-				/* m_hi >= s_hi */ \
-				d_hi = m_hi - s_hi; \
-				d_lo = m_lo - s_lo; \
-			} \
-		} \
-	} while (0)
-
-#define UPDATE_STAT64(s, t) \
-	do { \
-		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
-			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
-		pstats->mac_stx[0].t##_hi = new->s##_hi; \
-		pstats->mac_stx[0].t##_lo = new->s##_lo; \
-		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
-		       pstats->mac_stx[1].t##_lo, diff.lo); \
-	} while (0)
-
-#define UPDATE_STAT64_NIG(s, t) \
-	do { \
-		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
-			diff.lo, new->s##_lo, old->s##_lo); \
-		ADD_64(estats->t##_hi, diff.hi, \
-		       estats->t##_lo, diff.lo); \
-	} while (0)
-
-/* sum[hi:lo] += add */
-#define ADD_EXTEND_64(s_hi, s_lo, a) \
-	do { \
-		s_lo += a; \
-		s_hi += (s_lo < a) ? 1 : 0; \
-	} while (0)
-
-#define UPDATE_EXTEND_STAT(s) \
-	do { \
-		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
-			      pstats->mac_stx[1].s##_lo, \
-			      new->s); \
-	} while (0)
-
-#define UPDATE_EXTEND_TSTAT(s, t) \
-	do { \
-		diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
-		old_tclient->s = tclient->s; \
-		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
-	} while (0)
-
-#define UPDATE_EXTEND_USTAT(s, t) \
-	do { \
-		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
-		old_uclient->s = uclient->s; \
-		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
-	} while (0)
-
-#define UPDATE_EXTEND_XSTAT(s, t) \
-	do { \
-		diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
-		old_xclient->s = xclient->s; \
-		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
-	} while (0)
-
-/* minuend -= subtrahend */
-#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
-	do { \
-		DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
-	} while (0)
-
-/* minuend[hi:lo] -= subtrahend */
-#define SUB_EXTEND_64(m_hi, m_lo, s) \
-	do { \
-		SUB_64(m_hi, 0, m_lo, s); \
-	} while (0)
-
-#define SUB_EXTEND_USTAT(s, t) \
-	do { \
-		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
-		SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
-	} while (0)
-
 /*
  * General service functions
  */
@@ -149,12 +40,16 @@
  * Init service functions
  */
 
-
+/* Post the next statistics ramrod. Protect it with the spin lock in
+ * order to ensure a strict ordering between statistics ramrods
+ * (each ramrod has a sequence number passed in
+ * bp->fw_stats_req->hdr.drv_stats_counter and ramrods must be
+ * sent in order).
+ */
 static void bnx2x_storm_stats_post(struct bnx2x *bp)
 {
 	if (!bp->stats_pending) {
-		struct common_query_ramrod_data ramrod_data = {0};
-		int i, rc;
+		int rc;
 
 		spin_lock_bh(&bp->stats_lock);
 
@@ -163,14 +58,19 @@
 			return;
 		}
 
-		ramrod_data.drv_counter = bp->stats_counter++;
-		ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
-		for_each_eth_queue(bp, i)
-			ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
+		bp->fw_stats_req->hdr.drv_stats_counter =
+			cpu_to_le16(bp->stats_counter++);
 
+		DP(NETIF_MSG_TIMER, "Sending statistics ramrod %d\n",
+			bp->fw_stats_req->hdr.drv_stats_counter);
+
+		/* send FW stats ramrod */
 		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
-				   ((u32 *)&ramrod_data)[1],
-				   ((u32 *)&ramrod_data)[0], 1);
+				   U64_HI(bp->fw_stats_req_mapping),
+				   U64_LO(bp->fw_stats_req_mapping),
+				   NONE_CONNECTION_TYPE);
 		if (rc == 0)
 			bp->stats_pending = 1;
 
@@ -230,7 +130,7 @@
 			break;
 		}
 		cnt--;
-		msleep(1);
+		usleep_range(1000, 1000);
 	}
 	return 1;
 }
@@ -338,69 +238,8 @@
 	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
 				   true, DMAE_COMP_GRC);
 
-	if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
-
-		mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
-				   NIG_REG_INGRESS_BMAC0_MEM);
-
-		/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
-		   BIGMAC_REGISTER_TX_STAT_GTBYT */
-		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
-		dmae->opcode = opcode;
-		if (CHIP_IS_E1x(bp)) {
-			dmae->src_addr_lo = (mac_addr +
-				     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
-			dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
-				     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
-		} else {
-			dmae->src_addr_lo = (mac_addr +
-				     BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
-			dmae->len = (8 + BIGMAC2_REGISTER_TX_STAT_GTBYT -
-				     BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
-		}
-
-		dmae->src_addr_hi = 0;
-		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
-		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
-		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
-		dmae->comp_addr_hi = 0;
-		dmae->comp_val = 1;
-
-		/* BIGMAC_REGISTER_RX_STAT_GR64 ..
-		   BIGMAC_REGISTER_RX_STAT_GRIPJ */
-		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
-		dmae->opcode = opcode;
-		dmae->src_addr_hi = 0;
-		if (CHIP_IS_E1x(bp)) {
-			dmae->src_addr_lo = (mac_addr +
-					     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
-			dmae->dst_addr_lo =
-				U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
-				offsetof(struct bmac1_stats, rx_stat_gr64_lo));
-			dmae->dst_addr_hi =
-				U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
-				offsetof(struct bmac1_stats, rx_stat_gr64_lo));
-			dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
-				     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
-		} else {
-			dmae->src_addr_lo =
-				(mac_addr + BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
-			dmae->dst_addr_lo =
-				U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
-				offsetof(struct bmac2_stats, rx_stat_gr64_lo));
-			dmae->dst_addr_hi =
-				U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
-				offsetof(struct bmac2_stats, rx_stat_gr64_lo));
-			dmae->len = (8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ -
-				     BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
-		}
-
-		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
-		dmae->comp_addr_hi = 0;
-		dmae->comp_val = 1;
-
-	} else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
-
+	/* EMAC is special */
+	if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
 		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
 
 		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
@@ -445,46 +284,122 @@
 		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
 		dmae->comp_addr_hi = 0;
 		dmae->comp_val = 1;
+	} else {
+		u32 tx_src_addr_lo, rx_src_addr_lo;
+		u16 rx_len, tx_len;
+
+		/* configure the params according to MAC type */
+		switch (bp->link_vars.mac_type) {
+		case MAC_TYPE_BMAC:
+			mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
+					   NIG_REG_INGRESS_BMAC0_MEM);
+
+			/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
+			   BIGMAC_REGISTER_TX_STAT_GTBYT */
+			if (CHIP_IS_E1x(bp)) {
+				tx_src_addr_lo = (mac_addr +
+					BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
+				tx_len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
+					  BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
+				rx_src_addr_lo = (mac_addr +
+					BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
+				rx_len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
+					  BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
+			} else {
+				tx_src_addr_lo = (mac_addr +
+					BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
+				tx_len = (8 + BIGMAC2_REGISTER_TX_STAT_GTBYT -
+					  BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
+				rx_src_addr_lo = (mac_addr +
+					BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
+				rx_len = (8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ -
+					  BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
+			}
+			break;
+
+		case MAC_TYPE_UMAC: /* handled by MSTAT */
+		case MAC_TYPE_XMAC: /* handled by MSTAT */
+		default:
+			mac_addr = port ? GRCBASE_MSTAT1 : GRCBASE_MSTAT0;
+			tx_src_addr_lo = (mac_addr +
+					  MSTAT_REG_TX_STAT_GTXPOK_LO) >> 2;
+			rx_src_addr_lo = (mac_addr +
+					  MSTAT_REG_RX_STAT_GR64_LO) >> 2;
+			tx_len = sizeof(bp->slowpath->
+					mac_stats.mstat_stats.stats_tx) >> 2;
+			rx_len = sizeof(bp->slowpath->
+					mac_stats.mstat_stats.stats_rx) >> 2;
+			break;
+		}
+
+		/* TX stats */
+		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+		dmae->opcode = opcode;
+		dmae->src_addr_lo = tx_src_addr_lo;
+		dmae->src_addr_hi = 0;
+		dmae->len = tx_len;
+		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
+		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
+		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+		dmae->comp_addr_hi = 0;
+		dmae->comp_val = 1;
+
+		/* RX stats */
+		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+		dmae->opcode = opcode;
+		dmae->src_addr_hi = 0;
+		dmae->src_addr_lo = rx_src_addr_lo;
+		dmae->dst_addr_lo =
+			U64_LO(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2));
+		dmae->dst_addr_hi =
+			U64_HI(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2));
+		dmae->len = rx_len;
+		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+		dmae->comp_addr_hi = 0;
+		dmae->comp_val = 1;
 	}
 
 	/* NIG */
+	if (!CHIP_IS_E3(bp)) {
+		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+		dmae->opcode = opcode;
+		dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
+					    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
+		dmae->src_addr_hi = 0;
+		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
+				offsetof(struct nig_stats, egress_mac_pkt0_lo));
+		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
+				offsetof(struct nig_stats, egress_mac_pkt0_lo));
+		dmae->len = (2*sizeof(u32)) >> 2;
+		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+		dmae->comp_addr_hi = 0;
+		dmae->comp_val = 1;
+
+		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+		dmae->opcode = opcode;
+		dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
+					    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
+		dmae->src_addr_hi = 0;
+		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
+				offsetof(struct nig_stats, egress_mac_pkt1_lo));
+		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
+				offsetof(struct nig_stats, egress_mac_pkt1_lo));
+		dmae->len = (2*sizeof(u32)) >> 2;
+		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+		dmae->comp_addr_hi = 0;
+		dmae->comp_val = 1;
+	}
+
 	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
-	dmae->opcode = opcode;
+	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
+						 true, DMAE_COMP_PCI);
 	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
 				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
 	dmae->src_addr_hi = 0;
 	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
 	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
 	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
-	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
-	dmae->comp_addr_hi = 0;
-	dmae->comp_val = 1;
 
-	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
-	dmae->opcode = opcode;
-	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
-				    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
-	dmae->src_addr_hi = 0;
-	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
-			offsetof(struct nig_stats, egress_mac_pkt0_lo));
-	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
-			offsetof(struct nig_stats, egress_mac_pkt0_lo));
-	dmae->len = (2*sizeof(u32)) >> 2;
-	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
-	dmae->comp_addr_hi = 0;
-	dmae->comp_val = 1;
-
-	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
-	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
-					 true, DMAE_COMP_PCI);
-	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
-				    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
-	dmae->src_addr_hi = 0;
-	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
-			offsetof(struct nig_stats, egress_mac_pkt1_lo));
-	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
-			offsetof(struct nig_stats, egress_mac_pkt1_lo));
-	dmae->len = (2*sizeof(u32)) >> 2;
 	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
 	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
 	dmae->comp_val = DMAE_COMP_VAL;
@@ -566,7 +481,8 @@
 		UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
 		UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
 		UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
-		UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
+		UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);
+
 		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
 		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
 		UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
@@ -580,13 +496,13 @@
 				tx_stat_etherstatspkts512octetsto1023octets);
 		UPDATE_STAT64(tx_stat_gt1518,
 				tx_stat_etherstatspkts1024octetsto1522octets);
-		UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
-		UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
-		UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
-		UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
+		UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
+		UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
+		UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
+		UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
 		UPDATE_STAT64(tx_stat_gterr,
 				tx_stat_dot3statsinternalmactransmiterrors);
-		UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
+		UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);
 
 	} else {
 		struct bmac2_stats *new = bnx2x_sp(bp, mac_stats.bmac2_stats);
@@ -600,7 +516,7 @@
 		UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
 		UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
 		UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
-		UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
+		UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);
 		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
 		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
 		UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
@@ -614,19 +530,96 @@
 				tx_stat_etherstatspkts512octetsto1023octets);
 		UPDATE_STAT64(tx_stat_gt1518,
 				tx_stat_etherstatspkts1024octetsto1522octets);
-		UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
-		UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
-		UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
-		UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
+		UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
+		UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
+		UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
+		UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
 		UPDATE_STAT64(tx_stat_gterr,
 				tx_stat_dot3statsinternalmactransmiterrors);
-		UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
+		UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);
 	}
 
 	estats->pause_frames_received_hi =
-				pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
+				pstats->mac_stx[1].rx_stat_mac_xpf_hi;
 	estats->pause_frames_received_lo =
-				pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
+				pstats->mac_stx[1].rx_stat_mac_xpf_lo;
+
+	estats->pause_frames_sent_hi =
+				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
+	estats->pause_frames_sent_lo =
+				pstats->mac_stx[1].tx_stat_outxoffsent_lo;
+}
+
+static void bnx2x_mstat_stats_update(struct bnx2x *bp)
+{
+	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
+	struct bnx2x_eth_stats *estats = &bp->eth_stats;
+
+	struct mstat_stats *new = bnx2x_sp(bp, mac_stats.mstat_stats);
+
+	ADD_STAT64(stats_rx.rx_grerb, rx_stat_ifhcinbadoctets);
+	ADD_STAT64(stats_rx.rx_grfcs, rx_stat_dot3statsfcserrors);
+	ADD_STAT64(stats_rx.rx_grund, rx_stat_etherstatsundersizepkts);
+	ADD_STAT64(stats_rx.rx_grovr, rx_stat_dot3statsframestoolong);
+	ADD_STAT64(stats_rx.rx_grfrg, rx_stat_etherstatsfragments);
+	ADD_STAT64(stats_rx.rx_grxcf, rx_stat_maccontrolframesreceived);
+	ADD_STAT64(stats_rx.rx_grxpf, rx_stat_xoffstateentered);
+	ADD_STAT64(stats_rx.rx_grxpf, rx_stat_mac_xpf);
+	ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent);
+	ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone);
+
+
+	ADD_STAT64(stats_tx.tx_gt127,
+			tx_stat_etherstatspkts65octetsto127octets);
+	ADD_STAT64(stats_tx.tx_gt255,
+			tx_stat_etherstatspkts128octetsto255octets);
+	ADD_STAT64(stats_tx.tx_gt511,
+			tx_stat_etherstatspkts256octetsto511octets);
+	ADD_STAT64(stats_tx.tx_gt1023,
+			tx_stat_etherstatspkts512octetsto1023octets);
+	ADD_STAT64(stats_tx.tx_gt1518,
+			tx_stat_etherstatspkts1024octetsto1522octets);
+	ADD_STAT64(stats_tx.tx_gt2047, tx_stat_mac_2047);
+
+	ADD_STAT64(stats_tx.tx_gt4095, tx_stat_mac_4095);
+	ADD_STAT64(stats_tx.tx_gt9216, tx_stat_mac_9216);
+	ADD_STAT64(stats_tx.tx_gt16383, tx_stat_mac_16383);
+
+	ADD_STAT64(stats_tx.tx_gterr,
+			tx_stat_dot3statsinternalmactransmiterrors);
+	ADD_STAT64(stats_tx.tx_gtufl, tx_stat_mac_ufl);
+
+	ADD_64(estats->etherstatspkts1024octetsto1522octets_hi,
+	       new->stats_tx.tx_gt1518_hi,
+	       estats->etherstatspkts1024octetsto1522octets_lo,
+	       new->stats_tx.tx_gt1518_lo);
+
+	ADD_64(estats->etherstatspktsover1522octets_hi,
+	       new->stats_tx.tx_gt2047_hi,
+	       estats->etherstatspktsover1522octets_lo,
+	       new->stats_tx.tx_gt2047_lo);
+
+	ADD_64(estats->etherstatspktsover1522octets_hi,
+	       new->stats_tx.tx_gt4095_hi,
+	       estats->etherstatspktsover1522octets_lo,
+	       new->stats_tx.tx_gt4095_lo);
+
+	ADD_64(estats->etherstatspktsover1522octets_hi,
+	       new->stats_tx.tx_gt9216_hi,
+	       estats->etherstatspktsover1522octets_lo,
+	       new->stats_tx.tx_gt9216_lo);
+
+	ADD_64(estats->etherstatspktsover1522octets_hi,
+	       new->stats_tx.tx_gt16383_hi,
+	       estats->etherstatspktsover1522octets_lo,
+	       new->stats_tx.tx_gt16383_lo);
+
+	estats->pause_frames_received_hi =
+				pstats->mac_stx[1].rx_stat_mac_xpf_hi;
+	estats->pause_frames_received_lo =
+				pstats->mac_stx[1].rx_stat_mac_xpf_lo;
 
 	estats->pause_frames_sent_hi =
 				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
@@ -702,15 +695,26 @@
 		u32 hi;
 	} diff;
 
-	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
+	switch (bp->link_vars.mac_type) {
+	case MAC_TYPE_BMAC:
 		bnx2x_bmac_stats_update(bp);
+		break;
 
-	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
+	case MAC_TYPE_EMAC:
 		bnx2x_emac_stats_update(bp);
+		break;
 
-	else { /* unreached */
+	case MAC_TYPE_UMAC:
+	case MAC_TYPE_XMAC:
+		bnx2x_mstat_stats_update(bp);
+		break;
+
+	case MAC_TYPE_NONE: /* unreached */
 		BNX2X_ERR("stats updated by DMAE but no MAC active\n");
 		return -1;
+
+	default: /* unreached */
+		BNX2X_ERR("Unknown MAC type\n");
 	}
 
 	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
@@ -718,9 +722,12 @@
 	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
 		      new->brb_truncate - old->brb_truncate);
 
-	UPDATE_STAT64_NIG(egress_mac_pkt0,
+	if (!CHIP_IS_E3(bp)) {
+		UPDATE_STAT64_NIG(egress_mac_pkt0,
 					etherstatspkts1024octetsto1522octets);
-	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
+		UPDATE_STAT64_NIG(egress_mac_pkt1,
+					etherstatspktsover1522octets);
+	}
 
 	memcpy(old, new, sizeof(struct nig_stats));
 
@@ -746,11 +753,13 @@
 
 static int bnx2x_storm_stats_update(struct bnx2x *bp)
 {
-	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
 	struct tstorm_per_port_stats *tport =
-					&stats->tstorm_common.port_statistics;
+				&bp->fw_stats_data->port.tstorm_port_statistics;
+	struct tstorm_per_pf_stats *tfunc =
+				&bp->fw_stats_data->pf.tstorm_pf_statistics;
 	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
 	struct bnx2x_eth_stats *estats = &bp->eth_stats;
+	struct stats_counter *counters = &bp->fw_stats_data->storm_counters;
 	int i;
 	u16 cur_stats_counter;
 
@@ -761,6 +770,35 @@
 	cur_stats_counter = bp->stats_counter - 1;
 	spin_unlock_bh(&bp->stats_lock);
 
+	/* are storm stats valid? */
+	if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) {
+		DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
+		   "  xstorm counter (0x%x) != stats_counter (0x%x)\n",
+		   le16_to_cpu(counters->xstats_counter), bp->stats_counter);
+		return -EAGAIN;
+	}
+
+	if (le16_to_cpu(counters->ustats_counter) != cur_stats_counter) {
+		DP(BNX2X_MSG_STATS, "stats not updated by ustorm"
+		   "  ustorm counter (0x%x) != stats_counter (0x%x)\n",
+		   le16_to_cpu(counters->ustats_counter), bp->stats_counter);
+		return -EAGAIN;
+	}
+
+	if (le16_to_cpu(counters->cstats_counter) != cur_stats_counter) {
+		DP(BNX2X_MSG_STATS, "stats not updated by cstorm"
+		   "  cstorm counter (0x%x) != stats_counter (0x%x)\n",
+		   le16_to_cpu(counters->cstats_counter), bp->stats_counter);
+		return -EAGAIN;
+	}
+
+	if (le16_to_cpu(counters->tstats_counter) != cur_stats_counter) {
+		DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
+		   "  tstorm counter (0x%x) != stats_counter (0x%x)\n",
+		   le16_to_cpu(counters->tstats_counter), bp->stats_counter);
+		return -EAGAIN;
+	}
+
 	memcpy(&(fstats->total_bytes_received_hi),
 	       &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
 	       sizeof(struct host_func_stats) - 2*sizeof(u32));
@@ -770,94 +808,84 @@
 	estats->etherstatsoverrsizepkts_lo = 0;
 	estats->no_buff_discard_hi = 0;
 	estats->no_buff_discard_lo = 0;
+	estats->total_tpa_aggregations_hi = 0;
+	estats->total_tpa_aggregations_lo = 0;
+	estats->total_tpa_aggregated_frames_hi = 0;
+	estats->total_tpa_aggregated_frames_lo = 0;
+	estats->total_tpa_bytes_hi = 0;
+	estats->total_tpa_bytes_lo = 0;
 
 	for_each_eth_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
-		int cl_id = fp->cl_id;
-		struct tstorm_per_client_stats *tclient =
-				&stats->tstorm_common.client_statistics[cl_id];
-		struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
-		struct ustorm_per_client_stats *uclient =
-				&stats->ustorm_common.client_statistics[cl_id];
-		struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
-		struct xstorm_per_client_stats *xclient =
-				&stats->xstorm_common.client_statistics[cl_id];
-		struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
+		struct tstorm_per_queue_stats *tclient =
+			&bp->fw_stats_data->queue_stats[i].
+			tstorm_queue_statistics;
+		struct tstorm_per_queue_stats *old_tclient = &fp->old_tclient;
+		struct ustorm_per_queue_stats *uclient =
+			&bp->fw_stats_data->queue_stats[i].
+			ustorm_queue_statistics;
+		struct ustorm_per_queue_stats *old_uclient = &fp->old_uclient;
+		struct xstorm_per_queue_stats *xclient =
+			&bp->fw_stats_data->queue_stats[i].
+			xstorm_queue_statistics;
+		struct xstorm_per_queue_stats *old_xclient = &fp->old_xclient;
 		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
 		u32 diff;
 
-		/* are storm stats valid? */
-		if (le16_to_cpu(xclient->stats_counter) != cur_stats_counter) {
-			DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
-			   "  xstorm counter (0x%x) != stats_counter (0x%x)\n",
-			   i, xclient->stats_counter, cur_stats_counter + 1);
-			return -1;
-		}
-		if (le16_to_cpu(tclient->stats_counter) != cur_stats_counter) {
-			DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
-			   "  tstorm counter (0x%x) != stats_counter (0x%x)\n",
-			   i, tclient->stats_counter, cur_stats_counter + 1);
-			return -2;
-		}
-		if (le16_to_cpu(uclient->stats_counter) != cur_stats_counter) {
-			DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
-			   "  ustorm counter (0x%x) != stats_counter (0x%x)\n",
-			   i, uclient->stats_counter, cur_stats_counter + 1);
-			return -4;
-		}
+		DP(BNX2X_MSG_STATS, "queue[%d]: ucast_sent 0x%x, "
+				    "bcast_sent 0x%x mcast_sent 0x%x\n",
+		   i, xclient->ucast_pkts_sent,
+		   xclient->bcast_pkts_sent, xclient->mcast_pkts_sent);
 
+		DP(BNX2X_MSG_STATS, "---------------\n");
+
+		qstats->total_broadcast_bytes_received_hi =
+			le32_to_cpu(tclient->rcv_bcast_bytes.hi);
+		qstats->total_broadcast_bytes_received_lo =
+			le32_to_cpu(tclient->rcv_bcast_bytes.lo);
+
+		qstats->total_multicast_bytes_received_hi =
+			le32_to_cpu(tclient->rcv_mcast_bytes.hi);
+		qstats->total_multicast_bytes_received_lo =
+			le32_to_cpu(tclient->rcv_mcast_bytes.lo);
+
+		qstats->total_unicast_bytes_received_hi =
+			le32_to_cpu(tclient->rcv_ucast_bytes.hi);
+		qstats->total_unicast_bytes_received_lo =
+			le32_to_cpu(tclient->rcv_ucast_bytes.lo);
+
+		/*
+		 * sum to total_bytes_received all
+		 * unicast/multicast/broadcast
+		 */
 		qstats->total_bytes_received_hi =
-			le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
+			qstats->total_broadcast_bytes_received_hi;
 		qstats->total_bytes_received_lo =
-			le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
+			qstats->total_broadcast_bytes_received_lo;
 
 		ADD_64(qstats->total_bytes_received_hi,
-		       le32_to_cpu(tclient->rcv_multicast_bytes.hi),
+		       qstats->total_multicast_bytes_received_hi,
 		       qstats->total_bytes_received_lo,
-		       le32_to_cpu(tclient->rcv_multicast_bytes.lo));
+		       qstats->total_multicast_bytes_received_lo);
 
 		ADD_64(qstats->total_bytes_received_hi,
-		       le32_to_cpu(tclient->rcv_unicast_bytes.hi),
+		       qstats->total_unicast_bytes_received_hi,
 		       qstats->total_bytes_received_lo,
-		       le32_to_cpu(tclient->rcv_unicast_bytes.lo));
-
-		SUB_64(qstats->total_bytes_received_hi,
-		       le32_to_cpu(uclient->bcast_no_buff_bytes.hi),
-		       qstats->total_bytes_received_lo,
-		       le32_to_cpu(uclient->bcast_no_buff_bytes.lo));
-
-		SUB_64(qstats->total_bytes_received_hi,
-		       le32_to_cpu(uclient->mcast_no_buff_bytes.hi),
-		       qstats->total_bytes_received_lo,
-		       le32_to_cpu(uclient->mcast_no_buff_bytes.lo));
-
-		SUB_64(qstats->total_bytes_received_hi,
-		       le32_to_cpu(uclient->ucast_no_buff_bytes.hi),
-		       qstats->total_bytes_received_lo,
-		       le32_to_cpu(uclient->ucast_no_buff_bytes.lo));
+		       qstats->total_unicast_bytes_received_lo);
 
 		qstats->valid_bytes_received_hi =
 					qstats->total_bytes_received_hi;
 		qstats->valid_bytes_received_lo =
 					qstats->total_bytes_received_lo;
 
-		qstats->error_bytes_received_hi =
-				le32_to_cpu(tclient->rcv_error_bytes.hi);
-		qstats->error_bytes_received_lo =
-				le32_to_cpu(tclient->rcv_error_bytes.lo);
 
-		ADD_64(qstats->total_bytes_received_hi,
-		       qstats->error_bytes_received_hi,
-		       qstats->total_bytes_received_lo,
-		       qstats->error_bytes_received_lo);
-
-		UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
+		UPDATE_EXTEND_TSTAT(rcv_ucast_pkts,
 					total_unicast_packets_received);
-		UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
+		UPDATE_EXTEND_TSTAT(rcv_mcast_pkts,
 					total_multicast_packets_received);
-		UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
+		UPDATE_EXTEND_TSTAT(rcv_bcast_pkts,
 					total_broadcast_packets_received);
-		UPDATE_EXTEND_TSTAT(packets_too_big_discard,
+		UPDATE_EXTEND_TSTAT(pkts_too_big_discard,
 					etherstatsoverrsizepkts);
 		UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
 
@@ -871,30 +899,78 @@
 		UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
 		UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
 
+		qstats->total_broadcast_bytes_transmitted_hi =
+			le32_to_cpu(xclient->bcast_bytes_sent.hi);
+		qstats->total_broadcast_bytes_transmitted_lo =
+			le32_to_cpu(xclient->bcast_bytes_sent.lo);
+
+		qstats->total_multicast_bytes_transmitted_hi =
+			le32_to_cpu(xclient->mcast_bytes_sent.hi);
+		qstats->total_multicast_bytes_transmitted_lo =
+			le32_to_cpu(xclient->mcast_bytes_sent.lo);
+
+		qstats->total_unicast_bytes_transmitted_hi =
+			le32_to_cpu(xclient->ucast_bytes_sent.hi);
+		qstats->total_unicast_bytes_transmitted_lo =
+			le32_to_cpu(xclient->ucast_bytes_sent.lo);
+		/*
+		 * sum to total_bytes_transmitted all
+		 * unicast/multicast/broadcast
+		 */
 		qstats->total_bytes_transmitted_hi =
-				le32_to_cpu(xclient->unicast_bytes_sent.hi);
+				qstats->total_unicast_bytes_transmitted_hi;
 		qstats->total_bytes_transmitted_lo =
-				le32_to_cpu(xclient->unicast_bytes_sent.lo);
+				qstats->total_unicast_bytes_transmitted_lo;
 
 		ADD_64(qstats->total_bytes_transmitted_hi,
-		       le32_to_cpu(xclient->multicast_bytes_sent.hi),
+		       qstats->total_broadcast_bytes_transmitted_hi,
 		       qstats->total_bytes_transmitted_lo,
-		       le32_to_cpu(xclient->multicast_bytes_sent.lo));
+		       qstats->total_broadcast_bytes_transmitted_lo);
 
 		ADD_64(qstats->total_bytes_transmitted_hi,
-		       le32_to_cpu(xclient->broadcast_bytes_sent.hi),
+		       qstats->total_multicast_bytes_transmitted_hi,
 		       qstats->total_bytes_transmitted_lo,
-		       le32_to_cpu(xclient->broadcast_bytes_sent.lo));
+		       qstats->total_multicast_bytes_transmitted_lo);
 
-		UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
+		UPDATE_EXTEND_XSTAT(ucast_pkts_sent,
 					total_unicast_packets_transmitted);
-		UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
+		UPDATE_EXTEND_XSTAT(mcast_pkts_sent,
 					total_multicast_packets_transmitted);
-		UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
+		UPDATE_EXTEND_XSTAT(bcast_pkts_sent,
 					total_broadcast_packets_transmitted);
 
-		old_tclient->checksum_discard = tclient->checksum_discard;
-		old_tclient->ttl0_discard = tclient->ttl0_discard;
+		UPDATE_EXTEND_TSTAT(checksum_discard,
+				    total_packets_received_checksum_discarded);
+		UPDATE_EXTEND_TSTAT(ttl0_discard,
+				    total_packets_received_ttl0_discarded);
+
+		UPDATE_EXTEND_XSTAT(error_drop_pkts,
+				    total_transmitted_dropped_packets_error);
+
+		/* TPA aggregations completed */
+		UPDATE_EXTEND_USTAT(coalesced_events, total_tpa_aggregations);
+		/* Number of network frames aggregated by TPA */
+		UPDATE_EXTEND_USTAT(coalesced_pkts,
+				    total_tpa_aggregated_frames);
+		/* Total number of bytes in completed TPA aggregations */
+		qstats->total_tpa_bytes_lo =
+			le32_to_cpu(uclient->coalesced_bytes.lo);
+		qstats->total_tpa_bytes_hi =
+			le32_to_cpu(uclient->coalesced_bytes.hi);
+
+		/* TPA stats per-function */
+		ADD_64(estats->total_tpa_aggregations_hi,
+		       qstats->total_tpa_aggregations_hi,
+		       estats->total_tpa_aggregations_lo,
+		       qstats->total_tpa_aggregations_lo);
+		ADD_64(estats->total_tpa_aggregated_frames_hi,
+		       qstats->total_tpa_aggregated_frames_hi,
+		       estats->total_tpa_aggregated_frames_lo,
+		       qstats->total_tpa_aggregated_frames_lo);
+		ADD_64(estats->total_tpa_bytes_hi,
+		       qstats->total_tpa_bytes_hi,
+		       estats->total_tpa_bytes_lo,
+		       qstats->total_tpa_bytes_lo);
 
 		ADD_64(fstats->total_bytes_received_hi,
 		       qstats->total_bytes_received_hi,
@@ -933,10 +1009,6 @@
 		       fstats->valid_bytes_received_lo,
 		       qstats->valid_bytes_received_lo);
 
-		ADD_64(estats->error_bytes_received_hi,
-		       qstats->error_bytes_received_hi,
-		       estats->error_bytes_received_lo,
-		       qstats->error_bytes_received_lo);
 		ADD_64(estats->etherstatsoverrsizepkts_hi,
 		       qstats->etherstatsoverrsizepkts_hi,
 		       estats->etherstatsoverrsizepkts_lo,
@@ -950,9 +1022,19 @@
 	       fstats->total_bytes_received_lo,
 	       estats->rx_stat_ifhcinbadoctets_lo);
 
+	ADD_64(fstats->total_bytes_received_hi,
+	       tfunc->rcv_error_bytes.hi,
+	       fstats->total_bytes_received_lo,
+	       tfunc->rcv_error_bytes.lo);
+
 	memcpy(estats, &(fstats->total_bytes_received_hi),
 	       sizeof(struct host_func_stats) - 2*sizeof(u32));
 
+	ADD_64(estats->error_bytes_received_hi,
+	       tfunc->rcv_error_bytes.hi,
+	       estats->error_bytes_received_lo,
+	       tfunc->rcv_error_bytes.lo);
+
 	ADD_64(estats->etherstatsoverrsizepkts_hi,
 	       estats->rx_stat_dot3statsframestoolong_hi,
 	       estats->etherstatsoverrsizepkts_lo,
@@ -965,8 +1047,8 @@
 	if (bp->port.pmf) {
 		estats->mac_filter_discard =
 				le32_to_cpu(tport->mac_filter_discard);
-		estats->xxoverflow_discard =
-				le32_to_cpu(tport->xxoverflow_discard);
+		estats->mf_tag_discard =
+				le32_to_cpu(tport->mf_tag_discard);
 		estats->brb_truncate_discard =
 				le32_to_cpu(tport->brb_truncate_discard);
 		estats->mac_discard = le32_to_cpu(tport->mac_discard);
@@ -1023,7 +1105,7 @@
 	nstats->rx_frame_errors =
 		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
 	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
-	nstats->rx_missed_errors = estats->xxoverflow_discard;
+	nstats->rx_missed_errors = 0;
 
 	nstats->rx_errors = nstats->rx_length_errors +
 			    nstats->rx_over_errors +
@@ -1065,10 +1147,27 @@
 	}
 }
 
+static bool bnx2x_edebug_stats_stopped(struct bnx2x *bp)
+{
+	u32 val;
+
+	if (SHMEM2_HAS(bp, edebug_driver_if[1])) {
+		val = SHMEM2_RD(bp, edebug_driver_if[1]);
+
+		if (val == EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT)
+			return true;
+	}
+
+	return false;
+}
+
 static void bnx2x_stats_update(struct bnx2x *bp)
 {
 	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
 
+	if (bnx2x_edebug_stats_stopped(bp))
+		return;
+
 	if (*stats_comp != DMAE_COMP_VAL)
 		return;
 
@@ -1088,8 +1187,7 @@
 		struct bnx2x_eth_stats *estats = &bp->eth_stats;
 		int i;
 
-		printk(KERN_DEBUG "%s: brb drops %u  brb truncate %u\n",
-		       bp->dev->name,
+		netdev_dbg(bp->dev, "brb drops %u  brb truncate %u\n",
 		       estats->brb_drop_lo, estats->brb_truncate_lo);
 
 		for_each_eth_queue(bp, i) {
@@ -1149,6 +1247,7 @@
 		else
 			dmae->opcode = bnx2x_dmae_opcode_add_comp(
 						opcode, DMAE_COMP_PCI);
+
 		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
 		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
 		dmae->dst_addr_lo = bp->port.port_stx >> 2;
@@ -1235,13 +1334,9 @@
 void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
 {
 	enum bnx2x_stats_state state;
-
 	if (unlikely(bp->panic))
 		return;
-
 	bnx2x_stats_stm[bp->stats_state][event].action(bp);
-
-	/* Protect a state change flow */
 	spin_lock_bh(&bp->stats_lock);
 	state = bp->stats_state;
 	bp->stats_state = bnx2x_stats_stm[state][event].next_state;
@@ -1297,7 +1392,7 @@
 	func_stx = bp->func_stx;
 
 	for (vn = VN_0; vn < vn_max; vn++) {
-		int mb_idx = !CHIP_IS_E2(bp) ? 2*vn + BP_PORT(bp) : vn;
+		int mb_idx = CHIP_IS_E1x(bp) ? 2*vn + BP_PORT(bp) : vn;
 
 		bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);
 		bnx2x_func_stats_init(bp);
@@ -1339,12 +1434,97 @@
 	bnx2x_stats_comp(bp);
 }
 
+/**
+ * This function prepares the statistics ramrod data so that later
+ * we only have to increment the statistics counter and send the
+ * ramrod each time we need to.
+ *
+ * @param bp
+ */
+static inline void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
+{
+	int i;
+	struct stats_query_header *stats_hdr = &bp->fw_stats_req->hdr;
+
+	dma_addr_t cur_data_offset;
+	struct stats_query_entry *cur_query_entry;
+
+	stats_hdr->cmd_num = bp->fw_stats_num;
+	stats_hdr->drv_stats_counter = 0;
+
+	/* The storm_counters struct contains the counters of completed
+	 * statistics requests per storm, which are incremented by the FW
+	 * each time it completes handling a statistics ramrod. We will
+	 * check these counters in the timer handler and discard a
+	 * stale (statistics) ramrod completion.
+	 */
+	cur_data_offset = bp->fw_stats_data_mapping +
+		offsetof(struct bnx2x_fw_stats_data, storm_counters);
+
+	stats_hdr->stats_counters_addrs.hi =
+		cpu_to_le32(U64_HI(cur_data_offset));
+	stats_hdr->stats_counters_addrs.lo =
+		cpu_to_le32(U64_LO(cur_data_offset));
+
+	/* prepare for the first stats ramrod (it will be completed with
+	 * the counters equal to zero) - init the counters to something
+	 * different.
+	 */
+	memset(&bp->fw_stats_data->storm_counters, 0xff,
+	       sizeof(struct stats_counter));
+
+	/**** Port FW statistics data ****/
+	cur_data_offset = bp->fw_stats_data_mapping +
+		offsetof(struct bnx2x_fw_stats_data, port);
+
+	cur_query_entry = &bp->fw_stats_req->query[BNX2X_PORT_QUERY_IDX];
+
+	cur_query_entry->kind = STATS_TYPE_PORT;
+	/* For a port query, index is a DON'T CARE */
+	cur_query_entry->index = BP_PORT(bp);
+	/* For a port query, funcID is a DON'T CARE */
+	cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
+	cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
+	cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));
+
+	/**** PF FW statistics data ****/
+	cur_data_offset = bp->fw_stats_data_mapping +
+		offsetof(struct bnx2x_fw_stats_data, pf);
+
+	cur_query_entry = &bp->fw_stats_req->query[BNX2X_PF_QUERY_IDX];
+
+	cur_query_entry->kind = STATS_TYPE_PF;
+	/* For a PF query, index is a DON'T CARE */
+	cur_query_entry->index = BP_PORT(bp);
+	cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
+	cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
+	cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));
+
+	/**** Clients' queries ****/
+	cur_data_offset = bp->fw_stats_data_mapping +
+		offsetof(struct bnx2x_fw_stats_data, queue_stats);
+
+	for_each_eth_queue(bp, i) {
+		cur_query_entry =
+			&bp->fw_stats_req->
+					query[BNX2X_FIRST_QUEUE_QUERY_IDX + i];
+
+		cur_query_entry->kind = STATS_TYPE_QUEUE;
+		cur_query_entry->index = bnx2x_stats_id(&bp->fp[i]);
+		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
+		cur_query_entry->address.hi =
+			cpu_to_le32(U64_HI(cur_data_offset));
+		cur_query_entry->address.lo =
+			cpu_to_le32(U64_LO(cur_data_offset));
+
+		cur_data_offset += sizeof(struct per_queue_stats);
+	}
+}
+
 void bnx2x_stats_init(struct bnx2x *bp)
 {
-	int port = BP_PORT(bp);
+	int port = BP_PORT(bp);
 	int mb_idx = BP_FW_MB_IDX(bp);
 	int i;
-	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
 
 	bp->stats_pending = 0;
 	bp->executer_idx = 0;
@@ -1362,45 +1542,35 @@
 	DP(BNX2X_MSG_STATS, "port_stx 0x%x  func_stx 0x%x\n",
 	   bp->port.port_stx, bp->func_stx);
 
 	/* port stats */
 	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
 	bp->port.old_nig_stats.brb_discard =
 			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
 	bp->port.old_nig_stats.brb_truncate =
 			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
-	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
-		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
-	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
-		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
+	if (!CHIP_IS_E3(bp)) {
+		REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
+			    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
+		REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
+			    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
+	}
 
 	/* function stats */
 	for_each_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
 
-		memset(&fp->old_tclient, 0,
-		       sizeof(struct tstorm_per_client_stats));
-		memset(&fp->old_uclient, 0,
-		       sizeof(struct ustorm_per_client_stats));
-		memset(&fp->old_xclient, 0,
-		       sizeof(struct xstorm_per_client_stats));
-		memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
+		memset(&fp->old_tclient, 0, sizeof(fp->old_tclient));
+		memset(&fp->old_uclient, 0, sizeof(fp->old_uclient));
+		memset(&fp->old_xclient, 0, sizeof(fp->old_xclient));
+		memset(&fp->eth_q_stats, 0, sizeof(fp->eth_q_stats));
 	}
 
-	/* FW stats are currently collected for ETH clients only */
-	for_each_eth_queue(bp, i) {
-		/* Set initial stats counter in the stats ramrod data to -1 */
-		int cl_id = bp->fp[i].cl_id;
+	/* Prepare statistics ramrod data */
+	bnx2x_prep_fw_stats_req(bp);
 
-		stats->xstorm_common.client_statistics[cl_id].
-			stats_counter = 0xffff;
-		stats->ustorm_common.client_statistics[cl_id].
-			stats_counter = 0xffff;
-		stats->tstorm_common.client_statistics[cl_id].
-			stats_counter = 0xffff;
-	}
-
-	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
-	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
+	memset(&bp->dev->stats, 0, sizeof(bp->dev->stats));
+	memset(&bp->eth_stats, 0, sizeof(bp->eth_stats));
 
 	bp->stats_state = STATS_STATE_DISABLED;
 
diff --git a/drivers/net/bnx2x/bnx2x_stats.h b/drivers/net/bnx2x/bnx2x_stats.h
index 45d14d8..5d8ce2f 100644
--- a/drivers/net/bnx2x/bnx2x_stats.h
+++ b/drivers/net/bnx2x/bnx2x_stats.h
@@ -14,48 +14,11 @@
  * Statistics and Link management by Yitchak Gertner
  *
  */
-
 #ifndef BNX2X_STATS_H
 #define BNX2X_STATS_H
 
 #include <linux/types.h>
 
-struct bnx2x_eth_q_stats {
-	u32 total_bytes_received_hi;
-	u32 total_bytes_received_lo;
-	u32 total_bytes_transmitted_hi;
-	u32 total_bytes_transmitted_lo;
-	u32 total_unicast_packets_received_hi;
-	u32 total_unicast_packets_received_lo;
-	u32 total_multicast_packets_received_hi;
-	u32 total_multicast_packets_received_lo;
-	u32 total_broadcast_packets_received_hi;
-	u32 total_broadcast_packets_received_lo;
-	u32 total_unicast_packets_transmitted_hi;
-	u32 total_unicast_packets_transmitted_lo;
-	u32 total_multicast_packets_transmitted_hi;
-	u32 total_multicast_packets_transmitted_lo;
-	u32 total_broadcast_packets_transmitted_hi;
-	u32 total_broadcast_packets_transmitted_lo;
-	u32 valid_bytes_received_hi;
-	u32 valid_bytes_received_lo;
-
-	u32 error_bytes_received_hi;
-	u32 error_bytes_received_lo;
-	u32 etherstatsoverrsizepkts_hi;
-	u32 etherstatsoverrsizepkts_lo;
-	u32 no_buff_discard_hi;
-	u32 no_buff_discard_lo;
-
-	u32 driver_xoff;
-	u32 rx_err_discard_pkt;
-	u32 rx_skb_alloc_failed;
-	u32 hw_csum_err;
-};
-
-#define Q_STATS_OFFSET32(stat_name) \
-			(offsetof(struct bnx2x_eth_q_stats, stat_name) / 4)
-
 struct nig_stats {
 	u32 brb_discard;
 	u32 brb_packet;
@@ -212,7 +175,7 @@
 	u32 brb_truncate_lo;
 
 	u32 mac_filter_discard;
-	u32 xxoverflow_discard;
+	u32 mf_tag_discard;
 	u32 brb_truncate_discard;
 	u32 mac_discard;
 
@@ -222,16 +185,197 @@
 	u32 hw_csum_err;
 
 	u32 nig_timer_max;
+
+	/* TPA */
+	u32 total_tpa_aggregations_hi;
+	u32 total_tpa_aggregations_lo;
+	u32 total_tpa_aggregated_frames_hi;
+	u32 total_tpa_aggregated_frames_lo;
+	u32 total_tpa_bytes_hi;
+	u32 total_tpa_bytes_lo;
 };
 
-#define STATS_OFFSET32(stat_name) \
-			(offsetof(struct bnx2x_eth_stats, stat_name) / 4)
 
-/* Forward declaration */
+struct bnx2x_eth_q_stats {
+	u32 total_unicast_bytes_received_hi;
+	u32 total_unicast_bytes_received_lo;
+	u32 total_broadcast_bytes_received_hi;
+	u32 total_broadcast_bytes_received_lo;
+	u32 total_multicast_bytes_received_hi;
+	u32 total_multicast_bytes_received_lo;
+	u32 total_bytes_received_hi;
+	u32 total_bytes_received_lo;
+	u32 total_unicast_bytes_transmitted_hi;
+	u32 total_unicast_bytes_transmitted_lo;
+	u32 total_broadcast_bytes_transmitted_hi;
+	u32 total_broadcast_bytes_transmitted_lo;
+	u32 total_multicast_bytes_transmitted_hi;
+	u32 total_multicast_bytes_transmitted_lo;
+	u32 total_bytes_transmitted_hi;
+	u32 total_bytes_transmitted_lo;
+	u32 total_unicast_packets_received_hi;
+	u32 total_unicast_packets_received_lo;
+	u32 total_multicast_packets_received_hi;
+	u32 total_multicast_packets_received_lo;
+	u32 total_broadcast_packets_received_hi;
+	u32 total_broadcast_packets_received_lo;
+	u32 total_unicast_packets_transmitted_hi;
+	u32 total_unicast_packets_transmitted_lo;
+	u32 total_multicast_packets_transmitted_hi;
+	u32 total_multicast_packets_transmitted_lo;
+	u32 total_broadcast_packets_transmitted_hi;
+	u32 total_broadcast_packets_transmitted_lo;
+	u32 valid_bytes_received_hi;
+	u32 valid_bytes_received_lo;
+
+	u32 etherstatsoverrsizepkts_hi;
+	u32 etherstatsoverrsizepkts_lo;
+	u32 no_buff_discard_hi;
+	u32 no_buff_discard_lo;
+
+	u32 driver_xoff;
+	u32 rx_err_discard_pkt;
+	u32 rx_skb_alloc_failed;
+	u32 hw_csum_err;
+
+	u32 total_packets_received_checksum_discarded_hi;
+	u32 total_packets_received_checksum_discarded_lo;
+	u32 total_packets_received_ttl0_discarded_hi;
+	u32 total_packets_received_ttl0_discarded_lo;
+	u32 total_transmitted_dropped_packets_error_hi;
+	u32 total_transmitted_dropped_packets_error_lo;
+
+	/* TPA */
+	u32 total_tpa_aggregations_hi;
+	u32 total_tpa_aggregations_lo;
+	u32 total_tpa_aggregated_frames_hi;
+	u32 total_tpa_aggregated_frames_lo;
+	u32 total_tpa_bytes_hi;
+	u32 total_tpa_bytes_lo;
+};
+
+/****************************************************************************
+* Macros
+****************************************************************************/
+
+/* sum[hi:lo] += add[hi:lo] */
+#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
+	do { \
+		s_lo += a_lo; \
+		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
+	} while (0)
+
+/* difference = minuend - subtrahend */
+#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
+	do { \
+		if (m_lo < s_lo) { \
+			/* underflow */ \
+			d_hi = m_hi - s_hi; \
+			if (d_hi > 0) { \
+				/* we can 'loan' 1 */ \
+				d_hi--; \
+				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
+			} else { \
+				/* m_hi <= s_hi */ \
+				d_hi = 0; \
+				d_lo = 0; \
+			} \
+		} else { \
+			/* m_lo >= s_lo */ \
+			if (m_hi < s_hi) { \
+				d_hi = 0; \
+				d_lo = 0; \
+			} else { \
+				/* m_hi >= s_hi */ \
+				d_hi = m_hi - s_hi; \
+				d_lo = m_lo - s_lo; \
+			} \
+		} \
+	} while (0)
+
+#define UPDATE_STAT64(s, t) \
+	do { \
+		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
+			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
+		pstats->mac_stx[0].t##_hi = new->s##_hi; \
+		pstats->mac_stx[0].t##_lo = new->s##_lo; \
+		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
+		       pstats->mac_stx[1].t##_lo, diff.lo); \
+	} while (0)
+
+#define UPDATE_STAT64_NIG(s, t) \
+	do { \
+		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
+			diff.lo, new->s##_lo, old->s##_lo); \
+		ADD_64(estats->t##_hi, diff.hi, \
+		       estats->t##_lo, diff.lo); \
+	} while (0)
+
+/* sum[hi:lo] += add */
+#define ADD_EXTEND_64(s_hi, s_lo, a) \
+	do { \
+		s_lo += a; \
+		s_hi += (s_lo < a) ? 1 : 0; \
+	} while (0)
+
+#define ADD_STAT64(diff, t) \
+	do { \
+		ADD_64(pstats->mac_stx[1].t##_hi, new->diff##_hi, \
+		       pstats->mac_stx[1].t##_lo, new->diff##_lo); \
+	} while (0)
+
+#define UPDATE_EXTEND_STAT(s) \
+	do { \
+		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
+			      pstats->mac_stx[1].s##_lo, \
+			      new->s); \
+	} while (0)
+
+#define UPDATE_EXTEND_TSTAT(s, t) \
+	do { \
+		diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
+		old_tclient->s = tclient->s; \
+		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
+	} while (0)
+
+#define UPDATE_EXTEND_USTAT(s, t) \
+	do { \
+		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
+		old_uclient->s = uclient->s; \
+		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
+	} while (0)
+
+#define UPDATE_EXTEND_XSTAT(s, t) \
+	do { \
+		diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
+		old_xclient->s = xclient->s; \
+		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
+	} while (0)
+
+/* minuend -= subtrahend */
+#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
+	do { \
+		DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
+	} while (0)
+
+/* minuend[hi:lo] -= subtrahend */
+#define SUB_EXTEND_64(m_hi, m_lo, s) \
+	do { \
+		SUB_64(m_hi, 0, m_lo, s); \
+	} while (0)
+
+#define SUB_EXTEND_USTAT(s, t) \
+	do { \
+		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
+		SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
+	} while (0)
+
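/* A small illustration (hypothetical values) of the split hi/lo
 * arithmetic: ADD_64 detects low-word wrap-around by comparing the
 * new low word against the addend and carries into the high word.
 */
static inline void example_add64_carry(void)
{
	u32 s_hi = 0, s_lo = 0xfffffff0;

	ADD_64(s_hi, 0, s_lo, 0x20);
	/* s_lo wrapped to 0x10 (< 0x20), so the carry fired:
	 * s_hi == 1, s_lo == 0x10 */
}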
+
+/* forward */
 struct bnx2x;
 
 void bnx2x_stats_init(struct bnx2x *bp);
 
-extern const u32 dmae_reg_go_c[];
+void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
 
 #endif /* BNX2X_STATS_H */
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index c7537abc..77da2e8 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -262,7 +262,7 @@
 	if (bond == NULL)
 		return BOND_AD_STABLE;
 
-	return BOND_AD_INFO(bond).agg_select_mode;
+	return bond->params.ad_select;
 }
 
 /**
@@ -1859,7 +1859,6 @@
 void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout)
 {
 	BOND_AD_INFO(bond).agg_select_timer = timeout;
-	BOND_AD_INFO(bond).agg_select_mode = bond->params.ad_select;
 }
 
 static u16 aggregator_identifier;
@@ -1868,11 +1867,10 @@
  * bond_3ad_initialize - initialize a bond's 802.3ad parameters and structures
  * @bond: bonding struct to work on
  * @tick_resolution: tick duration (millisecond resolution)
- * @lacp_fast: boolean. whether fast periodic should be used
  *
  * Can be called only after the mac address of the bond is set.
  */
-void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution, int lacp_fast)
+void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution)
 {
 	// check that the bond is not initialized yet
 	if (MAC_ADDRESS_COMPARE(&(BOND_AD_INFO(bond).system.sys_mac_addr),
@@ -1880,7 +1878,6 @@
 
 		aggregator_identifier = 0;
 
-		BOND_AD_INFO(bond).lacp_fast = lacp_fast;
 		BOND_AD_INFO(bond).system.sys_priority = 0xFFFF;
 		BOND_AD_INFO(bond).system.sys_mac_addr = *((struct mac_addr *)bond->dev->dev_addr);
 
@@ -1918,7 +1915,7 @@
 		// port initialization
 		port = &(SLAVE_AD_INFO(slave).port);
 
-		ad_initialize_port(port, BOND_AD_INFO(bond).lacp_fast);
+		ad_initialize_port(port, bond->params.lacp_fast);
 
 		port->slave = slave;
 		port->actor_port_number = SLAVE_AD_INFO(slave).id;
@@ -2473,3 +2470,34 @@
 	bond_3ad_rx_indication((struct lacpdu *) skb->data, slave, skb->len);
 	read_unlock(&bond->lock);
 }
+
+/*
+ * When the lacp_rate parameter is modified via sysfs,
+ * update the actor_oper_port_state of each port.
+ *
+ * Hold slave->state_machine_lock so we can modify
+ * port->actor_oper_port_state whether the bond is up or down.
+ */
+void bond_3ad_update_lacp_rate(struct bonding *bond)
+{
+	int i;
+	struct slave *slave;
+	struct port *port = NULL;
+	int lacp_fast;
+
+	read_lock(&bond->lock);
+	lacp_fast = bond->params.lacp_fast;
+
+	bond_for_each_slave(bond, slave, i) {
+		port = &(SLAVE_AD_INFO(slave).port);
+		__get_state_machine_lock(port);
+		if (lacp_fast)
+			port->actor_oper_port_state |= AD_STATE_LACP_TIMEOUT;
+		else
+			port->actor_oper_port_state &= ~AD_STATE_LACP_TIMEOUT;
+		__release_state_machine_lock(port);
+	}
+
+	read_unlock(&bond->lock);
+}
diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h
index 0ee3f16..235b2cc 100644
--- a/drivers/net/bonding/bond_3ad.h
+++ b/drivers/net/bonding/bond_3ad.h
@@ -253,11 +253,6 @@
 struct ad_bond_info {
 	struct ad_system system;	    /* 802.3ad system structure */
 	u32 agg_select_timer;	    // Timer to select aggregator after all adapter's hand shakes
-	u32 agg_select_mode;	    // Mode of selection of active aggregator(bandwidth/count)
-	int lacp_fast;		/* whether fast periodic tx should be
-				 * requested
-				 */
-	struct timer_list ad_timer;
 };
 
 struct ad_slave_info {
@@ -269,7 +264,7 @@
 };
 
 // ================= AD Exported functions to the main bonding code ==================
-void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution, int lacp_fast);
+void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution);
 int  bond_3ad_bind_slave(struct slave *slave);
 void bond_3ad_unbind_slave(struct slave *slave);
 void bond_3ad_state_machine_handler(struct work_struct *);
@@ -282,5 +277,6 @@
 void bond_3ad_lacpdu_recv(struct sk_buff *skb, struct bonding *bond,
 			  struct slave *slave);
 int bond_3ad_set_carrier(struct bonding *bond);
+void bond_3ad_update_lacp_rate(struct bonding *bond);
 #endif //__BOND_3AD_H__
 
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index eafe44a..d117280 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -329,16 +329,6 @@
 
 			kfree(vlan);
 
-			if (list_empty(&bond->vlan_list) &&
-			    (bond->slave_cnt == 0)) {
-				/* Last VLAN removed and no slaves, so
-				 * restore block on adding VLANs. This will
-				 * be removed once new slaves that are not
-				 * VLAN challenged will be added.
-				 */
-				bond->dev->features |= NETIF_F_VLAN_CHALLENGED;
-			}
-
 			res = 0;
 			goto out;
 		}
@@ -634,15 +624,8 @@
 		return -1;
 
 	slave_speed = ethtool_cmd_speed(&etool);
-	switch (slave_speed) {
-	case SPEED_10:
-	case SPEED_100:
-	case SPEED_1000:
-	case SPEED_10000:
-		break;
-	default:
+	if (slave_speed == 0 || slave_speed == ((__u32) -1))
 		return -1;
-	}
 
 	switch (etool.duplex) {
 	case DUPLEX_FULL:
@@ -1856,8 +1839,7 @@
 			/* Initialize AD with the number of times that the AD timer is called in 1 second
 			 * can be called only after the mac address of the bond is set
 			 */
-			bond_3ad_initialize(bond, 1000/AD_TIMER_INTERVAL,
-					    bond->params.lacp_fast);
+			bond_3ad_initialize(bond, 1000/AD_TIMER_INTERVAL);
 		} else {
 			SLAVE_AD_INFO(new_slave).id =
 				SLAVE_AD_INFO(new_slave->prev).id + 1;
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 88fcb25..03d1196 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -804,6 +804,7 @@
 
 	if ((new_value == 1) || (new_value == 0)) {
 		bond->params.lacp_fast = new_value;
+		bond_3ad_update_lacp_rate(bond);
 		pr_info("%s: Setting LACP rate to %s (%d).\n",
 			bond->dev->name, bond_lacp_tbl[new_value].modename,
 			new_value);
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index ea1d005..382903f 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -240,7 +240,6 @@
 	struct   bond_params params;
 	struct   list_head vlan_list;
 	struct   vlan_group *vlgrp;
-	struct   packet_type arp_mon_pt;
 	struct   workqueue_struct *wq;
 	struct   delayed_work mii_work;
 	struct   delayed_work arp_work;
diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig
index 09ed3f4..abf4d7a 100644
--- a/drivers/net/caif/Kconfig
+++ b/drivers/net/caif/Kconfig
@@ -38,3 +38,12 @@
 	default n
 	---help---
 	The CAIF shared memory protocol driver for the STE UX5500 platform.
+
+config CAIF_HSI
+	tristate "CAIF HSI transport driver"
+	depends on CAIF
+	default n
+	---help---
+	  The CAIF low-level driver for CAIF over HSI.
+	  Be aware that if you enable this then you also need to
+	  enable a low-level HSI driver.
diff --git a/drivers/net/caif/Makefile b/drivers/net/caif/Makefile
index 9560b9d..91dff86 100644
--- a/drivers/net/caif/Makefile
+++ b/drivers/net/caif/Makefile
@@ -10,3 +10,6 @@
 # Shared memory
 caif_shm-objs := caif_shmcore.o caif_shm_u5500.o
 obj-$(CONFIG_CAIF_SHM) += caif_shm.o
+
+# HSI interface
+obj-$(CONFIG_CAIF_HSI) += caif_hsi.o
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
new file mode 100644
index 0000000..7a8ce612
--- /dev/null
+++ b/drivers/net/caif/caif_hsi.c
@@ -0,0 +1,1220 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
+ * Author:  Daniel Martensson / daniel.martensson@stericsson.com
+ *	    Dmitry.Tarnyagin  / dmitry.tarnyagin@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+
+#include <linux/version.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+#include <linux/string.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/if_arp.h>
+#include <linux/timer.h>
+#include <net/caif/caif_layer.h>
+#include <net/caif/caif_hsi.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Daniel Martensson<daniel.martensson@stericsson.com>");
+MODULE_DESCRIPTION("CAIF HSI driver");
+
+/* Returns the number of padding bytes for alignment. */
+#define PAD_POW2(x, pow) ((((x)&((pow)-1)) == 0) ? 0 :\
+				(((pow)-((x)&((pow)-1)))))
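+
+/*
+ * Example (editor's sketch): PAD_POW2(5, 4) == 3 and PAD_POW2(8, 4) == 0,
+ * so x + PAD_POW2(x, pow) is always a multiple of pow (pow a power of 2).
+ */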
+
+/*
+ * HSI padding options.
+ * Warning: must be a power of 2 (& operation used) and cannot be zero!
+ */
+static int hsi_head_align = 4;
+module_param(hsi_head_align, int, S_IRUGO);
+MODULE_PARM_DESC(hsi_head_align, "HSI head alignment.");
+
+static int hsi_tail_align = 4;
+module_param(hsi_tail_align, int, S_IRUGO);
+MODULE_PARM_DESC(hsi_tail_align, "HSI tail alignment.");
+
+/*
+ * HSI link layer flow-control thresholds.
+ * Warning: A high threshold value might increase throughput, but it will
+ * at the same time prevent channel prioritization and increase the risk
+ * of flooding the modem. The high threshold should be above the low one.
+ */
+static int hsi_high_threshold = 100;
+module_param(hsi_high_threshold, int, S_IRUGO);
+MODULE_PARM_DESC(hsi_high_threshold, "HSI high threshold (FLOW OFF).");
+
+static int hsi_low_threshold = 50;
+module_param(hsi_low_threshold, int, S_IRUGO);
+MODULE_PARM_DESC(hsi_low_threshold, "HSI low threshold (FLOW ON).");
+
+#define ON 1
+#define OFF 0
+
+/*
+ * Threshold values for the HSI packet queue. Flowcontrol will be asserted
+ * when the number of packets exceeds HIGH_WATER_MARK. It will not be
+ * de-asserted before the number of packets drops below LOW_WATER_MARK.
+ */
+#define LOW_WATER_MARK   hsi_low_threshold
+#define HIGH_WATER_MARK  hsi_high_threshold
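+
+/*
+ * Example (editor's note): with the default module parameters, flow off
+ * is signalled once the TX queue exceeds 100 packets and flow on is not
+ * signalled again until it has drained to 50 or fewer, giving simple
+ * hysteresis.
+ */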
+
+static LIST_HEAD(cfhsi_list);
+static spinlock_t cfhsi_list_lock;
+
+static void cfhsi_inactivity_tout(unsigned long arg)
+{
+	struct cfhsi *cfhsi = (struct cfhsi *)arg;
+
+	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
+		__func__);
+
+	/* Schedule power down work queue. */
+	if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
+		queue_work(cfhsi->wq, &cfhsi->wake_down_work);
+}
+
+static void cfhsi_abort_tx(struct cfhsi *cfhsi)
+{
+	struct sk_buff *skb;
+
+	for (;;) {
+		spin_lock_bh(&cfhsi->lock);
+		skb = skb_dequeue(&cfhsi->qhead);
+		if (!skb)
+			break;
+
+		cfhsi->ndev->stats.tx_errors++;
+		cfhsi->ndev->stats.tx_dropped++;
+		spin_unlock_bh(&cfhsi->lock);
+		kfree_skb(skb);
+	}
+	cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
+	if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
+		mod_timer(&cfhsi->timer, jiffies + CFHSI_INACTIVITY_TOUT);
+	spin_unlock_bh(&cfhsi->lock);
+}
+
+static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
+{
+	char buffer[32]; /* Any reasonable value */
+	size_t fifo_occupancy;
+	int ret;
+
+	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
+		__func__);
+
+	ret = cfhsi->dev->cfhsi_wake_up(cfhsi->dev);
+	if (ret) {
+		dev_warn(&cfhsi->ndev->dev,
+			"%s: can't wake up HSI interface: %d.\n",
+			__func__, ret);
+		return ret;
+	}
+
+	do {
+		ret = cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
+				&fifo_occupancy);
+		if (ret) {
+			dev_warn(&cfhsi->ndev->dev,
+				"%s: can't get FIFO occupancy: %d.\n",
+				__func__, ret);
+			break;
+		} else if (!fifo_occupancy)
+			/* No more data, exiting normally */
+			break;
+
+		fifo_occupancy = min(sizeof(buffer), fifo_occupancy);
+		set_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
+		ret = cfhsi->dev->cfhsi_rx(buffer, fifo_occupancy,
+				cfhsi->dev);
+		if (ret) {
+			clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
+			dev_warn(&cfhsi->ndev->dev,
+				"%s: can't read data: %d.\n",
+				__func__, ret);
+			break;
+		}
+
+		ret = 5 * HZ;
+		wait_event_interruptible_timeout(cfhsi->flush_fifo_wait,
+			 !test_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits), ret);
+
+		if (ret < 0) {
+			dev_warn(&cfhsi->ndev->dev,
+				"%s: can't wait for flush complete: %d.\n",
+				__func__, ret);
+			break;
+		} else if (!ret) {
+			ret = -ETIMEDOUT;
+			dev_warn(&cfhsi->ndev->dev,
+				"%s: timeout waiting for flush complete.\n",
+				__func__);
+			break;
+		}
+	} while (1);
+
+	cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
+
+	return ret;
+}
+
+static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
+{
+	int nfrms = 0;
+	int pld_len = 0;
+	struct sk_buff *skb;
+	u8 *pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
+
+	skb = skb_dequeue(&cfhsi->qhead);
+	if (!skb)
+		return 0;
+
+	/* Check if we can embed a CAIF frame. */
+	if (skb->len < CFHSI_MAX_EMB_FRM_SZ) {
+		struct caif_payload_info *info;
+		int hpad = 0;
+		int tpad = 0;
+
+		/* Calculate needed head alignment and tail alignment. */
+		info = (struct caif_payload_info *)&skb->cb;
+
+		hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align);
+		tpad = PAD_POW2((skb->len + hpad), hsi_tail_align);
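+
+		/*
+		 * Example (editor's sketch): with hdr_len == 4 and the
+		 * default 4-byte alignments, hpad = 1 + PAD_POW2(5, 4) = 4
+		 * and, for a 30-byte frame, tpad = PAD_POW2(34, 4) = 2,
+		 * keeping skb->len + hpad + tpad a multiple of 4.
+		 */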
+
+		/* Check if frame still fits with added alignment. */
+		if ((skb->len + hpad + tpad) <= CFHSI_MAX_EMB_FRM_SZ) {
+			u8 *pemb = desc->emb_frm;
+			desc->offset = CFHSI_DESC_SHORT_SZ;
+			*pemb = (u8)(hpad - 1);
+			pemb += hpad;
+
+			/* Update network statistics. */
+			cfhsi->ndev->stats.tx_packets++;
+			cfhsi->ndev->stats.tx_bytes += skb->len;
+
+			/* Copy in embedded CAIF frame. */
+			skb_copy_bits(skb, 0, pemb, skb->len);
+			consume_skb(skb);
+			skb = NULL;
+		}
+	} else
+		/* Clear offset. */
+		desc->offset = 0;
+
+	/* Create payload CAIF frames. */
+	pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
+	while (nfrms < CFHSI_MAX_PKTS) {
+		struct caif_payload_info *info;
+		int hpad = 0;
+		int tpad = 0;
+
+		if (!skb)
+			skb = skb_dequeue(&cfhsi->qhead);
+
+		if (!skb)
+			break;
+
+		/* Calculate needed head alignment and tail alignment. */
+		info = (struct caif_payload_info *)&skb->cb;
+
+		hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align);
+		tpad = PAD_POW2((skb->len + hpad), hsi_tail_align);
+
+		/* Fill in CAIF frame length in descriptor. */
+		desc->cffrm_len[nfrms] = hpad + skb->len + tpad;
+
+		/* Fill head padding information. */
+		*pfrm = (u8)(hpad - 1);
+		pfrm += hpad;
+
+		/* Update network statistics. */
+		cfhsi->ndev->stats.tx_packets++;
+		cfhsi->ndev->stats.tx_bytes += skb->len;
+
+		/* Copy in CAIF frame. */
+		skb_copy_bits(skb, 0, pfrm, skb->len);
+
+		/* Update payload length. */
+		pld_len += desc->cffrm_len[nfrms];
+
+		/* Update frame pointer. */
+		pfrm += skb->len + tpad;
+		consume_skb(skb);
+		skb = NULL;
+
+		/* Update number of frames. */
+		nfrms++;
+	}
+
+	/* Unused length fields should be zero-filled (according to SPEC). */
+	while (nfrms < CFHSI_MAX_PKTS) {
+		desc->cffrm_len[nfrms] = 0x0000;
+		nfrms++;
+	}
+
+	/* Check if we can piggy-back another descriptor. */
+	skb = skb_peek(&cfhsi->qhead);
+	if (skb)
+		desc->header |= CFHSI_PIGGY_DESC;
+	else
+		desc->header &= ~CFHSI_PIGGY_DESC;
+
+	return CFHSI_DESC_SZ + pld_len;
+}
+
+static void cfhsi_tx_done_work(struct work_struct *work)
+{
+	struct cfhsi *cfhsi = NULL;
+	struct cfhsi_desc *desc = NULL;
+	int len = 0;
+	int res;
+
+	cfhsi = container_of(work, struct cfhsi, tx_done_work);
+	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
+		__func__);
+
+	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
+		return;
+
+	desc = (struct cfhsi_desc *)cfhsi->tx_buf;
+
+	do {
+		/*
+		 * Send flow on if flow off has been previously signalled
+		 * and number of packets is below low water mark.
+		 */
+		spin_lock_bh(&cfhsi->lock);
+		if (cfhsi->flow_off_sent &&
+				cfhsi->qhead.qlen <= cfhsi->q_low_mark &&
+				cfhsi->cfdev.flowctrl) {
+
+			cfhsi->flow_off_sent = 0;
+			cfhsi->cfdev.flowctrl(cfhsi->ndev, ON);
+		}
+		spin_unlock_bh(&cfhsi->lock);
+
+		/* Create HSI frame. */
+		len = cfhsi_tx_frm(desc, cfhsi);
+		if (!len) {
+			cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
+			/* Start inactivity timer. */
+			mod_timer(&cfhsi->timer,
+					jiffies + CFHSI_INACTIVITY_TOUT);
+			break;
+		}
+
+		/* Set up new transfer. */
+		res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
+		if (WARN_ON(res < 0)) {
+			dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
+				__func__, res);
+		}
+	} while (res < 0);
+}
+
+static void cfhsi_tx_done_cb(struct cfhsi_drv *drv)
+{
+	struct cfhsi *cfhsi;
+
+	cfhsi = container_of(drv, struct cfhsi, drv);
+	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
+		__func__);
+
+	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
+		return;
+
+	queue_work(cfhsi->wq, &cfhsi->tx_done_work);
+}
+
+static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
+{
+	int xfer_sz = 0;
+	int nfrms = 0;
+	u16 *plen = NULL;
+	u8 *pfrm = NULL;
+
+	if ((desc->header & ~CFHSI_PIGGY_DESC) ||
+			(desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {
+		dev_err(&cfhsi->ndev->dev, "%s: Invalid descriptor.\n",
+			__func__);
+		return 0;
+	}
+
+	/* Check for embedded CAIF frame. */
+	if (desc->offset) {
+		struct sk_buff *skb;
+		u8 *dst = NULL;
+		int len = 0, retries = 0;
+		pfrm = ((u8 *)desc) + desc->offset;
+
+		/* Remove offset padding. */
+		pfrm += *pfrm + 1;
+
+		/* Read length of CAIF frame (little endian). */
+		len = *pfrm;
+		len |= ((*(pfrm+1)) << 8) & 0xFF00;
+		len += 2;	/* Add FCS fields. */
+
+		/* Allocate SKB (GFP_KERNEL: may sleep; retried below on failure). */
+		skb = alloc_skb(len + 1, GFP_KERNEL);
+		while (!skb) {
+			retries++;
+			/* Actually sleep; with TASK_RUNNING a bare
+			 * schedule_timeout() returns at once. */
+			schedule_timeout_uninterruptible(1);
+			skb = alloc_skb(len + 1, GFP_KERNEL);
+			if (skb) {
+				printk(KERN_WARNING "%s: slept for %u "
+						"jiffies before getting memory\n",
+						__func__, retries);
+				break;
+			}
+			if (retries > HZ) {
+				printk(KERN_ERR "%s: slept for more than a "
+						"second and did not get memory\n",
+						__func__);
+				cfhsi->ndev->stats.rx_dropped++;
+				goto drop_frame;
+			}
+		}
+		caif_assert(skb != NULL);
+
+		dst = skb_put(skb, len);
+		memcpy(dst, pfrm, len);
+
+		skb->protocol = htons(ETH_P_CAIF);
+		skb_reset_mac_header(skb);
+		skb->dev = cfhsi->ndev;
+
+		/*
+		 * We are called from an arch-specific platform device.
+		 * Unfortunately we don't know what context we're
+		 * running in.
+		 */
+		if (in_interrupt())
+			netif_rx(skb);
+		else
+			netif_rx_ni(skb);
+
+		/* Update network statistics. */
+		cfhsi->ndev->stats.rx_packets++;
+		cfhsi->ndev->stats.rx_bytes += len;
+	}
+
+drop_frame:
+	/* Calculate transfer length. */
+	plen = desc->cffrm_len;
+	while (nfrms < CFHSI_MAX_PKTS && *plen) {
+		xfer_sz += *plen;
+		plen++;
+		nfrms++;
+	}
+
+	/* Check for piggy-backed descriptor. */
+	if (desc->header & CFHSI_PIGGY_DESC)
+		xfer_sz += CFHSI_DESC_SZ;
+
+	if (xfer_sz % 4) {
+		dev_err(&cfhsi->ndev->dev,
+				"%s: Invalid payload len: %d, ignored.\n",
+			__func__, xfer_sz);
+		xfer_sz = 0;
+	}
+
+	return xfer_sz;
+}
+
+static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
+{
+	int rx_sz = 0;
+	int nfrms = 0;
+	u16 *plen = NULL;
+	u8 *pfrm = NULL;
+
+	/* Sanity check header and offset. */
+	if (WARN_ON((desc->header & ~CFHSI_PIGGY_DESC) ||
+			(desc->offset > CFHSI_MAX_EMB_FRM_SZ))) {
+		dev_err(&cfhsi->ndev->dev, "%s: Invalid descriptor.\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	/* Set frame pointer to start of payload. */
+	pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
+	plen = desc->cffrm_len;
+	while (nfrms < CFHSI_MAX_PKTS && *plen) {
+		struct sk_buff *skb;
+		u8 *dst = NULL;
+		u8 *pcffrm = NULL;
+		int len = 0, retries = 0;
+
+		if (WARN_ON(desc->cffrm_len[nfrms] > CFHSI_MAX_PAYLOAD_SZ)) {
+			dev_err(&cfhsi->ndev->dev, "%s: Invalid payload.\n",
+				__func__);
+			return -EINVAL;
+		}
+
+		/* CAIF frame starts after head padding. */
+		pcffrm = pfrm + *pfrm + 1;
+
+		/* Read length of CAIF frame (little endian). */
+		len = *pcffrm;
+		len |= ((*(pcffrm + 1)) << 8) & 0xFF00;
+		len += 2;	/* Add FCS fields. */
+
+		/* Allocate SKB (GFP_KERNEL: may sleep; retried below on failure). */
+		skb = alloc_skb(len + 1, GFP_KERNEL);
+		while (!skb) {
+			retries++;
+			/* Actually sleep; with TASK_RUNNING a bare
+			 * schedule_timeout() returns at once. */
+			schedule_timeout_uninterruptible(1);
+			skb = alloc_skb(len + 1, GFP_KERNEL);
+			if (skb) {
+				printk(KERN_WARNING "%s: slept for %u "
+						"jiffies before getting memory\n",
+						__func__, retries);
+				break;
+			}
+			if (retries > HZ) {
+				printk(KERN_ERR "%s: slept for more than a "
+						"second and did not get memory\n",
+						__func__);
+				cfhsi->ndev->stats.rx_dropped++;
+				goto drop_frame;
+			}
+		}
+		caif_assert(skb != NULL);
+
+		dst = skb_put(skb, len);
+		memcpy(dst, pcffrm, len);
+
+		skb->protocol = htons(ETH_P_CAIF);
+		skb_reset_mac_header(skb);
+		skb->dev = cfhsi->ndev;
+
+		/*
+		 * We're called from a platform device,
+		 * and don't know the context we're running in.
+		 */
+		if (in_interrupt())
+			netif_rx(skb);
+		else
+			netif_rx_ni(skb);
+
+		/* Update network statistics. */
+		cfhsi->ndev->stats.rx_packets++;
+		cfhsi->ndev->stats.rx_bytes += len;
+
+drop_frame:
+		pfrm += *plen;
+		rx_sz += *plen;
+		plen++;
+		nfrms++;
+	}
+
+	return rx_sz;
+}
+
+static void cfhsi_rx_done_work(struct work_struct *work)
+{
+	int res;
+	int desc_pld_len = 0;
+	struct cfhsi *cfhsi = NULL;
+	struct cfhsi_desc *desc = NULL;
+
+	cfhsi = container_of(work, struct cfhsi, rx_done_work);
+	desc = (struct cfhsi_desc *)cfhsi->rx_buf;
+
+	dev_dbg(&cfhsi->ndev->dev, "%s: Kick timer if pending.\n",
+		__func__);
+
+	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
+		return;
+
+	/* Update inactivity timer if pending. */
+	mod_timer_pending(&cfhsi->timer, jiffies + CFHSI_INACTIVITY_TOUT);
+
+	if (cfhsi->rx_state == CFHSI_RX_STATE_DESC) {
+		desc_pld_len = cfhsi_rx_desc(desc, cfhsi);
+	} else {
+		int pld_len;
+
+		pld_len = cfhsi_rx_pld(desc, cfhsi);
+
+		if ((pld_len > 0) && (desc->header & CFHSI_PIGGY_DESC)) {
+			struct cfhsi_desc *piggy_desc;
+			piggy_desc = (struct cfhsi_desc *)
+				(desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ +
+						pld_len);
+
+			/* Extract piggy-backed descriptor. */
+			desc_pld_len = cfhsi_rx_desc(piggy_desc, cfhsi);
+
+			/*
+			 * Copy needed information from the piggy-backed
+			 * descriptor to the descriptor in the start.
+			 */
+			memcpy((u8 *)desc, (u8 *)piggy_desc,
+					CFHSI_DESC_SHORT_SZ);
+		}
+	}
+
+	if (desc_pld_len) {
+		cfhsi->rx_state = CFHSI_RX_STATE_PAYLOAD;
+		cfhsi->rx_ptr = cfhsi->rx_buf + CFHSI_DESC_SZ;
+		cfhsi->rx_len = desc_pld_len;
+	} else {
+		cfhsi->rx_state = CFHSI_RX_STATE_DESC;
+		cfhsi->rx_ptr = cfhsi->rx_buf;
+		cfhsi->rx_len = CFHSI_DESC_SZ;
+	}
+	clear_bit(CFHSI_PENDING_RX, &cfhsi->bits);
+
+	if (test_bit(CFHSI_AWAKE, &cfhsi->bits)) {
+		/* Set up new transfer. */
+		dev_dbg(&cfhsi->ndev->dev, "%s: Start RX.\n",
+			__func__);
+		res = cfhsi->dev->cfhsi_rx(cfhsi->rx_ptr, cfhsi->rx_len,
+				cfhsi->dev);
+		if (WARN_ON(res < 0)) {
+			dev_err(&cfhsi->ndev->dev, "%s: RX error %d.\n",
+				__func__, res);
+			cfhsi->ndev->stats.rx_errors++;
+			cfhsi->ndev->stats.rx_dropped++;
+		}
+	}
+}
+
+static void cfhsi_rx_done_cb(struct cfhsi_drv *drv)
+{
+	struct cfhsi *cfhsi;
+
+	cfhsi = container_of(drv, struct cfhsi, drv);
+	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
+		__func__);
+
+	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
+		return;
+
+	set_bit(CFHSI_PENDING_RX, &cfhsi->bits);
+
+	if (test_and_clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits))
+		wake_up_interruptible(&cfhsi->flush_fifo_wait);
+	else
+		queue_work(cfhsi->wq, &cfhsi->rx_done_work);
+}
+
+static void cfhsi_wake_up(struct work_struct *work)
+{
+	struct cfhsi *cfhsi = NULL;
+	int res;
+	int len;
+	long ret;
+
+	cfhsi = container_of(work, struct cfhsi, wake_up_work);
+
+	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
+		return;
+
+	if (unlikely(test_bit(CFHSI_AWAKE, &cfhsi->bits))) {
+		/* It happens when wakeup is requested by
+		 * both ends at the same time. */
+		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
+		return;
+	}
+
+	/* Activate wake line. */
+	cfhsi->dev->cfhsi_wake_up(cfhsi->dev);
+
+	dev_dbg(&cfhsi->ndev->dev, "%s: Start waiting.\n",
+		__func__);
+
+	/* Wait for acknowledge. */
+	ret = CFHSI_WAKEUP_TOUT;
+	wait_event_interruptible_timeout(cfhsi->wake_up_wait,
+					test_bit(CFHSI_WAKE_UP_ACK,
+							&cfhsi->bits), ret);
+	if (unlikely(ret < 0)) {
+		/* Interrupted by signal. */
+		dev_info(&cfhsi->ndev->dev, "%s: Signalled: %ld.\n",
+			__func__, ret);
+		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
+		cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
+		return;
+	} else if (!ret) {
+		/* Wakeup timeout */
+		dev_err(&cfhsi->ndev->dev, "%s: Timeout.\n",
+			__func__);
+		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
+		cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
+		return;
+	}
+	dev_dbg(&cfhsi->ndev->dev, "%s: Woken.\n",
+		__func__);
+
+	/* Clear power up bit. */
+	set_bit(CFHSI_AWAKE, &cfhsi->bits);
+	clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
+
+	/* Resume read operation. */
+	if (!test_bit(CFHSI_PENDING_RX, &cfhsi->bits)) {
+		dev_dbg(&cfhsi->ndev->dev, "%s: Start RX.\n",
+			__func__);
+		res = cfhsi->dev->cfhsi_rx(cfhsi->rx_ptr,
+				cfhsi->rx_len, cfhsi->dev);
+		if (WARN_ON(res < 0)) {
+			dev_err(&cfhsi->ndev->dev, "%s: RX error %d.\n",
+				__func__, res);
+		}
+	}
+
+	/* Clear power up acknowledgment. */
+	clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
+
+	spin_lock_bh(&cfhsi->lock);
+
+	/* Resume transmit if queue is not empty. */
+	if (!skb_peek(&cfhsi->qhead)) {
+		dev_dbg(&cfhsi->ndev->dev, "%s: Peer wake, start timer.\n",
+			__func__);
+		/* Start inactivity timer. */
+		mod_timer(&cfhsi->timer,
+				jiffies + CFHSI_INACTIVITY_TOUT);
+		spin_unlock_bh(&cfhsi->lock);
+		return;
+	}
+
+	dev_dbg(&cfhsi->ndev->dev, "%s: Host wake.\n",
+		__func__);
+
+	spin_unlock_bh(&cfhsi->lock);
+
+	/* Create HSI frame. */
+	len = cfhsi_tx_frm((struct cfhsi_desc *)cfhsi->tx_buf, cfhsi);
+
+	if (likely(len > 0)) {
+		/* Set up new transfer. */
+		res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
+		if (WARN_ON(res < 0)) {
+			dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
+				__func__, res);
+			cfhsi_abort_tx(cfhsi);
+		}
+	} else {
+		dev_err(&cfhsi->ndev->dev,
+				"%s: Failed to create HSI frame: %d.\n",
+				__func__, len);
+	}
+
+
+static void cfhsi_wake_down(struct work_struct *work)
+{
+	long ret;
+	struct cfhsi *cfhsi = NULL;
+	size_t fifo_occupancy;
+
+	cfhsi = container_of(work, struct cfhsi, wake_down_work);
+	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
+		__func__);
+
+	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
+		return;
+
+	/* Check if there is something in FIFO. */
+	if (WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
+							&fifo_occupancy)))
+		fifo_occupancy = 0;
+
+	if (fifo_occupancy) {
+		dev_dbg(&cfhsi->ndev->dev,
+				"%s: %u words in RX FIFO, restart timer.\n",
+				__func__, (unsigned) fifo_occupancy);
+		spin_lock_bh(&cfhsi->lock);
+		mod_timer(&cfhsi->timer,
+				jiffies + CFHSI_INACTIVITY_TOUT);
+		spin_unlock_bh(&cfhsi->lock);
+		return;
+	}
+
+	/* Cancel pending RX requests */
+	cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev);
+
+	/* Deactivate wake line. */
+	cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
+
+	/* Wait for acknowledge. */
+	ret = CFHSI_WAKEUP_TOUT;
+	ret = wait_event_interruptible_timeout(cfhsi->wake_down_wait,
+					test_bit(CFHSI_WAKE_DOWN_ACK,
+							&cfhsi->bits),
+					ret);
+	if (ret < 0) {
+		/* Interrupted by signal. */
+		dev_info(&cfhsi->ndev->dev, "%s: Signalled: %ld.\n",
+			__func__, ret);
+		return;
+	} else if (!ret) {
+		/* Timeout */
+		dev_err(&cfhsi->ndev->dev, "%s: Timeout.\n",
+			__func__);
+	}
+
+	/* Clear power down acknowledgment. */
+	clear_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
+	clear_bit(CFHSI_AWAKE, &cfhsi->bits);
+
+	/* Check if there is something in FIFO. */
+	if (WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
+							&fifo_occupancy)))
+		fifo_occupancy = 0;
+
+	if (fifo_occupancy) {
+		dev_dbg(&cfhsi->ndev->dev,
+				"%s: %u words in RX FIFO, wakeup forced.\n",
+				__func__, (unsigned) fifo_occupancy);
+		if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
+			queue_work(cfhsi->wq, &cfhsi->wake_up_work);
+	} else
+		dev_dbg(&cfhsi->ndev->dev, "%s: Done.\n",
+			__func__);
+}
+
+static void cfhsi_wake_up_cb(struct cfhsi_drv *drv)
+{
+	struct cfhsi *cfhsi = NULL;
+
+	cfhsi = container_of(drv, struct cfhsi, drv);
+	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
+		__func__);
+
+	set_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
+	wake_up_interruptible(&cfhsi->wake_up_wait);
+
+	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
+		return;
+
+	/* Schedule wake up work queue if the peer initiates. */
+	if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
+		queue_work(cfhsi->wq, &cfhsi->wake_up_work);
+}
+
+static void cfhsi_wake_down_cb(struct cfhsi_drv *drv)
+{
+	struct cfhsi *cfhsi = NULL;
+
+	cfhsi = container_of(drv, struct cfhsi, drv);
+	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
+		__func__);
+
+	/* Initiating low power is only permitted by the host (us). */
+	set_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
+	wake_up_interruptible(&cfhsi->wake_down_wait);
+}
+
+static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct cfhsi *cfhsi = NULL;
+	int start_xfer = 0;
+	int timer_active;
+
+	if (!dev)
+		return -EINVAL;
+
+	cfhsi = netdev_priv(dev);
+
+	spin_lock_bh(&cfhsi->lock);
+
+	skb_queue_tail(&cfhsi->qhead, skb);
+
+	/* Sanity check; xmit should not be called after unregister_netdev */
+	if (WARN_ON(test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))) {
+		spin_unlock_bh(&cfhsi->lock);
+		cfhsi_abort_tx(cfhsi);
+		return -EINVAL;
+	}
+
+	/* Send flow off if number of packets is above high water mark. */
+	if (!cfhsi->flow_off_sent &&
+		cfhsi->qhead.qlen > cfhsi->q_high_mark &&
+		cfhsi->cfdev.flowctrl) {
+		cfhsi->flow_off_sent = 1;
+		cfhsi->cfdev.flowctrl(cfhsi->ndev, OFF);
+	}
+
+	if (cfhsi->tx_state == CFHSI_TX_STATE_IDLE) {
+		cfhsi->tx_state = CFHSI_TX_STATE_XFER;
+		start_xfer = 1;
+	}
+
+	spin_unlock_bh(&cfhsi->lock);
+
+	if (!start_xfer)
+		return 0;
+
+	/* Delete inactivity timer if started. */
+#ifdef CONFIG_SMP
+	timer_active = del_timer_sync(&cfhsi->timer);
+#else
+	timer_active = del_timer(&cfhsi->timer);
+#endif /* CONFIG_SMP */
+
+	if (timer_active) {
+		struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
+		int len;
+		int res;
+
+		/* Create HSI frame. */
+		len = cfhsi_tx_frm(desc, cfhsi);
+		BUG_ON(!len);
+
+		/* Set up new transfer. */
+		res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
+		if (WARN_ON(res < 0)) {
+			dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
+				__func__, res);
+			cfhsi_abort_tx(cfhsi);
+		}
+	} else {
+		/* Schedule wake up work queue if we initiate the transfer. */
+		if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
+			queue_work(cfhsi->wq, &cfhsi->wake_up_work);
+	}
+
+	return 0;
+}
+
+static int cfhsi_open(struct net_device *dev)
+{
+	netif_wake_queue(dev);
+
+	return 0;
+}
+
+static int cfhsi_close(struct net_device *dev)
+{
+	netif_stop_queue(dev);
+
+	return 0;
+}
+
+static const struct net_device_ops cfhsi_ops = {
+	.ndo_open = cfhsi_open,
+	.ndo_stop = cfhsi_close,
+	.ndo_start_xmit = cfhsi_xmit
+};
+
+static void cfhsi_setup(struct net_device *dev)
+{
+	struct cfhsi *cfhsi = netdev_priv(dev);
+	dev->features = 0;
+	dev->netdev_ops = &cfhsi_ops;
+	dev->type = ARPHRD_CAIF;
+	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
+	dev->mtu = CFHSI_MAX_PAYLOAD_SZ;
+	dev->tx_queue_len = 0;
+	dev->destructor = free_netdev;
+	skb_queue_head_init(&cfhsi->qhead);
+	cfhsi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
+	cfhsi->cfdev.use_frag = false;
+	cfhsi->cfdev.use_stx = false;
+	cfhsi->cfdev.use_fcs = false;
+	cfhsi->ndev = dev;
+}
+
+int cfhsi_probe(struct platform_device *pdev)
+{
+	struct cfhsi *cfhsi = NULL;
+	struct net_device *ndev;
+	struct cfhsi_dev *dev;
+	int res;
+
+	ndev = alloc_netdev(sizeof(struct cfhsi), "cfhsi%d", cfhsi_setup);
+	if (!ndev) {
+		dev_err(&pdev->dev, "%s: alloc_netdev failed.\n",
+			__func__);
+		return -ENODEV;
+	}
+
+	cfhsi = netdev_priv(ndev);
+	cfhsi->ndev = ndev;
+	cfhsi->pdev = pdev;
+
+	/* Initialize state variables. */
+	cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
+	cfhsi->rx_state = CFHSI_RX_STATE_DESC;
+
+	/* Set flow info */
+	cfhsi->flow_off_sent = 0;
+	cfhsi->q_low_mark = LOW_WATER_MARK;
+	cfhsi->q_high_mark = HIGH_WATER_MARK;
+
+	/* Assign the HSI device. */
+	dev = (struct cfhsi_dev *)pdev->dev.platform_data;
+	cfhsi->dev = dev;
+
+	/* Assign the driver to this HSI device. */
+	dev->drv = &cfhsi->drv;
+
+	/*
+	 * Allocate a TX buffer with the size of an HSI packet descriptor
+	 * and the necessary room for CAIF payload frames.
+	 */
+	cfhsi->tx_buf = kzalloc(CFHSI_BUF_SZ_TX, GFP_KERNEL);
+	if (!cfhsi->tx_buf) {
+		dev_err(&ndev->dev, "%s: Failed to allocate TX buffer.\n",
+			__func__);
+		res = -ENODEV;
+		goto err_alloc_tx;
+	}
+
+	/*
+	 * Allocate an RX buffer with the size of two HSI packet descriptors and
+	 * the necessary room for CAIF payload frames.
+	 */
+	cfhsi->rx_buf = kzalloc(CFHSI_BUF_SZ_RX, GFP_KERNEL);
+	if (!cfhsi->rx_buf) {
+		dev_err(&ndev->dev, "%s: Failed to allocate RX buffer.\n",
+			__func__);
+		res = -ENODEV;
+		goto err_alloc_rx;
+	}
+
+	/* Initialize receive variables. */
+	cfhsi->rx_ptr = cfhsi->rx_buf;
+	cfhsi->rx_len = CFHSI_DESC_SZ;
+
+	/* Initialize spin locks. */
+	spin_lock_init(&cfhsi->lock);
+
+	/* Set up the driver. */
+	cfhsi->drv.tx_done_cb = cfhsi_tx_done_cb;
+	cfhsi->drv.rx_done_cb = cfhsi_rx_done_cb;
+
+	/* Initialize the work queues. */
+	INIT_WORK(&cfhsi->wake_up_work, cfhsi_wake_up);
+	INIT_WORK(&cfhsi->wake_down_work, cfhsi_wake_down);
+	INIT_WORK(&cfhsi->rx_done_work, cfhsi_rx_done_work);
+	INIT_WORK(&cfhsi->tx_done_work, cfhsi_tx_done_work);
+
+	/* Clear all bit fields. */
+	clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
+	clear_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
+	clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
+	clear_bit(CFHSI_AWAKE, &cfhsi->bits);
+	clear_bit(CFHSI_PENDING_RX, &cfhsi->bits);
+
+	/* Create work thread. */
+	cfhsi->wq = create_singlethread_workqueue(pdev->name);
+	if (!cfhsi->wq) {
+		dev_err(&ndev->dev, "%s: Failed to create work queue.\n",
+			__func__);
+		res = -ENODEV;
+		goto err_create_wq;
+	}
+
+	/* Initialize wait queues. */
+	init_waitqueue_head(&cfhsi->wake_up_wait);
+	init_waitqueue_head(&cfhsi->wake_down_wait);
+	init_waitqueue_head(&cfhsi->flush_fifo_wait);
+
+	/* Setup the inactivity timer. */
+	init_timer(&cfhsi->timer);
+	cfhsi->timer.data = (unsigned long)cfhsi;
+	cfhsi->timer.function = cfhsi_inactivity_tout;
+
+	/* Add CAIF HSI device to list. */
+	spin_lock(&cfhsi_list_lock);
+	list_add_tail(&cfhsi->list, &cfhsi_list);
+	spin_unlock(&cfhsi_list_lock);
+
+	/* Activate HSI interface. */
+	res = cfhsi->dev->cfhsi_up(cfhsi->dev);
+	if (res) {
+		dev_err(&cfhsi->ndev->dev,
+			"%s: can't activate HSI interface: %d.\n",
+			__func__, res);
+		goto err_activate;
+	}
+
+	/* Flush FIFO */
+	res = cfhsi_flush_fifo(cfhsi);
+	if (res) {
+		dev_err(&ndev->dev, "%s: Can't flush FIFO: %d.\n",
+			__func__, res);
+		goto err_net_reg;
+	}
+
+	cfhsi->drv.wake_up_cb = cfhsi_wake_up_cb;
+	cfhsi->drv.wake_down_cb = cfhsi_wake_down_cb;
+
+	/* Register network device. */
+	res = register_netdev(ndev);
+	if (res) {
+		dev_err(&ndev->dev, "%s: Registration error: %d.\n",
+			__func__, res);
+		goto err_net_reg;
+	}
+
+	netif_stop_queue(ndev);
+
+	return res;
+
+ err_net_reg:
+	cfhsi->dev->cfhsi_down(cfhsi->dev);
+ err_activate:
+	destroy_workqueue(cfhsi->wq);
+ err_create_wq:
+	kfree(cfhsi->rx_buf);
+ err_alloc_rx:
+	kfree(cfhsi->tx_buf);
+ err_alloc_tx:
+	free_netdev(ndev);
+
+	return res;
+}
+
+static void cfhsi_shutdown(struct cfhsi *cfhsi, bool remove_platform_dev)
+{
+	u8 *tx_buf, *rx_buf;
+
+	/* Stop TXing */
+	netif_tx_stop_all_queues(cfhsi->ndev);
+
+	/* going to shut down the driver */
+	set_bit(CFHSI_SHUTDOWN, &cfhsi->bits);
+
+	if (remove_platform_dev) {
+		/* Flush workqueue */
+		flush_workqueue(cfhsi->wq);
+
+		/* Notify device. */
+		platform_device_unregister(cfhsi->pdev);
+	}
+
+	/* Flush workqueue */
+	flush_workqueue(cfhsi->wq);
+
+	/* Delete timer if pending */
+#ifdef CONFIG_SMP
+	del_timer_sync(&cfhsi->timer);
+#else
+	del_timer(&cfhsi->timer);
+#endif /* CONFIG_SMP */
+
+	/* Cancel pending RX request (if any) */
+	cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev);
+
+	/* Flush again and destroy workqueue */
+	destroy_workqueue(cfhsi->wq);
+
+	/* Store buffers; they will be freed later. */
+	tx_buf = cfhsi->tx_buf;
+	rx_buf = cfhsi->rx_buf;
+
+	/* Flush transmit queues. */
+	cfhsi_abort_tx(cfhsi);
+
+	/* Deactivate interface */
+	cfhsi->dev->cfhsi_down(cfhsi->dev);
+
+	/* Finally unregister the network device. */
+	unregister_netdev(cfhsi->ndev);
+
+	/* Free buffers. */
+	kfree(tx_buf);
+	kfree(rx_buf);
+}
+
+int cfhsi_remove(struct platform_device *pdev)
+{
+	struct list_head *list_node;
+	struct list_head *n;
+	struct cfhsi *cfhsi = NULL;
+	struct cfhsi_dev *dev;
+
+	dev = (struct cfhsi_dev *)pdev->dev.platform_data;
+	spin_lock(&cfhsi_list_lock);
+	list_for_each_safe(list_node, n, &cfhsi_list) {
+		cfhsi = list_entry(list_node, struct cfhsi, list);
+		/* Find the corresponding device. */
+		if (cfhsi->dev == dev) {
+			/* Remove from list. */
+			list_del(list_node);
+			spin_unlock(&cfhsi_list_lock);
+
+			/* Shutdown driver. */
+			cfhsi_shutdown(cfhsi, false);
+
+			return 0;
+		}
+	}
+	spin_unlock(&cfhsi_list_lock);
+	return -ENODEV;
+}
+
+struct platform_driver cfhsi_plat_drv = {
+	.probe = cfhsi_probe,
+	.remove = cfhsi_remove,
+	.driver = {
+		   .name = "cfhsi",
+		   .owner = THIS_MODULE,
+		   },
+};
+
+static void __exit cfhsi_exit_module(void)
+{
+	struct list_head *list_node;
+	struct list_head *n;
+	struct cfhsi *cfhsi = NULL;
+
+	spin_lock(&cfhsi_list_lock);
+	list_for_each_safe(list_node, n, &cfhsi_list) {
+		cfhsi = list_entry(list_node, struct cfhsi, list);
+
+		/* Remove from list. */
+		list_del(list_node);
+		spin_unlock(&cfhsi_list_lock);
+
+		/* Shutdown driver. */
+		cfhsi_shutdown(cfhsi, true);
+
+		spin_lock(&cfhsi_list_lock);
+	}
+	spin_unlock(&cfhsi_list_lock);
+
+	/* Unregister platform driver. */
+	platform_driver_unregister(&cfhsi_plat_drv);
+}
+
+static int __init cfhsi_init_module(void)
+{
+	int result;
+
+	/* Initialize spin lock. */
+	spin_lock_init(&cfhsi_list_lock);
+
+	/* Register platform driver. */
+	result = platform_driver_register(&cfhsi_plat_drv);
+	if (result)
+		printk(KERN_ERR "Could not register platform HSI driver: %d.\n",
+			result);
+
+	return result;
+}
+
+module_init(cfhsi_init_module);
+module_exit(cfhsi_exit_module);
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index 3df0c0f..1cd0b59 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -4,6 +4,7 @@
  * License terms: GNU General Public License (GPL) version 2
  */
 
+#include <linux/hardirq.h>
 #include <linux/init.h>
 #include <linux/version.h>
 #include <linux/module.h>
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 1d699e3..bbf06f7 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -58,9 +58,10 @@
 
 config CAN_AT91
 	tristate "Atmel AT91 onchip CAN controller"
-	depends on CAN_DEV && ARCH_AT91SAM9263
+	depends on CAN_DEV && (ARCH_AT91SAM9263 || ARCH_AT91SAM9X5)
 	---help---
-	  This is a driver for the SoC CAN controller in Atmel's AT91SAM9263.
+	  This is a driver for the SoC CAN controller in Atmel's AT91SAM9263
+	  and AT91SAM9X5 processors.
 
 config CAN_TI_HECC
 	depends on CAN_DEV && ARCH_OMAP3
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 74efb5a..121ede6 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -41,32 +41,7 @@
 
 #include <mach/board.h>
 
-#define AT91_NAPI_WEIGHT	11
-
-/*
- * RX/TX Mailbox split
- * don't dare to touch
- */
-#define AT91_MB_RX_NUM		11
-#define AT91_MB_TX_SHIFT	2
-
-#define AT91_MB_RX_FIRST	1
-#define AT91_MB_RX_LAST		(AT91_MB_RX_FIRST + AT91_MB_RX_NUM - 1)
-
-#define AT91_MB_RX_MASK(i)	((1 << (i)) - 1)
-#define AT91_MB_RX_SPLIT	8
-#define AT91_MB_RX_LOW_LAST	(AT91_MB_RX_SPLIT - 1)
-#define AT91_MB_RX_LOW_MASK	(AT91_MB_RX_MASK(AT91_MB_RX_SPLIT) & \
-				 ~AT91_MB_RX_MASK(AT91_MB_RX_FIRST))
-
-#define AT91_MB_TX_NUM		(1 << AT91_MB_TX_SHIFT)
-#define AT91_MB_TX_FIRST	(AT91_MB_RX_LAST + 1)
-#define AT91_MB_TX_LAST		(AT91_MB_TX_FIRST + AT91_MB_TX_NUM - 1)
-
-#define AT91_NEXT_PRIO_SHIFT	(AT91_MB_TX_SHIFT)
-#define AT91_NEXT_PRIO_MASK	(0xf << AT91_MB_TX_SHIFT)
-#define AT91_NEXT_MB_MASK	(AT91_MB_TX_NUM - 1)
-#define AT91_NEXT_MASK		((AT91_MB_TX_NUM - 1) | AT91_NEXT_PRIO_MASK)
+#define AT91_MB_MASK(i)		((1 << (i)) - 1)
 
 /* Common registers */
 enum at91_reg {
@@ -128,12 +103,6 @@
 };
 
 /* Interrupt mask bits */
-#define AT91_IRQ_MB_RX		((1 << (AT91_MB_RX_LAST + 1)) \
-				 - (1 << AT91_MB_RX_FIRST))
-#define AT91_IRQ_MB_TX		((1 << (AT91_MB_TX_LAST + 1)) \
-				 - (1 << AT91_MB_TX_FIRST))
-#define AT91_IRQ_MB_ALL		(AT91_IRQ_MB_RX | AT91_IRQ_MB_TX)
-
 #define AT91_IRQ_ERRA		(1 << 16)
 #define AT91_IRQ_WARN		(1 << 17)
 #define AT91_IRQ_ERRP		(1 << 18)
@@ -156,22 +125,51 @@
 
 #define AT91_IRQ_ALL		(0x1fffffff)
 
+enum at91_devtype {
+	AT91_DEVTYPE_SAM9263,
+	AT91_DEVTYPE_SAM9X5,
+};
+
+struct at91_devtype_data {
+	unsigned int rx_first;
+	unsigned int rx_split;
+	unsigned int rx_last;
+	unsigned int tx_shift;
+	enum at91_devtype type;
+};
+
 struct at91_priv {
-	struct can_priv		can;	   /* must be the first member! */
-	struct net_device	*dev;
-	struct napi_struct	napi;
+	struct can_priv can;		/* must be the first member! */
+	struct net_device *dev;
+	struct napi_struct napi;
 
-	void __iomem		*reg_base;
+	void __iomem *reg_base;
 
-	u32			reg_sr;
-	unsigned int		tx_next;
-	unsigned int		tx_echo;
-	unsigned int		rx_next;
+	u32 reg_sr;
+	unsigned int tx_next;
+	unsigned int tx_echo;
+	unsigned int rx_next;
+	struct at91_devtype_data devtype_data;
 
-	struct clk		*clk;
-	struct at91_can_data	*pdata;
+	struct clk *clk;
+	struct at91_can_data *pdata;
 
-	canid_t			mb0_id;
+	canid_t mb0_id;
+};
+
+static const struct at91_devtype_data at91_devtype_data[] __devinitconst = {
+	[AT91_DEVTYPE_SAM9263] = {
+		.rx_first = 1,
+		.rx_split = 8,
+		.rx_last = 11,
+		.tx_shift = 2,
+	},
+	[AT91_DEVTYPE_SAM9X5] = {
+		.rx_first = 0,
+		.rx_split = 4,
+		.rx_last = 5,
+		.tx_shift = 1,
+	},
 };
 
 static struct can_bittiming_const at91_bittiming_const = {
@@ -186,19 +184,111 @@
 	.brp_inc	= 1,
 };
 
-static inline int get_tx_next_mb(const struct at91_priv *priv)
-{
-	return (priv->tx_next & AT91_NEXT_MB_MASK) + AT91_MB_TX_FIRST;
+#define AT91_IS(_model) \
+static inline int at91_is_sam##_model(const struct at91_priv *priv) \
+{ \
+	return priv->devtype_data.type == AT91_DEVTYPE_SAM##_model; \
 }
 
-static inline int get_tx_next_prio(const struct at91_priv *priv)
+AT91_IS(9263);
+AT91_IS(9X5);
+
+static inline unsigned int get_mb_rx_first(const struct at91_priv *priv)
 {
-	return (priv->tx_next >> AT91_NEXT_PRIO_SHIFT) & 0xf;
+	return priv->devtype_data.rx_first;
 }
 
-static inline int get_tx_echo_mb(const struct at91_priv *priv)
+static inline unsigned int get_mb_rx_last(const struct at91_priv *priv)
 {
-	return (priv->tx_echo & AT91_NEXT_MB_MASK) + AT91_MB_TX_FIRST;
+	return priv->devtype_data.rx_last;
+}
+
+static inline unsigned int get_mb_rx_split(const struct at91_priv *priv)
+{
+	return priv->devtype_data.rx_split;
+}
+
+static inline unsigned int get_mb_rx_num(const struct at91_priv *priv)
+{
+	return get_mb_rx_last(priv) - get_mb_rx_first(priv) + 1;
+}
+
+static inline unsigned int get_mb_rx_low_last(const struct at91_priv *priv)
+{
+	return get_mb_rx_split(priv) - 1;
+}
+
+static inline unsigned int get_mb_rx_low_mask(const struct at91_priv *priv)
+{
+	return AT91_MB_MASK(get_mb_rx_split(priv)) &
+		~AT91_MB_MASK(get_mb_rx_first(priv));
+}
+
+static inline unsigned int get_mb_tx_shift(const struct at91_priv *priv)
+{
+	return priv->devtype_data.tx_shift;
+}
+
+static inline unsigned int get_mb_tx_num(const struct at91_priv *priv)
+{
+	return 1 << get_mb_tx_shift(priv);
+}
+
+static inline unsigned int get_mb_tx_first(const struct at91_priv *priv)
+{
+	return get_mb_rx_last(priv) + 1;
+}
+
+static inline unsigned int get_mb_tx_last(const struct at91_priv *priv)
+{
+	return get_mb_tx_first(priv) + get_mb_tx_num(priv) - 1;
+}
+
+static inline unsigned int get_next_prio_shift(const struct at91_priv *priv)
+{
+	return get_mb_tx_shift(priv);
+}
+
+static inline unsigned int get_next_prio_mask(const struct at91_priv *priv)
+{
+	return 0xf << get_mb_tx_shift(priv);
+}
+
+static inline unsigned int get_next_mb_mask(const struct at91_priv *priv)
+{
+	return AT91_MB_MASK(get_mb_tx_shift(priv));
+}
+
+static inline unsigned int get_next_mask(const struct at91_priv *priv)
+{
+	return get_next_mb_mask(priv) | get_next_prio_mask(priv);
+}
+
+static inline unsigned int get_irq_mb_rx(const struct at91_priv *priv)
+{
+	return AT91_MB_MASK(get_mb_rx_last(priv) + 1) &
+		~AT91_MB_MASK(get_mb_rx_first(priv));
+}
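+
+/*
+ * Example (editor's sketch): on the SAM9X5 (rx_first == 0, rx_last == 5)
+ * get_irq_mb_rx() yields AT91_MB_MASK(6) & ~AT91_MB_MASK(0) == 0x3f,
+ * i.e. one IRQ bit per RX mailbox.
+ */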
+
+static inline unsigned int get_irq_mb_tx(const struct at91_priv *priv)
+{
+	return AT91_MB_MASK(get_mb_tx_last(priv) + 1) &
+		~AT91_MB_MASK(get_mb_tx_first(priv));
+}
+
+static inline unsigned int get_tx_next_mb(const struct at91_priv *priv)
+{
+	return (priv->tx_next & get_next_mb_mask(priv)) + get_mb_tx_first(priv);
+}
+
+static inline unsigned int get_tx_next_prio(const struct at91_priv *priv)
+{
+	return (priv->tx_next >> get_next_prio_shift(priv)) & 0xf;
+}
+
+static inline unsigned int get_tx_echo_mb(const struct at91_priv *priv)
+{
+	return (priv->tx_echo & get_next_mb_mask(priv)) + get_mb_tx_first(priv);
 }
 
 static inline u32 at91_read(const struct at91_priv *priv, enum at91_reg reg)
@@ -259,29 +349,29 @@
 	 * overflow.
 	 */
 	reg_mid = at91_can_id_to_reg_mid(priv->mb0_id);
-	for (i = 0; i < AT91_MB_RX_FIRST; i++) {
+	for (i = 0; i < get_mb_rx_first(priv); i++) {
 		set_mb_mode(priv, i, AT91_MB_MODE_DISABLED);
 		at91_write(priv, AT91_MID(i), reg_mid);
 		at91_write(priv, AT91_MCR(i), 0x0);	/* clear dlc */
 	}
 
-	for (i = AT91_MB_RX_FIRST; i < AT91_MB_RX_LAST; i++)
+	for (i = get_mb_rx_first(priv); i < get_mb_rx_last(priv); i++)
 		set_mb_mode(priv, i, AT91_MB_MODE_RX);
-	set_mb_mode(priv, AT91_MB_RX_LAST, AT91_MB_MODE_RX_OVRWR);
+	set_mb_mode(priv, get_mb_rx_last(priv), AT91_MB_MODE_RX_OVRWR);
 
 	/* reset acceptance mask and id register */
-	for (i = AT91_MB_RX_FIRST; i <= AT91_MB_RX_LAST; i++) {
-		at91_write(priv, AT91_MAM(i), 0x0 );
+	for (i = get_mb_rx_first(priv); i <= get_mb_rx_last(priv); i++) {
+		at91_write(priv, AT91_MAM(i), 0x0);
 		at91_write(priv, AT91_MID(i), AT91_MID_MIDE);
 	}
 
 	/* The last 4 mailboxes are used for transmitting. */
-	for (i = AT91_MB_TX_FIRST; i <= AT91_MB_TX_LAST; i++)
+	for (i = get_mb_tx_first(priv); i <= get_mb_tx_last(priv); i++)
 		set_mb_mode_prio(priv, i, AT91_MB_MODE_TX, 0);
 
 	/* Reset tx and rx helper pointers */
 	priv->tx_next = priv->tx_echo = 0;
-	priv->rx_next = AT91_MB_RX_FIRST;
+	priv->rx_next = get_mb_rx_first(priv);
 }
 
 static int at91_set_bittiming(struct net_device *dev)
@@ -336,7 +426,7 @@
 	priv->can.state = CAN_STATE_ERROR_ACTIVE;
 
 	/* Enable interrupts */
-	reg_ier = AT91_IRQ_MB_RX | AT91_IRQ_ERRP | AT91_IRQ_ERR_FRAME;
+	reg_ier = get_irq_mb_rx(priv) | AT91_IRQ_ERRP | AT91_IRQ_ERR_FRAME;
 	at91_write(priv, AT91_IDR, AT91_IRQ_ALL);
 	at91_write(priv, AT91_IER, reg_ier);
 }
@@ -375,8 +465,8 @@
  * mailbox, but without the offset AT91_MB_TX_FIRST. The lower bits
  * encode the mailbox number, the upper 4 bits the mailbox priority:
  *
- * priv->tx_next = (prio << AT91_NEXT_PRIO_SHIFT) ||
- *                 (mb - AT91_MB_TX_FIRST);
+ * priv->tx_next = (prio << get_next_prio_shift(priv)) |
+ *                 (mb - get_mb_tx_first(priv));
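+ *
+ * Worked example (editor's sketch): on the SAM9263, tx_shift == 2 and the
+ * first TX mailbox is 12, so queueing into mailbox 14 at priority 3 gives
+ * priv->tx_next = (3 << 2) | (14 - 12) = 0xe.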
  *
  */
 static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -417,7 +507,7 @@
 	stats->tx_bytes += cf->can_dlc;
 
 	/* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */
-	can_put_echo_skb(skb, dev, mb - AT91_MB_TX_FIRST);
+	can_put_echo_skb(skb, dev, mb - get_mb_tx_first(priv));
 
 	/*
 	 * we have to stop the queue and deliver all messages in case
@@ -430,7 +520,7 @@
 	priv->tx_next++;
 	if (!(at91_read(priv, AT91_MSR(get_tx_next_mb(priv))) &
 	      AT91_MSR_MRDY) ||
-	    (priv->tx_next & AT91_NEXT_MASK) == 0)
+	    (priv->tx_next & get_next_mask(priv)) == 0)
 		netif_stop_queue(dev);
 
 	/* Enable interrupt for this mailbox */
@@ -447,7 +537,7 @@
  */
 static inline void at91_activate_rx_low(const struct at91_priv *priv)
 {
-	u32 mask = AT91_MB_RX_LOW_MASK;
+	u32 mask = get_mb_rx_low_mask(priv);
 	at91_write(priv, AT91_TCR, mask);
 }
 
@@ -513,17 +603,19 @@
 		cf->can_id = (reg_mid >> 18) & CAN_SFF_MASK;
 
 	reg_msr = at91_read(priv, AT91_MSR(mb));
-	if (reg_msr & AT91_MSR_MRTR)
-		cf->can_id |= CAN_RTR_FLAG;
 	cf->can_dlc = get_can_dlc((reg_msr >> 16) & 0xf);
 
-	*(u32 *)(cf->data + 0) = at91_read(priv, AT91_MDL(mb));
-	*(u32 *)(cf->data + 4) = at91_read(priv, AT91_MDH(mb));
+	if (reg_msr & AT91_MSR_MRTR)
+		cf->can_id |= CAN_RTR_FLAG;
+	else {
+		*(u32 *)(cf->data + 0) = at91_read(priv, AT91_MDL(mb));
+		*(u32 *)(cf->data + 4) = at91_read(priv, AT91_MDH(mb));
+	}
 
 	/* allow RX of extended frames */
 	at91_write(priv, AT91_MID(mb), AT91_MID_MIDE);
 
-	if (unlikely(mb == AT91_MB_RX_LAST && reg_msr & AT91_MSR_MMI))
+	if (unlikely(mb == get_mb_rx_last(priv) && reg_msr & AT91_MSR_MMI))
 		at91_rx_overflow_err(dev);
 }
 
@@ -561,8 +653,9 @@
  *
  * Theory of Operation:
  *
- * 11 of the 16 mailboxes on the chip are reserved for RX. we split
- * them into 2 groups. The lower group holds 7 and upper 4 mailboxes.
+ * About 3/4 of the mailboxes (get_mb_rx_first()...get_mb_rx_last())
+ * on the chip are reserved for RX. We split them into 2 groups. The
+ * lower group ranges from get_mb_rx_first() to get_mb_rx_low_last().
  *
  * Like it or not, but the chip always saves a received CAN message
  * into the first free mailbox it finds (starting with the
@@ -610,23 +703,23 @@
 	unsigned int mb;
 	int received = 0;
 
-	if (priv->rx_next > AT91_MB_RX_LOW_LAST &&
-	    reg_sr & AT91_MB_RX_LOW_MASK)
+	if (priv->rx_next > get_mb_rx_low_last(priv) &&
+	    reg_sr & get_mb_rx_low_mask(priv))
 		netdev_info(dev,
 			"order of incoming frames cannot be guaranteed\n");
 
  again:
-	for (mb = find_next_bit(addr, AT91_MB_RX_LAST + 1, priv->rx_next);
-	     mb < AT91_MB_RX_LAST + 1 && quota > 0;
+	for (mb = find_next_bit(addr, get_mb_tx_first(priv), priv->rx_next);
+	     mb < get_mb_tx_first(priv) && quota > 0;
 	     reg_sr = at91_read(priv, AT91_SR),
-	     mb = find_next_bit(addr, AT91_MB_RX_LAST + 1, ++priv->rx_next)) {
+	     mb = find_next_bit(addr, get_mb_tx_first(priv), ++priv->rx_next)) {
 		at91_read_msg(dev, mb);
 
 		/* reactivate mailboxes */
-		if (mb == AT91_MB_RX_LOW_LAST)
+		if (mb == get_mb_rx_low_last(priv))
 			/* all lower mailboxes, if we just finished the last one */
 			at91_activate_rx_low(priv);
-		else if (mb > AT91_MB_RX_LOW_LAST)
+		else if (mb > get_mb_rx_low_last(priv))
 			/* only the mailbox we read */
 			at91_activate_rx_mb(priv, mb);
 
@@ -635,9 +728,9 @@
 	}
 
 	/* upper group completed, look again in lower */
-	if (priv->rx_next > AT91_MB_RX_LOW_LAST &&
-	    quota > 0 && mb > AT91_MB_RX_LAST) {
-		priv->rx_next = AT91_MB_RX_FIRST;
+	if (priv->rx_next > get_mb_rx_low_last(priv) &&
+	    quota > 0 && mb > get_mb_rx_last(priv)) {
+		priv->rx_next = get_mb_rx_first(priv);
 		goto again;
 	}
 
@@ -720,7 +813,7 @@
 	u32 reg_sr = at91_read(priv, AT91_SR);
 	int work_done = 0;
 
-	if (reg_sr & AT91_IRQ_MB_RX)
+	if (reg_sr & get_irq_mb_rx(priv))
 		work_done += at91_poll_rx(dev, quota - work_done);
 
 	/*
@@ -734,7 +827,7 @@
 	if (work_done < quota) {
 		/* enable IRQs for frame errors and all mailboxes >= rx_next */
 		u32 reg_ier = AT91_IRQ_ERR_FRAME;
-		reg_ier |= AT91_IRQ_MB_RX & ~AT91_MB_RX_MASK(priv->rx_next);
+		reg_ier |= get_irq_mb_rx(priv) & ~AT91_MB_MASK(priv->rx_next);
 
 		napi_complete(napi);
 		at91_write(priv, AT91_IER, reg_ier);
@@ -783,7 +876,7 @@
 		if (likely(reg_msr & AT91_MSR_MRDY &&
 			   ~reg_msr & AT91_MSR_MABT)) {
 			/* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */
-			can_get_echo_skb(dev, mb - AT91_MB_TX_FIRST);
+			can_get_echo_skb(dev, mb - get_mb_tx_first(priv));
 			dev->stats.tx_packets++;
 		}
 	}
@@ -793,8 +886,8 @@
 	 * we get a TX int for the last can frame directly before a
 	 * wrap around.
 	 */
-	if ((priv->tx_next & AT91_NEXT_MASK) != 0 ||
-	    (priv->tx_echo & AT91_NEXT_MASK) == 0)
+	if ((priv->tx_next & get_next_mask(priv)) != 0 ||
+	    (priv->tx_echo & get_next_mask(priv)) == 0)
 		netif_wake_queue(dev);
 }
 
@@ -906,6 +999,29 @@
 	at91_write(priv, AT91_IER, reg_ier);
 }
 
+static int at91_get_state_by_bec(const struct net_device *dev,
+		enum can_state *state)
+{
+	struct can_berr_counter bec;
+	int err;
+
+	err = at91_get_berr_counter(dev, &bec);
+	if (err)
+		return err;
+
+	if (bec.txerr < 96 && bec.rxerr < 96)
+		*state = CAN_STATE_ERROR_ACTIVE;
+	else if (bec.txerr < 128 && bec.rxerr < 128)
+		*state = CAN_STATE_ERROR_WARNING;
+	else if (bec.txerr < 256 && bec.rxerr < 256)
+		*state = CAN_STATE_ERROR_PASSIVE;
+	else
+		*state = CAN_STATE_BUS_OFF;
+
+	return 0;
+}
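+
+/*
+ * Note (editor's): the 96/128/256 boundaries above mirror the CAN error
+ * state machine: error-warning from 96, error-passive from 128, bus-off
+ * once a counter reaches 256.
+ */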
+
 static void at91_irq_err(struct net_device *dev)
 {
 	struct at91_priv *priv = netdev_priv(dev);
@@ -913,21 +1029,28 @@
 	struct can_frame *cf;
 	enum can_state new_state;
 	u32 reg_sr;
+	int err;
 
-	reg_sr = at91_read(priv, AT91_SR);
+	if (at91_is_sam9263(priv)) {
+		reg_sr = at91_read(priv, AT91_SR);
 
-	/* we need to look at the unmasked reg_sr */
-	if (unlikely(reg_sr & AT91_IRQ_BOFF))
-		new_state = CAN_STATE_BUS_OFF;
-	else if (unlikely(reg_sr & AT91_IRQ_ERRP))
-		new_state = CAN_STATE_ERROR_PASSIVE;
-	else if (unlikely(reg_sr & AT91_IRQ_WARN))
-		new_state = CAN_STATE_ERROR_WARNING;
-	else if (likely(reg_sr & AT91_IRQ_ERRA))
-		new_state = CAN_STATE_ERROR_ACTIVE;
-	else {
-		netdev_err(dev, "BUG! hardware in undefined state\n");
-		return;
+		/* we need to look at the unmasked reg_sr */
+		if (unlikely(reg_sr & AT91_IRQ_BOFF))
+			new_state = CAN_STATE_BUS_OFF;
+		else if (unlikely(reg_sr & AT91_IRQ_ERRP))
+			new_state = CAN_STATE_ERROR_PASSIVE;
+		else if (unlikely(reg_sr & AT91_IRQ_WARN))
+			new_state = CAN_STATE_ERROR_WARNING;
+		else if (likely(reg_sr & AT91_IRQ_ERRA))
+			new_state = CAN_STATE_ERROR_ACTIVE;
+		else {
+			netdev_err(dev, "BUG! hardware in undefined state\n");
+			return;
+		}
+	} else {
+		err = at91_get_state_by_bec(dev, &new_state);
+		if (err)
+			return;
 	}
 
 	/* state hasn't changed */
@@ -968,19 +1091,19 @@
 	handled = IRQ_HANDLED;
 
 	/* Receive or error interrupt? -> napi */
-	if (reg_sr & (AT91_IRQ_MB_RX | AT91_IRQ_ERR_FRAME)) {
+	if (reg_sr & (get_irq_mb_rx(priv) | AT91_IRQ_ERR_FRAME)) {
 		/*
 		 * The error bits are clear on read,
 		 * save for later use.
 		 */
 		priv->reg_sr = reg_sr;
 		at91_write(priv, AT91_IDR,
-			   AT91_IRQ_MB_RX | AT91_IRQ_ERR_FRAME);
+			   get_irq_mb_rx(priv) | AT91_IRQ_ERR_FRAME);
 		napi_schedule(&priv->napi);
 	}
 
 	/* Transmission complete interrupt */
-	if (reg_sr & AT91_IRQ_MB_TX)
+	if (reg_sr & get_irq_mb_tx(priv))
 		at91_irq_tx(dev, reg_sr);
 
 	at91_irq_err(dev);
@@ -1123,6 +1246,8 @@
 
 static int __devinit at91_can_probe(struct platform_device *pdev)
 {
+	const struct at91_devtype_data *devtype_data;
+	enum at91_devtype devtype;
 	struct net_device *dev;
 	struct at91_priv *priv;
 	struct resource *res;
@@ -1130,6 +1255,9 @@
 	void __iomem *addr;
 	int err, irq;
 
+	devtype = pdev->id_entry->driver_data;
+	devtype_data = &at91_devtype_data[devtype];
+
 	clk = clk_get(&pdev->dev, "can_clk");
 	if (IS_ERR(clk)) {
 		dev_err(&pdev->dev, "no clock defined\n");
@@ -1157,7 +1285,8 @@
 		goto exit_release;
 	}
 
-	dev = alloc_candev(sizeof(struct at91_priv), AT91_MB_TX_NUM);
+	dev = alloc_candev(sizeof(struct at91_priv),
+			   1 << devtype_data->tx_shift);
 	if (!dev) {
 		err = -ENOMEM;
 		goto exit_iounmap;
@@ -1166,7 +1295,6 @@
 	dev->netdev_ops	= &at91_netdev_ops;
 	dev->irq = irq;
 	dev->flags |= IFF_ECHO;
-	dev->sysfs_groups[0] = &at91_sysfs_attr_group;
 
 	priv = netdev_priv(dev);
 	priv->can.clock.freq = clk_get_rate(clk);
@@ -1174,13 +1302,18 @@
 	priv->can.do_set_mode = at91_set_mode;
 	priv->can.do_get_berr_counter = at91_get_berr_counter;
 	priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
-	priv->reg_base = addr;
 	priv->dev = dev;
+	priv->reg_base = addr;
+	priv->devtype_data = *devtype_data;
+	priv->devtype_data.type = devtype;
 	priv->clk = clk;
 	priv->pdata = pdev->dev.platform_data;
 	priv->mb0_id = 0x7ff;
 
-	netif_napi_add(dev, &priv->napi, at91_poll, AT91_NAPI_WEIGHT);
+	netif_napi_add(dev, &priv->napi, at91_poll, get_mb_rx_num(priv));
+
+	if (at91_is_sam9263(priv))
+		dev->sysfs_groups[0] = &at91_sysfs_attr_group;
 
 	dev_set_drvdata(&pdev->dev, dev);
 	SET_NETDEV_DEV(dev, &pdev->dev);
@@ -1230,13 +1363,26 @@
 	return 0;
 }
 
+static const struct platform_device_id at91_can_id_table[] = {
+	{
+		.name = "at91_can",
+		.driver_data = AT91_DEVTYPE_SAM9263,
+	}, {
+		.name = "at91sam9x5_can",
+		.driver_data = AT91_DEVTYPE_SAM9X5,
+	}, {
+		/* sentinel */
+	}
+};
+
 static struct platform_driver at91_can_driver = {
-	.probe		= at91_can_probe,
-	.remove		= __devexit_p(at91_can_remove),
-	.driver		= {
-		.name	= KBUILD_MODNAME,
-		.owner	= THIS_MODULE,
+	.probe = at91_can_probe,
+	.remove = __devexit_p(at91_can_remove),
+	.driver = {
+		.name = KBUILD_MODNAME,
+		.owner = THIS_MODULE,
 	},
+	.id_table = at91_can_id_table,
 };
 
 static int __init at91_can_module_init(void)
diff --git a/drivers/net/can/sja1000/sja1000.h b/drivers/net/can/sja1000/sja1000.h
index de8e778..78bd4ec 100644
--- a/drivers/net/can/sja1000/sja1000.h
+++ b/drivers/net/can/sja1000/sja1000.h
@@ -47,6 +47,7 @@
 #ifndef SJA1000_DEV_H
 #define SJA1000_DEV_H
 
+#include <linux/irqreturn.h>
 #include <linux/can/dev.h>
 #include <linux/can/platform/sja1000.h>
 
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index 22ce03e..b414f5a 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -75,6 +75,7 @@
 #include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/vmalloc.h>
 #include <linux/ioport.h>
 #include <linux/pci.h>
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 11a92af..e66c3d9 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -1,6 +1,6 @@
 /* cnic.c: Broadcom CNIC core network driver.
  *
- * Copyright (c) 2006-2010 Broadcom Corporation
+ * Copyright (c) 2006-2011 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -605,11 +605,12 @@
 }
 EXPORT_SYMBOL(cnic_unregister_driver);
 
-static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id)
+static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id,
+			    u32 next)
 {
 	id_tbl->start = start_id;
 	id_tbl->max = size;
-	id_tbl->next = 0;
+	id_tbl->next = next;
 	spin_lock_init(&id_tbl->lock);
 	id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
 	if (!id_tbl->table)
@@ -835,7 +836,6 @@
 	cp->ctx_blks = 0;
 
 	cnic_free_dma(dev, &cp->gbl_buf_info);
-	cnic_free_dma(dev, &cp->conn_buf_info);
 	cnic_free_dma(dev, &cp->kwq_info);
 	cnic_free_dma(dev, &cp->kwq_16_data_info);
 	cnic_free_dma(dev, &cp->kcq2.dma);
@@ -899,24 +899,56 @@
 	return 0;
 }
 
-static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info)
+static u16 cnic_bnx2_next_idx(u16 idx)
 {
-	int err, i, is_bnx2 = 0;
+	return idx + 1;
+}
+
+static u16 cnic_bnx2_hw_idx(u16 idx)
+{
+	return idx;
+}
+
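+/* On bnx2x, the last KCQE of every page holds the next-page pointer
+ * (struct bnx2x_bd_chain_next), so the two bnx2x index helpers below
+ * skip any index that lands on MAX_KCQE_CNT.
+ */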
+static u16 cnic_bnx2x_next_idx(u16 idx)
+{
+	idx++;
+	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
+		idx++;
+
+	return idx;
+}
+
+static u16 cnic_bnx2x_hw_idx(u16 idx)
+{
+	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
+		idx++;
+	return idx;
+}
+
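+/* use_pg_tbl selects a page table for the queue's DMA area and linear
+ * (bnx2-style) indexing; otherwise the pages are chained through their
+ * last KCQE and the skipping (bnx2x-style) helpers are used.
+ */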
+static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info,
+			  bool use_pg_tbl)
+{
+	int err, i, use_page_tbl = 0;
 	struct kcqe **kcq;
 
-	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags))
-		is_bnx2 = 1;
+	if (use_pg_tbl)
+		use_page_tbl = 1;
 
-	err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, is_bnx2);
+	err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, use_page_tbl);
 	if (err)
 		return err;
 
 	kcq = (struct kcqe **) info->dma.pg_arr;
 	info->kcq = kcq;
 
-	if (is_bnx2)
+	info->next_idx = cnic_bnx2_next_idx;
+	info->hw_idx = cnic_bnx2_hw_idx;
+	if (use_pg_tbl)
 		return 0;
 
+	info->next_idx = cnic_bnx2x_next_idx;
+	info->hw_idx = cnic_bnx2x_hw_idx;
+
 	for (i = 0; i < KCQ_PAGE_CNT; i++) {
 		struct bnx2x_bd_chain_next *next =
 			(struct bnx2x_bd_chain_next *) &kcq[i][MAX_KCQE_CNT];
@@ -1059,7 +1091,7 @@
 		goto error;
 	cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;
 
-	ret = cnic_alloc_kcq(dev, &cp->kcq1);
+	ret = cnic_alloc_kcq(dev, &cp->kcq1, true);
 	if (ret)
 		goto error;
 
@@ -1143,7 +1175,7 @@
 	cp->iscsi_start_cid = start_cid;
 	cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ;
 
-	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
+	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
 		cp->max_cid_space += BNX2X_FCOE_NUM_CONNECTIONS;
 		cp->fcoe_init_cid = ethdev->fcoe_init_cid;
 		if (!cp->fcoe_init_cid)
@@ -1195,22 +1227,16 @@
 			j++;
 	}
 
-	ret = cnic_alloc_kcq(dev, &cp->kcq1);
+	ret = cnic_alloc_kcq(dev, &cp->kcq1, false);
 	if (ret)
 		goto error;
 
-	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
-		ret = cnic_alloc_kcq(dev, &cp->kcq2);
+	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
+		ret = cnic_alloc_kcq(dev, &cp->kcq2, true);
 		if (ret)
 			goto error;
 	}
 
-	pages = PAGE_ALIGN(BNX2X_ISCSI_NUM_CONNECTIONS *
-			   BNX2X_ISCSI_CONN_BUF_SIZE) / PAGE_SIZE;
-	ret = cnic_alloc_dma(dev, &cp->conn_buf_info, pages, 1);
-	if (ret)
-		goto error;
-
 	pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE;
 	ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
 	if (ret)
@@ -1577,6 +1603,7 @@
 	struct iscsi_context *ictx;
 	struct regpair context_addr;
 	int i, j, n = 2, n_max;
+	u8 port = CNIC_PORT(cp);
 
 	ctx->ctx_flags = 0;
 	if (!req2->num_additional_wqes)
@@ -1628,6 +1655,17 @@
 		XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA;
 	ictx->xstorm_st_context.iscsi.flags.flags |=
 		XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T;
+	ictx->xstorm_st_context.common.ethernet.reserved_vlan_type =
+		ETH_P_8021Q;
+	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) &&
+	    cp->port_mode == CHIP_2_PORT_MODE) {
+		port = 0;
+	}
+	ictx->xstorm_st_context.common.flags =
+		1 << XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED_SHIFT;
+	ictx->xstorm_st_context.common.flags |=
+		port << XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT;
 
 	ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
 	/* TSTORM requires the base address of RQ DB & not PTE */
@@ -1843,8 +1881,11 @@
 	ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
 				  hw_cid, NONE_CONNECTION_TYPE, &l5_data);
 
-	if (ret == 0)
+	if (ret == 0) {
 		wait_event(ctx->waitq, ctx->wait_cond);
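+		/* the CFC delete completion handler sets CTX_FL_CID_ERROR on error */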
+		if (unlikely(test_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags)))
+			return -EBUSY;
+	}
 
 	return ret;
 }
@@ -1879,8 +1920,10 @@
 skip_cfc_delete:
 	cnic_free_bnx2x_conn_resc(dev, l5_cid);
 
-	atomic_dec(&cp->iscsi_conn);
-	clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
+	if (!ret) {
+		atomic_dec(&cp->iscsi_conn);
+		clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
+	}
 
 destroy_reply:
 	memset(&kcqe, 0, sizeof(kcqe));
@@ -1939,8 +1982,6 @@
 		tstorm_buf->ka_interval = kwqe3->ka_interval;
 		tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count;
 	}
-	tstorm_buf->rcv_buf = kwqe3->rcv_buf;
-	tstorm_buf->snd_buf = kwqe3->snd_buf;
 	tstorm_buf->max_rt_time = 0xffffffff;
 }
 
@@ -1969,15 +2010,14 @@
 		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
 		 mac[4]);
 	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
-		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[3]);
+		 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid), mac[3]);
 	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
-		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
+		 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
 		 mac[2]);
 	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
-		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 2,
-		 mac[1]);
+		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[1]);
 	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
-		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 3,
+		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
 		 mac[0]);
 }
 
@@ -2156,7 +2196,7 @@
 	memset(fcoe_stat, 0, sizeof(*fcoe_stat));
 	memcpy(&fcoe_stat->stat_kwqe, req, sizeof(*req));
 
-	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_STAT, cid,
+	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_STAT_FUNC, cid,
 				  FCOE_CONNECTION_TYPE, &l5_data);
 	return ret;
 }
@@ -2201,12 +2241,9 @@
 	memcpy(&fcoe_init->init_kwqe1, req1, sizeof(*req1));
 	memcpy(&fcoe_init->init_kwqe2, req2, sizeof(*req2));
 	memcpy(&fcoe_init->init_kwqe3, req3, sizeof(*req3));
-	fcoe_init->eq_addr.lo = cp->kcq2.dma.pg_map_arr[0] & 0xffffffff;
-	fcoe_init->eq_addr.hi = (u64) cp->kcq2.dma.pg_map_arr[0] >> 32;
-	fcoe_init->eq_next_page_addr.lo =
-		cp->kcq2.dma.pg_map_arr[1] & 0xffffffff;
-	fcoe_init->eq_next_page_addr.hi =
-		(u64) cp->kcq2.dma.pg_map_arr[1] >> 32;
+	fcoe_init->eq_pbl_base.lo = cp->kcq2.dma.pgtbl_map & 0xffffffff;
+	fcoe_init->eq_pbl_base.hi = (u64) cp->kcq2.dma.pgtbl_map >> 32;
+	fcoe_init->eq_pbl_size = cp->kcq2.dma.num_pages;
 
 	fcoe_init->sb_num = cp->status_blk_num;
 	fcoe_init->eq_prod = MAX_KCQ_IDX;
@@ -2214,7 +2251,7 @@
 	cp->kcq2.sw_prod_idx = 0;
 
 	cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
-	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT, cid,
+	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT_FUNC, cid,
 				  FCOE_CONNECTION_TYPE, &l5_data);
 	*work = 3;
 	return ret;
@@ -2430,7 +2467,7 @@
 	cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
 
 	memset(&l5_data, 0, sizeof(l5_data));
-	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY, cid,
+	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY_FUNC, cid,
 				  FCOE_CONNECTION_TYPE, &l5_data);
 	return ret;
 }
@@ -2511,7 +2548,7 @@
 	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
 		return -EAGAIN;		/* bnx2 is down */
 
-	if (BNX2X_CHIP_NUM(cp->chip_id) == BNX2X_CHIP_NUM_57710)
+	if (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id))
 		return -EINVAL;
 
 	for (i = 0; i < num_wqes; ) {
@@ -2651,32 +2688,6 @@
 		cnic_spq_completion(dev, DRV_CTL_RET_L5_SPQ_CREDIT_CMD, comp);
 }
 
-static u16 cnic_bnx2_next_idx(u16 idx)
-{
-	return idx + 1;
-}
-
-static u16 cnic_bnx2_hw_idx(u16 idx)
-{
-	return idx;
-}
-
-static u16 cnic_bnx2x_next_idx(u16 idx)
-{
-	idx++;
-	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
-		idx++;
-
-	return idx;
-}
-
-static u16 cnic_bnx2x_hw_idx(u16 idx)
-{
-	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
-		idx++;
-	return idx;
-}
-
 static int cnic_get_kcqes(struct cnic_dev *dev, struct kcq_info *info)
 {
 	struct cnic_local *cp = dev->cnic_priv;
@@ -2687,12 +2698,12 @@
 	i = ri = last = info->sw_prod_idx;
 	ri &= MAX_KCQ_IDX;
 	hw_prod = *info->hw_prod_idx_ptr;
-	hw_prod = cp->hw_idx(hw_prod);
+	hw_prod = info->hw_idx(hw_prod);
 
 	while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
 		kcqe = &info->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
 		cp->completed_kcq[kcqe_cnt++] = kcqe;
-		i = cp->next_idx(i);
+		i = info->next_idx(i);
 		ri = i & MAX_KCQ_IDX;
 		if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) {
 			last_cnt = kcqe_cnt;
@@ -2778,13 +2789,10 @@
 
 		/* Tell compiler that status_blk fields can change. */
 		barrier();
-		if (status_idx != *cp->kcq1.status_idx_ptr) {
-			status_idx = (u16) *cp->kcq1.status_idx_ptr;
-			/* status block index must be read first */
-			rmb();
-			cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
-		} else
-			break;
+		status_idx = (u16) *cp->kcq1.status_idx_ptr;
+		/* status block index must be read first */
+		rmb();
+		cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
 	}
 
 	CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx);
@@ -2908,8 +2916,6 @@
 
 		/* Tell compiler that sblk fields can change. */
 		barrier();
-		if (last_status == *info->status_idx_ptr)
-			break;
 
 		last_status = *info->status_idx_ptr;
 		/* status block index must be read before reading the KCQ */
@@ -2933,7 +2939,7 @@
 		CNIC_WR16(dev, cp->kcq1.io_addr,
 			  cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
 
-		if (!BNX2X_CHIP_IS_E2(cp->chip_id)) {
+		if (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
 			cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID,
 					   status_idx, IGU_INT_ENABLE, 1);
 			break;
@@ -3052,13 +3058,21 @@
 		break;
 	}
 	case CNIC_CTL_COMPLETION_CMD: {
-		u32 cid = BNX2X_SW_CID(info->data.comp.cid);
+		struct cnic_ctl_completion *comp = &info->data.comp;
+		u32 cid = BNX2X_SW_CID(comp->cid);
 		u32 l5_cid;
 		struct cnic_local *cp = dev->cnic_priv;
 
 		if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) {
 			struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
 
+			if (unlikely(comp->error)) {
+				set_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags);
+				netdev_err(dev->netdev,
+					   "CID %x CFC delete comp error %x\n",
+					   cid, comp->error);
+			}
+
 			ctx->wait_cond = 1;
 			wake_up(&ctx->waitq);
 		}
@@ -3772,7 +3786,13 @@
 		break;
 
 	case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED:
-		cnic_cm_upcall(cp, csk, opcode);
+		/* after we already sent CLOSE_REQ */
+		if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags) &&
+		    !test_bit(SK_F_OFFLD_COMPLETE, &csk->flags) &&
+		    csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
+			cp->close_conn(csk, L4_KCQE_OPCODE_VALUE_RESET_COMP);
+		else
+			cnic_cm_upcall(cp, csk, opcode);
 		break;
 	}
 	csk_put(csk);
@@ -3803,14 +3823,17 @@
 static int cnic_cm_alloc_mem(struct cnic_dev *dev)
 {
 	struct cnic_local *cp = dev->cnic_priv;
+	u32 port_id;
 
 	cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ,
 			      GFP_KERNEL);
 	if (!cp->csk_tbl)
 		return -ENOMEM;
 
+	get_random_bytes(&port_id, sizeof(port_id));
+	port_id %= CNIC_LOCAL_PORT_RANGE;
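+	/* start local port allocation at this random offset into the range */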
 	if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
-			     CNIC_LOCAL_PORT_MIN)) {
+			     CNIC_LOCAL_PORT_MIN, port_id)) {
 		cnic_cm_free_mem(dev);
 		return -ENOMEM;
 	}
@@ -3826,12 +3849,14 @@
 	}
 
 	/* 1. If event opcode matches the expected event in csk->state
-	 * 2. If the expected event is CLOSE_COMP, we accept any event
+	 * 2. If the expected event is CLOSE_COMP or RESET_COMP, we accept any
+	 *    event
 	 * 3. If the expected event is 0, meaning the connection was never
 	 *    established, we accept the opcode from cm_abort.
 	 */
 	if (opcode == csk->state || csk->state == 0 ||
-	    csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP) {
+	    csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP ||
+	    csk->state == L4_KCQE_OPCODE_VALUE_RESET_COMP) {
 		if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) {
 			if (csk->state == 0)
 				csk->state = opcode;
@@ -3922,10 +3947,17 @@
 
 	for (i = 0; i < cp->max_cid_space; i++) {
 		struct cnic_context *ctx = &cp->ctx_tbl[i];
+		int j;
 
 		while (test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
 			msleep(10);
 
+		for (j = 0; j < 5; j++) {
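+			/* allow up to 5 * 20ms for an offload in progress to finish */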
+			if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
+				break;
+			msleep(20);
+		}
+
 		if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
 			netdev_warn(dev->netdev, "CID %x not deleted\n",
 				   ctx->cid);
@@ -3992,6 +4024,7 @@
 
 	for (i = 0; i < cp->max_cid_space; i++) {
 		struct cnic_context *ctx = &cp->ctx_tbl[i];
+		int err;
 
 		if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags) ||
 		    !test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
@@ -4005,13 +4038,15 @@
 		if (!test_and_clear_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
 			continue;
 
-		cnic_bnx2x_destroy_ramrod(dev, i);
+		err = cnic_bnx2x_destroy_ramrod(dev, i);
 
 		cnic_free_bnx2x_conn_resc(dev, i);
-		if (ctx->ulp_proto_id == CNIC_ULP_ISCSI)
-			atomic_dec(&cp->iscsi_conn);
+		if (!err) {
+			if (ctx->ulp_proto_id == CNIC_ULP_ISCSI)
+				atomic_dec(&cp->iscsi_conn);
 
-		clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
+			clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
+		}
 	}
 
 	if (need_resched)
@@ -4218,14 +4253,6 @@
 		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
 }
 
-static void cnic_get_bnx2_iscsi_info(struct cnic_dev *dev)
-{
-	u32 max_conn;
-
-	max_conn = cnic_reg_rd_ind(dev, BNX2_FW_MAX_ISCSI_CONN);
-	dev->max_iscsi_conn = max_conn;
-}
-
 static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
 {
 	struct cnic_local *cp = dev->cnic_priv;
@@ -4550,8 +4577,6 @@
 		return err;
 	}
 
-	cnic_get_bnx2_iscsi_info(dev);
-
 	return 0;
 }
 
@@ -4617,7 +4642,7 @@
 			CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
 			offsetof(struct hc_status_block_data_e1x, index_data) +
 			sizeof(struct hc_index_data)*HC_INDEX_ISCSI_EQ_CONS +
-			offsetof(struct hc_index_data, timeout), 64 / 12);
+			offsetof(struct hc_index_data, timeout), 64 / 4);
 	cnic_storm_memset_hc_disable(dev, sb_id, HC_INDEX_ISCSI_EQ_CONS, 0);
 }
 
@@ -4633,7 +4658,6 @@
 	union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring;
 	dma_addr_t buf_map, ring_map = udev->l2_ring_map;
 	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
-	int port = CNIC_PORT(cp);
 	int i;
 	u32 cli = cp->ethdev->iscsi_l2_client_id;
 	u32 val;
@@ -4674,10 +4698,9 @@
 
 	/* reset xstorm per client statistics */
 	if (cli < MAX_STAT_COUNTER_ID) {
-		val = BAR_XSTRORM_INTMEM +
-		      XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
-		for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++)
-			CNIC_WR(dev, val + i * 4, 0);
+		data->general.statistics_zero_flg = 1;
+		data->general.statistics_en_flg = 1;
+		data->general.statistics_counter_id = cli;
 	}
 
 	cp->tx_cons_ptr =
@@ -4695,7 +4718,6 @@
 				(udev->l2_ring + (2 * BCM_PAGE_SIZE));
 	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
 	int i;
-	int port = CNIC_PORT(cp);
 	u32 cli = cp->ethdev->iscsi_l2_client_id;
 	int cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
 	u32 val;
@@ -4703,10 +4725,10 @@
 
 	/* General data */
 	data->general.client_id = cli;
-	data->general.statistics_en_flg = 1;
-	data->general.statistics_counter_id = cli;
 	data->general.activate_flg = 1;
 	data->general.sp_client_id = cli;
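+	/* l2_single_buf_size includes the 14-byte Ethernet header; mtu does not */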
+	data->general.mtu = cpu_to_le16(cp->l2_single_buf_size - 14);
+	data->general.func_id = cp->pfid;
 
 	for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) {
 		dma_addr_t buf_map;
@@ -4740,23 +4762,12 @@
 	data->rx.status_block_id = BNX2X_DEF_SB_ID;
 
 	data->rx.cache_line_alignment_log_size = L1_CACHE_SHIFT;
-	data->rx.bd_buff_size =	cpu_to_le16(cp->l2_single_buf_size);
 
-	data->rx.mtu = cpu_to_le16(cp->l2_single_buf_size - 14);
+	data->rx.max_bytes_on_bd = cpu_to_le16(cp->l2_single_buf_size);
 	data->rx.outer_vlan_removal_enable_flg = 1;
-
-	/* reset tstorm and ustorm per client statistics */
-	if (cli < MAX_STAT_COUNTER_ID) {
-		val = BAR_TSTRORM_INTMEM +
-		      TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
-		for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++)
-			CNIC_WR(dev, val + i * 4, 0);
-
-		val = BAR_USTRORM_INTMEM +
-		      USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
-		for (i = 0; i < sizeof(struct ustorm_per_client_stats) / 4; i++)
-			CNIC_WR(dev, val + i * 4, 0);
-	}
+	data->rx.silent_vlan_removal_flg = 1;
+	data->rx.silent_vlan_value = 0;
+	data->rx.silent_vlan_mask = 0xffff;
 
 	cp->rx_cons_ptr =
 		&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS];
@@ -4772,7 +4783,7 @@
 			   CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0);
 	cp->kcq1.sw_prod_idx = 0;
 
-	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
+	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
 		struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
 
 		cp->kcq1.hw_prod_idx_ptr =
@@ -4788,7 +4799,7 @@
 			&sb->sb.running_index[SM_RX_ID];
 	}
 
-	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
+	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
 		struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
 
 		cp->kcq2.io_addr = BAR_USTRORM_INTMEM +
@@ -4805,10 +4816,12 @@
 {
 	struct cnic_local *cp = dev->cnic_priv;
 	struct cnic_eth_dev *ethdev = cp->ethdev;
-	int func = CNIC_FUNC(cp), ret, i;
+	int func = CNIC_FUNC(cp), ret;
 	u32 pfid;
 
-	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
+	cp->port_mode = CHIP_PORT_MODE_NONE;
+
+	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
 		u32 val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN_OVWR);
 
 		if (!(val & 1))
@@ -4816,25 +4829,28 @@
 		else
 			val = (val >> 1) & 1;
 
-		if (val)
+		if (val) {
+			cp->port_mode = CHIP_4_PORT_MODE;
 			cp->pfid = func >> 1;
-		else
+		} else {
+			cp->port_mode = CHIP_2_PORT_MODE;
 			cp->pfid = func & 0x6;
+		}
 	} else {
 		cp->pfid = func;
 	}
 	pfid = cp->pfid;
 
 	ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
-			       cp->iscsi_start_cid);
+			       cp->iscsi_start_cid, 0);
 
 	if (ret)
 		return -ENOMEM;
 
-	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
+	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
 		ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl,
 					BNX2X_FCOE_NUM_CONNECTIONS,
-					cp->fcoe_start_cid);
+					cp->fcoe_start_cid, 0);
 
 		if (ret)
 			return -ENOMEM;
@@ -4868,15 +4884,6 @@
 		CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfid, 0),
 		HC_INDEX_ISCSI_EQ_CONS);
 
-	for (i = 0; i < cp->conn_buf_info.num_pages; i++) {
-		CNIC_WR(dev, BAR_TSTRORM_INTMEM +
-			TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(pfid, i),
-			cp->conn_buf_info.pgtbl[2 * i]);
-		CNIC_WR(dev, BAR_TSTRORM_INTMEM +
-			TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(pfid, i) + 4,
-			cp->conn_buf_info.pgtbl[(2 * i) + 1]);
-	}
-
 	CNIC_WR(dev, BAR_USTRORM_INTMEM +
 		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid),
 		cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff);
@@ -4924,7 +4931,7 @@
 		cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
 
 		off = BAR_USTRORM_INTMEM +
-			(BNX2X_CHIP_IS_E2(cp->chip_id) ?
+			(BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ?
 			 USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) :
 			 USTORM_RX_PRODS_E1X_OFFSET(CNIC_PORT(cp), cli));
 
@@ -5217,6 +5224,8 @@
 	cdev->pcidev = pdev;
 	cp->chip_id = ethdev->chip_id;
 
+	cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
+
 	cp->cnic_ops = &cnic_bnx2_ops;
 	cp->start_hw = cnic_start_bnx2_hw;
 	cp->stop_hw = cnic_stop_bnx2_hw;
@@ -5228,8 +5237,6 @@
 	cp->enable_int = cnic_enable_bnx2_int;
 	cp->disable_int_sync = cnic_disable_bnx2_int_sync;
 	cp->close_conn = cnic_close_bnx2_conn;
-	cp->next_idx = cnic_bnx2_next_idx;
-	cp->hw_idx = cnic_bnx2_hw_idx;
 	return cdev;
 
 cnic_err:
@@ -5274,7 +5281,7 @@
 
 	if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
 		cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
-	if (BNX2X_CHIP_IS_E2(cp->chip_id) &&
+	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) &&
 	    !(ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE))
 		cdev->max_fcoe_conn = ethdev->max_fcoe_conn;
 
@@ -5290,13 +5297,11 @@
 	cp->stop_cm = cnic_cm_stop_bnx2x_hw;
 	cp->enable_int = cnic_enable_bnx2x_int;
 	cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
-	if (BNX2X_CHIP_IS_E2(cp->chip_id))
+	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id))
 		cp->ack_int = cnic_ack_bnx2x_e2_msix;
 	else
 		cp->ack_int = cnic_ack_bnx2x_msix;
 	cp->close_conn = cnic_close_bnx2x_conn;
-	cp->next_idx = cnic_bnx2x_next_idx;
-	cp->hw_idx = cnic_bnx2x_hw_idx;
 	return cdev;
 }
 
@@ -5335,7 +5340,7 @@
 
 	dev = cnic_from_netdev(netdev);
 
-	if (!dev && (event == NETDEV_REGISTER || event == NETDEV_UP)) {
+	if (!dev && (event == NETDEV_REGISTER || netif_running(netdev))) {
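+		/* netif_running() also catches devices that were up before cnic loaded */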
 		/* Check for the hot-plug device */
 		dev = is_cnic_dev(netdev);
 		if (dev) {
@@ -5351,7 +5356,7 @@
 		else if (event == NETDEV_UNREGISTER)
 			cnic_ulp_exit(dev);
 
-		if (event == NETDEV_UP) {
+		if (event == NETDEV_UP || (new_dev && netif_running(netdev))) {
 			if (cnic_register_netdev(dev) != 0) {
 				cnic_put(dev);
 				goto done;
diff --git a/drivers/net/cnic.h b/drivers/net/cnic.h
index 3367a6d3..330ef93 100644
--- a/drivers/net/cnic.h
+++ b/drivers/net/cnic.h
@@ -1,6 +1,6 @@
 /* cnic.h: Broadcom CNIC core network driver.
  *
- * Copyright (c) 2006-2010 Broadcom Corporation
+ * Copyright (c) 2006-2011 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -68,11 +68,6 @@
 #define BNX2_PG_CTX_MAP			0x1a0034
 #define BNX2_ISCSI_CTX_MAP		0x1a0074
 
-struct cnic_redirect_entry {
-	struct dst_entry *old_dst;
-	struct dst_entry *new_dst;
-};
-
 #define MAX_COMPLETED_KCQE	64
 
 #define MAX_CNIC_L5_CONTEXT	256
@@ -171,6 +166,7 @@
 	unsigned long		ctx_flags;
 #define	CTX_FL_OFFLD_START	0
 #define	CTX_FL_DELETE_WAIT	1
+#define	CTX_FL_CID_ERROR	2
 	u8			ulp_proto_id;
 	union {
 		struct cnic_iscsi	*iscsi;
@@ -185,6 +181,9 @@
 	u16		sw_prod_idx;
 	u16		*status_idx_ptr;
 	u32		io_addr;
+
+	u16		(*next_idx)(u16);
+	u16		(*hw_idx)(u16);
 };
 
 struct iro {
@@ -242,7 +241,7 @@
 	u16		rx_cons;
 	u16		tx_cons;
 
-	struct iro		*iro_arr;
+	const struct iro	*iro_arr;
 #define IRO (((struct cnic_local *) dev->cnic_priv)->iro_arr)
 
 	struct cnic_dma		kwq_info;
@@ -283,7 +282,6 @@
 	struct cnic_sock	*csk_tbl;
 	struct cnic_id_tbl	csk_port_tbl;
 
-	struct cnic_dma		conn_buf_info;
 	struct cnic_dma		gbl_buf_info;
 
 	struct cnic_iscsi	*iscsi_tbl;
@@ -317,6 +315,11 @@
 	u32			chip_id;
 	int			func;
 	u32			pfid;
+	u8			port_mode;
+#define CHIP_4_PORT_MODE	0
+#define CHIP_2_PORT_MODE	1
+#define CHIP_PORT_MODE_NONE	2
+
 	u32			shmem_base;
 
 	struct cnic_ops		*cnic_ops;
@@ -332,8 +335,6 @@
 	void			(*disable_int_sync)(struct cnic_dev *);
 	void			(*ack_int)(struct cnic_dev *);
 	void			(*close_conn)(struct cnic_sock *, u32 opcode);
-	u16			(*next_idx)(u16);
-	u16			(*hw_idx)(u16);
 };
 
 struct bnx2x_bd_chain_next {
@@ -368,7 +369,6 @@
 #define BNX2X_ISCSI_MAX_PENDING_R2TS	4
 #define BNX2X_ISCSI_R2TQE_SIZE		8
 #define BNX2X_ISCSI_HQ_BD_SIZE		64
-#define BNX2X_ISCSI_CONN_BUF_SIZE	64
 #define BNX2X_ISCSI_GLB_BUF_SIZE	64
 #define BNX2X_ISCSI_PBL_NOT_CACHED	0xff
 #define BNX2X_ISCSI_PDU_HEADER_NOT_CACHED	0xff
@@ -405,6 +405,7 @@
 #define BNX2X_CHIP_IS_E2(x)		\
 	(BNX2X_CHIP_IS_57712(x) || BNX2X_CHIP_IS_57712E(x) || \
 	 BNX2X_CHIP_IS_57713(x) || BNX2X_CHIP_IS_57713E(x))
+#define BNX2X_CHIP_IS_E2_PLUS(x) BNX2X_CHIP_IS_E2(x)
 
 #define IS_E1H_OFFSET       		BNX2X_CHIP_IS_E1H(cp->chip_id)
 
@@ -441,8 +442,8 @@
 
 #define CNIC_PORT(cp)			((cp)->pfid & 1)
 #define CNIC_FUNC(cp)			((cp)->func)
-#define CNIC_PATH(cp)			(!BNX2X_CHIP_IS_E2(cp->chip_id) ? 0 :\
-					 (CNIC_FUNC(cp) & 1))
+#define CNIC_PATH(cp)			(!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ? \
+					 0 : (CNIC_FUNC(cp) & 1))
 #define CNIC_E1HVN(cp)			((cp)->pfid >> 1)
 
 #define BNX2X_HW_CID(cp, x)		((CNIC_PORT(cp) << 23) | \
@@ -451,10 +452,15 @@
 #define BNX2X_SW_CID(x)			(x & 0x1ffff)
 
 #define BNX2X_CL_QZONE_ID(cp, cli)					\
-		(cli + (CNIC_PORT(cp) * (BNX2X_CHIP_IS_E2(cp->chip_id) ?\
-					ETH_MAX_RX_CLIENTS_E2 :		\
-					ETH_MAX_RX_CLIENTS_E1H)))
+		(BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ? cli :		\
+		 cli + (CNIC_PORT(cp) * ETH_MAX_RX_CLIENTS_E1H))
 
-#define TCP_TSTORM_OOO_DROP_AND_PROC_ACK	(0<<4)
+#ifndef MAX_STAT_COUNTER_ID
+#define MAX_STAT_COUNTER_ID						\
+	(BNX2X_CHIP_IS_E1H((cp)->chip_id) ? MAX_STAT_COUNTER_ID_E1H :	\
+	 ((BNX2X_CHIP_IS_E2_PLUS((cp)->chip_id)) ? MAX_STAT_COUNTER_ID_E2 :\
+	  MAX_STAT_COUNTER_ID_E1))
+#endif
+
 #endif
 
diff --git a/drivers/net/cnic_defs.h b/drivers/net/cnic_defs.h
index fdbc004..e47d210 100644
--- a/drivers/net/cnic_defs.h
+++ b/drivers/net/cnic_defs.h
@@ -1,7 +1,7 @@
 
 /* cnic.c: Broadcom CNIC core network driver.
  *
- * Copyright (c) 2006-2010 Broadcom Corporation
+ * Copyright (c) 2006-2011 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -45,13 +45,13 @@
 #define FCOE_KCQE_OPCODE_CQ_EVENT_NOTIFICATION  (0x20)
 #define FCOE_KCQE_OPCODE_FCOE_ERROR				(0x21)
 
-#define FCOE_RAMROD_CMD_ID_INIT			(FCOE_KCQE_OPCODE_INIT_FUNC)
-#define FCOE_RAMROD_CMD_ID_DESTROY		(FCOE_KCQE_OPCODE_DESTROY_FUNC)
+#define FCOE_RAMROD_CMD_ID_INIT_FUNC		(FCOE_KCQE_OPCODE_INIT_FUNC)
+#define FCOE_RAMROD_CMD_ID_DESTROY_FUNC		(FCOE_KCQE_OPCODE_DESTROY_FUNC)
+#define FCOE_RAMROD_CMD_ID_STAT_FUNC		(FCOE_KCQE_OPCODE_STAT_FUNC)
 #define FCOE_RAMROD_CMD_ID_OFFLOAD_CONN		(FCOE_KCQE_OPCODE_OFFLOAD_CONN)
 #define FCOE_RAMROD_CMD_ID_ENABLE_CONN		(FCOE_KCQE_OPCODE_ENABLE_CONN)
 #define FCOE_RAMROD_CMD_ID_DISABLE_CONN		(FCOE_KCQE_OPCODE_DISABLE_CONN)
 #define FCOE_RAMROD_CMD_ID_DESTROY_CONN		(FCOE_KCQE_OPCODE_DESTROY_CONN)
-#define FCOE_RAMROD_CMD_ID_STAT			(FCOE_KCQE_OPCODE_STAT_FUNC)
 #define FCOE_RAMROD_CMD_ID_TERMINATE_CONN	(0x81)
 
 #define FCOE_KWQE_OPCODE_INIT1                  (0)
@@ -641,20 +641,20 @@
 #define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_SE_CF_EN_SHIFT 12
 #define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_INV_CF_EN (0x1<<13)
 #define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_INV_CF_EN_SHIFT 13
-#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION3_CF (0x3<<14)
-#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION3_CF_SHIFT 14
+#define __CSTORM_ISCSI_AG_CONTEXT_AUX4_CF (0x3<<14)
+#define __CSTORM_ISCSI_AG_CONTEXT_AUX4_CF_SHIFT 14
 #define __CSTORM_ISCSI_AG_CONTEXT_RESERVED66 (0x3<<16)
 #define __CSTORM_ISCSI_AG_CONTEXT_RESERVED66_SHIFT 16
 #define __CSTORM_ISCSI_AG_CONTEXT_FIN_RECEIVED_CF_EN (0x1<<18)
 #define __CSTORM_ISCSI_AG_CONTEXT_FIN_RECEIVED_CF_EN_SHIFT 18
-#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION0_CF_EN (0x1<<19)
-#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION0_CF_EN_SHIFT 19
-#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION1_CF_EN (0x1<<20)
-#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION1_CF_EN_SHIFT 20
-#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION2_CF_EN (0x1<<21)
-#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION2_CF_EN_SHIFT 21
-#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION3_CF_EN (0x1<<22)
-#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION3_CF_EN_SHIFT 22
+#define __CSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN (0x1<<19)
+#define __CSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN_SHIFT 19
+#define __CSTORM_ISCSI_AG_CONTEXT_AUX2_CF_EN (0x1<<20)
+#define __CSTORM_ISCSI_AG_CONTEXT_AUX2_CF_EN_SHIFT 20
+#define __CSTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN (0x1<<21)
+#define __CSTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN_SHIFT 21
+#define __CSTORM_ISCSI_AG_CONTEXT_AUX4_CF_EN (0x1<<22)
+#define __CSTORM_ISCSI_AG_CONTEXT_AUX4_CF_EN_SHIFT 22
 #define __CSTORM_ISCSI_AG_CONTEXT_REL_SEQ_RULE (0x7<<23)
 #define __CSTORM_ISCSI_AG_CONTEXT_REL_SEQ_RULE_SHIFT 23
 #define CSTORM_ISCSI_AG_CONTEXT_HQ_PROD_RULE (0x3<<26)
@@ -694,574 +694,668 @@
 #endif
 #if defined(__BIG_ENDIAN)
 	u16 __reserved64;
-	u16 __cq_u_prod0;
+	u16 cq_u_prod;
 #elif defined(__LITTLE_ENDIAN)
-	u16 __cq_u_prod0;
+	u16 cq_u_prod;
 	u16 __reserved64;
 #endif
 	u32 __cq_u_prod1;
 #if defined(__BIG_ENDIAN)
 	u16 __agg_vars3;
-	u16 __cq_u_prod2;
+	u16 cq_u_pend;
 #elif defined(__LITTLE_ENDIAN)
-	u16 __cq_u_prod2;
+	u16 cq_u_pend;
 	u16 __agg_vars3;
 #endif
 #if defined(__BIG_ENDIAN)
 	u16 __aux2_th;
-	u16 __cq_u_prod3;
+	u16 aux2_val;
 #elif defined(__LITTLE_ENDIAN)
-	u16 __cq_u_prod3;
+	u16 aux2_val;
 	u16 __aux2_th;
 #endif
 };
 
 /*
- * Parameters initialized during offloaded according to FLOGI/PLOGI/PRLI and used in FCoE context section
+ * The fcoe extra aggregative context section of Tstorm
  */
-struct ustorm_fcoe_params {
+struct tstorm_fcoe_extra_ag_context_section {
+	u32 __agg_val1;
 #if defined(__BIG_ENDIAN)
-	u16 fcoe_conn_id;
-	u16 flags;
-#define USTORM_FCOE_PARAMS_B_MUL_N_PORT_IDS (0x1<<0)
-#define USTORM_FCOE_PARAMS_B_MUL_N_PORT_IDS_SHIFT 0
-#define USTORM_FCOE_PARAMS_B_E_D_TOV_RES (0x1<<1)
-#define USTORM_FCOE_PARAMS_B_E_D_TOV_RES_SHIFT 1
-#define USTORM_FCOE_PARAMS_B_CONT_INCR_SEQ_CNT (0x1<<2)
-#define USTORM_FCOE_PARAMS_B_CONT_INCR_SEQ_CNT_SHIFT 2
-#define USTORM_FCOE_PARAMS_B_CONF_REQ (0x1<<3)
-#define USTORM_FCOE_PARAMS_B_CONF_REQ_SHIFT 3
-#define USTORM_FCOE_PARAMS_B_REC_VALID (0x1<<4)
-#define USTORM_FCOE_PARAMS_B_REC_VALID_SHIFT 4
-#define USTORM_FCOE_PARAMS_B_CQ_TOGGLE_BIT (0x1<<5)
-#define USTORM_FCOE_PARAMS_B_CQ_TOGGLE_BIT_SHIFT 5
-#define USTORM_FCOE_PARAMS_B_XFRQ_TOGGLE_BIT (0x1<<6)
-#define USTORM_FCOE_PARAMS_B_XFRQ_TOGGLE_BIT_SHIFT 6
-#define USTORM_FCOE_PARAMS_B_C2_VALID (0x1<<7)
-#define USTORM_FCOE_PARAMS_B_C2_VALID_SHIFT 7
-#define USTORM_FCOE_PARAMS_B_ACK_0 (0x1<<8)
-#define USTORM_FCOE_PARAMS_B_ACK_0_SHIFT 8
-#define USTORM_FCOE_PARAMS_RSRV0 (0x7F<<9)
-#define USTORM_FCOE_PARAMS_RSRV0_SHIFT 9
+	u8 __tcp_agg_vars2;
+	u8 __agg_val3;
+	u16 __agg_val2;
 #elif defined(__LITTLE_ENDIAN)
-	u16 flags;
-#define USTORM_FCOE_PARAMS_B_MUL_N_PORT_IDS (0x1<<0)
-#define USTORM_FCOE_PARAMS_B_MUL_N_PORT_IDS_SHIFT 0
-#define USTORM_FCOE_PARAMS_B_E_D_TOV_RES (0x1<<1)
-#define USTORM_FCOE_PARAMS_B_E_D_TOV_RES_SHIFT 1
-#define USTORM_FCOE_PARAMS_B_CONT_INCR_SEQ_CNT (0x1<<2)
-#define USTORM_FCOE_PARAMS_B_CONT_INCR_SEQ_CNT_SHIFT 2
-#define USTORM_FCOE_PARAMS_B_CONF_REQ (0x1<<3)
-#define USTORM_FCOE_PARAMS_B_CONF_REQ_SHIFT 3
-#define USTORM_FCOE_PARAMS_B_REC_VALID (0x1<<4)
-#define USTORM_FCOE_PARAMS_B_REC_VALID_SHIFT 4
-#define USTORM_FCOE_PARAMS_B_CQ_TOGGLE_BIT (0x1<<5)
-#define USTORM_FCOE_PARAMS_B_CQ_TOGGLE_BIT_SHIFT 5
-#define USTORM_FCOE_PARAMS_B_XFRQ_TOGGLE_BIT (0x1<<6)
-#define USTORM_FCOE_PARAMS_B_XFRQ_TOGGLE_BIT_SHIFT 6
-#define USTORM_FCOE_PARAMS_B_C2_VALID (0x1<<7)
-#define USTORM_FCOE_PARAMS_B_C2_VALID_SHIFT 7
-#define USTORM_FCOE_PARAMS_B_ACK_0 (0x1<<8)
-#define USTORM_FCOE_PARAMS_B_ACK_0_SHIFT 8
-#define USTORM_FCOE_PARAMS_RSRV0 (0x7F<<9)
-#define USTORM_FCOE_PARAMS_RSRV0_SHIFT 9
-	u16 fcoe_conn_id;
+	u16 __agg_val2;
+	u8 __agg_val3;
+	u8 __tcp_agg_vars2;
 #endif
 #if defined(__BIG_ENDIAN)
-	u8 hc_csdm_byte_en;
-	u8 func_id;
-	u8 port_id;
-	u8 vnic_id;
+	u16 __agg_val5;
+	u8 __agg_val6;
+	u8 __tcp_agg_vars3;
 #elif defined(__LITTLE_ENDIAN)
-	u8 vnic_id;
-	u8 port_id;
-	u8 func_id;
-	u8 hc_csdm_byte_en;
+	u8 __tcp_agg_vars3;
+	u8 __agg_val6;
+	u16 __agg_val5;
 #endif
-#if defined(__BIG_ENDIAN)
-	u16 rx_total_conc_seqs;
-	u16 rx_max_fc_pay_len;
-#elif defined(__LITTLE_ENDIAN)
-	u16 rx_max_fc_pay_len;
-	u16 rx_total_conc_seqs;
-#endif
-#if defined(__BIG_ENDIAN)
-	u16 ox_id;
-	u16 rx_max_conc_seqs;
-#elif defined(__LITTLE_ENDIAN)
-	u16 rx_max_conc_seqs;
-	u16 ox_id;
-#endif
+	u32 __lcq_prod;
+	u32 rtt_seq;
+	u32 rtt_time;
+	u32 __reserved66;
+	u32 wnd_right_edge;
+	u32 tcp_agg_vars1;
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<0)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_FIN_SENT_FLAG_SHIFT 0
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_LAST_PACKET_FIN_FLAG (0x1<<1)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_LAST_PACKET_FIN_FLAG_SHIFT 1
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_WND_UPD_CF (0x3<<2)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_WND_UPD_CF_SHIFT 2
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TIMEOUT_CF (0x3<<4)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TIMEOUT_CF_SHIFT 4
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_WND_UPD_CF_EN (0x1<<6)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_WND_UPD_CF_EN_SHIFT 6
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TIMEOUT_CF_EN (0x1<<7)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TIMEOUT_CF_EN_SHIFT 7
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_SEQ_EN (0x1<<8)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_SEQ_EN_SHIFT 8
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_LCQ_SND_EN (0x1<<9)
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_LCQ_SND_EN_SHIFT 9
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<10)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 10
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_FLAG (0x1<<11)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_FLAG_SHIFT 11
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_CF_EN (0x1<<12)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_CF_EN_SHIFT 12
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_CF_EN (0x1<<13)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_CF_EN_SHIFT 13
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_CF (0x3<<14)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_CF_SHIFT 14
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_CF (0x3<<16)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_CF_SHIFT 16
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_BLOCKED (0x1<<18)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_BLOCKED_SHIFT 18
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX10_CF_EN (0x1<<19)
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX10_CF_EN_SHIFT 19
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX11_CF_EN (0x1<<20)
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX11_CF_EN_SHIFT 20
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX12_CF_EN (0x1<<21)
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX12_CF_EN_SHIFT 21
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED1 (0x3<<22)
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED1_SHIFT 22
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_PEND_SEQ (0xF<<24)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_PEND_SEQ_SHIFT 24
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_DONE_SEQ (0xF<<28)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_DONE_SEQ_SHIFT 28
+	u32 snd_max;
+	u32 __lcq_cons;
+	u32 __reserved2;
 };
 
 /*
- * FCoE 16-bits index structure
+ * The fcoe aggregative context of Tstorm
  */
-struct fcoe_idx16_fields {
-	u16 fields;
-#define FCOE_IDX16_FIELDS_IDX (0x7FFF<<0)
-#define FCOE_IDX16_FIELDS_IDX_SHIFT 0
-#define FCOE_IDX16_FIELDS_MSB (0x1<<15)
-#define FCOE_IDX16_FIELDS_MSB_SHIFT 15
+struct tstorm_fcoe_ag_context {
+#if defined(__BIG_ENDIAN)
+	u16 ulp_credit;
+	u8 agg_vars1;
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF (0x3<<4)
+#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_SHIFT 4
+#define __TSTORM_FCOE_AG_CONTEXT_AUX3_FLAG (0x1<<6)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX3_FLAG_SHIFT 6
+#define __TSTORM_FCOE_AG_CONTEXT_AUX4_FLAG (0x1<<7)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX4_FLAG_SHIFT 7
+	u8 state;
+#elif defined(__LITTLE_ENDIAN)
+	u8 state;
+	u8 agg_vars1;
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF (0x3<<4)
+#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_SHIFT 4
+#define __TSTORM_FCOE_AG_CONTEXT_AUX3_FLAG (0x1<<6)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX3_FLAG_SHIFT 6
+#define __TSTORM_FCOE_AG_CONTEXT_AUX4_FLAG (0x1<<7)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX4_FLAG_SHIFT 7
+	u16 ulp_credit;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 __agg_val4;
+	u16 agg_vars2;
+#define __TSTORM_FCOE_AG_CONTEXT_AUX5_FLAG (0x1<<0)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX5_FLAG_SHIFT 0
+#define __TSTORM_FCOE_AG_CONTEXT_AUX6_FLAG (0x1<<1)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX6_FLAG_SHIFT 1
+#define __TSTORM_FCOE_AG_CONTEXT_AUX4_CF (0x3<<2)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX4_CF_SHIFT 2
+#define __TSTORM_FCOE_AG_CONTEXT_AUX5_CF (0x3<<4)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX5_CF_SHIFT 4
+#define __TSTORM_FCOE_AG_CONTEXT_AUX6_CF (0x3<<6)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX6_CF_SHIFT 6
+#define __TSTORM_FCOE_AG_CONTEXT_AUX7_CF (0x3<<8)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX7_CF_SHIFT 8
+#define __TSTORM_FCOE_AG_CONTEXT_AUX7_FLAG (0x1<<10)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX7_FLAG_SHIFT 10
+#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_EN (0x1<<11)
+#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_EN_SHIFT 11
+#define TSTORM_FCOE_AG_CONTEXT_AUX4_CF_EN (0x1<<12)
+#define TSTORM_FCOE_AG_CONTEXT_AUX4_CF_EN_SHIFT 12
+#define TSTORM_FCOE_AG_CONTEXT_AUX5_CF_EN (0x1<<13)
+#define TSTORM_FCOE_AG_CONTEXT_AUX5_CF_EN_SHIFT 13
+#define TSTORM_FCOE_AG_CONTEXT_AUX6_CF_EN (0x1<<14)
+#define TSTORM_FCOE_AG_CONTEXT_AUX6_CF_EN_SHIFT 14
+#define TSTORM_FCOE_AG_CONTEXT_AUX7_CF_EN (0x1<<15)
+#define TSTORM_FCOE_AG_CONTEXT_AUX7_CF_EN_SHIFT 15
+#elif defined(__LITTLE_ENDIAN)
+	u16 agg_vars2;
+#define __TSTORM_FCOE_AG_CONTEXT_AUX5_FLAG (0x1<<0)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX5_FLAG_SHIFT 0
+#define __TSTORM_FCOE_AG_CONTEXT_AUX6_FLAG (0x1<<1)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX6_FLAG_SHIFT 1
+#define __TSTORM_FCOE_AG_CONTEXT_AUX4_CF (0x3<<2)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX4_CF_SHIFT 2
+#define __TSTORM_FCOE_AG_CONTEXT_AUX5_CF (0x3<<4)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX5_CF_SHIFT 4
+#define __TSTORM_FCOE_AG_CONTEXT_AUX6_CF (0x3<<6)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX6_CF_SHIFT 6
+#define __TSTORM_FCOE_AG_CONTEXT_AUX7_CF (0x3<<8)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX7_CF_SHIFT 8
+#define __TSTORM_FCOE_AG_CONTEXT_AUX7_FLAG (0x1<<10)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX7_FLAG_SHIFT 10
+#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_EN (0x1<<11)
+#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_EN_SHIFT 11
+#define TSTORM_FCOE_AG_CONTEXT_AUX4_CF_EN (0x1<<12)
+#define TSTORM_FCOE_AG_CONTEXT_AUX4_CF_EN_SHIFT 12
+#define TSTORM_FCOE_AG_CONTEXT_AUX5_CF_EN (0x1<<13)
+#define TSTORM_FCOE_AG_CONTEXT_AUX5_CF_EN_SHIFT 13
+#define TSTORM_FCOE_AG_CONTEXT_AUX6_CF_EN (0x1<<14)
+#define TSTORM_FCOE_AG_CONTEXT_AUX6_CF_EN_SHIFT 14
+#define TSTORM_FCOE_AG_CONTEXT_AUX7_CF_EN (0x1<<15)
+#define TSTORM_FCOE_AG_CONTEXT_AUX7_CF_EN_SHIFT 15
+	u16 __agg_val4;
+#endif
+	struct tstorm_fcoe_extra_ag_context_section __extra_section;
+};
+
+
+
+/*
+ * The tcp aggregative context section of Tstorm
+ */
+struct tstorm_tcp_tcp_ag_context_section {
+	u32 __agg_val1;
+#if defined(__BIG_ENDIAN)
+	u8 __tcp_agg_vars2;
+	u8 __agg_val3;
+	u16 __agg_val2;
+#elif defined(__LITTLE_ENDIAN)
+	u16 __agg_val2;
+	u8 __agg_val3;
+	u8 __tcp_agg_vars2;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 __agg_val5;
+	u8 __agg_val6;
+	u8 __tcp_agg_vars3;
+#elif defined(__LITTLE_ENDIAN)
+	u8 __tcp_agg_vars3;
+	u8 __agg_val6;
+	u16 __agg_val5;
+#endif
+	u32 snd_nxt;
+	u32 rtt_seq;
+	u32 rtt_time;
+	u32 __reserved66;
+	u32 wnd_right_edge;
+	u32 tcp_agg_vars1;
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<0)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_FIN_SENT_FLAG_SHIFT 0
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_LAST_PACKET_FIN_FLAG (0x1<<1)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_LAST_PACKET_FIN_FLAG_SHIFT 1
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF (0x3<<2)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF_SHIFT 2
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF (0x3<<4)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF_SHIFT 4
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF_EN (0x1<<6)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF_EN_SHIFT 6
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF_EN (0x1<<7)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF_EN_SHIFT 7
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_SEQ_EN (0x1<<8)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_SEQ_EN_SHIFT 8
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_SND_NXT_EN (0x1<<9)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_SND_NXT_EN_SHIFT 9
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<10)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 10
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_FLAG (0x1<<11)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_FLAG_SHIFT 11
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF_EN (0x1<<12)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF_EN_SHIFT 12
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF_EN (0x1<<13)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF_EN_SHIFT 13
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF (0x3<<14)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF_SHIFT 14
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF (0x3<<16)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF_SHIFT 16
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_BLOCKED (0x1<<18)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_BLOCKED_SHIFT 18
+#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_EN (0x1<<19)
+#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_EN_SHIFT 19
+#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX11_CF_EN (0x1<<20)
+#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX11_CF_EN_SHIFT 20
+#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX12_CF_EN (0x1<<21)
+#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX12_CF_EN_SHIFT 21
+#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RESERVED1 (0x3<<22)
+#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RESERVED1_SHIFT 22
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_PEND_SEQ (0xF<<24)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_PEND_SEQ_SHIFT 24
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_DONE_SEQ (0xF<<28)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_DONE_SEQ_SHIFT 28
+	u32 snd_max;
+	u32 snd_una;
+	u32 __reserved2;
 };
 
 /*
- * FCoE 16-bits index union
+ * The iscsi aggregative context of Tstorm
  */
-union fcoe_idx16_field_union {
-	struct fcoe_idx16_fields fields;
-	u16 val;
+struct tstorm_iscsi_ag_context {
+#if defined(__BIG_ENDIAN)
+	u16 ulp_credit;
+	u8 agg_vars1;
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF (0x3<<4)
+#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_SHIFT 4
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG (0x1<<6)
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG_SHIFT 6
+#define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG (0x1<<7)
+#define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG_SHIFT 7
+	u8 state;
+#elif defined(__LITTLE_ENDIAN)
+	u8 state;
+	u8 agg_vars1;
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF (0x3<<4)
+#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_SHIFT 4
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG (0x1<<6)
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG_SHIFT 6
+#define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG (0x1<<7)
+#define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG_SHIFT 7
+	u16 ulp_credit;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 __agg_val4;
+	u16 agg_vars2;
+#define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG (0x1<<0)
+#define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG_SHIFT 0
+#define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG (0x1<<1)
+#define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG_SHIFT 1
+#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF (0x3<<2)
+#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_SHIFT 2
+#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF (0x3<<4)
+#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_SHIFT 4
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF (0x3<<6)
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_SHIFT 6
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF (0x3<<8)
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_SHIFT 8
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG (0x1<<10)
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG_SHIFT 10
+#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<11)
+#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 11
+#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN (0x1<<12)
+#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN_SHIFT 12
+#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN (0x1<<13)
+#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN_SHIFT 13
+#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN (0x1<<14)
+#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN_SHIFT 14
+#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN (0x1<<15)
+#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN_SHIFT 15
+#elif defined(__LITTLE_ENDIAN)
+	u16 agg_vars2;
+#define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG (0x1<<0)
+#define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG_SHIFT 0
+#define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG (0x1<<1)
+#define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG_SHIFT 1
+#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF (0x3<<2)
+#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_SHIFT 2
+#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF (0x3<<4)
+#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_SHIFT 4
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF (0x3<<6)
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_SHIFT 6
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF (0x3<<8)
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_SHIFT 8
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG (0x1<<10)
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG_SHIFT 10
+#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<11)
+#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 11
+#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN (0x1<<12)
+#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN_SHIFT 12
+#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN (0x1<<13)
+#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN_SHIFT 13
+#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN (0x1<<14)
+#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN_SHIFT 14
+#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN (0x1<<15)
+#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN_SHIFT 15
+	u16 __agg_val4;
+#endif
+	struct tstorm_tcp_tcp_ag_context_section tcp;
 };
 
+
+
 /*
- * 4 regs size
+ * The fcoe aggregative context of Ustorm
  */
-struct fcoe_bd_ctx {
-	u32 buf_addr_hi;
-	u32 buf_addr_lo;
+struct ustorm_fcoe_ag_context {
 #if defined(__BIG_ENDIAN)
-	u16 rsrv0;
-	u16 buf_len;
+	u8 __aux_counter_flags;
+	u8 agg_vars2;
+#define USTORM_FCOE_AG_CONTEXT_TX_CF (0x3<<0)
+#define USTORM_FCOE_AG_CONTEXT_TX_CF_SHIFT 0
+#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF (0x3<<2)
+#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_SHIFT 2
+#define USTORM_FCOE_AG_CONTEXT_AGG_MISC4_RULE (0x7<<4)
+#define USTORM_FCOE_AG_CONTEXT_AGG_MISC4_RULE_SHIFT 4
+#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL2_MASK (0x1<<7)
+#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL2_MASK_SHIFT 7
+	u8 agg_vars1;
+#define __USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define __USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define USTORM_FCOE_AG_CONTEXT_INV_CF (0x3<<4)
+#define USTORM_FCOE_AG_CONTEXT_INV_CF_SHIFT 4
+#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF (0x3<<6)
+#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_SHIFT 6
+	u8 state;
 #elif defined(__LITTLE_ENDIAN)
-	u16 buf_len;
-	u16 rsrv0;
+	u8 state;
+	u8 agg_vars1;
+#define __USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define __USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define USTORM_FCOE_AG_CONTEXT_INV_CF (0x3<<4)
+#define USTORM_FCOE_AG_CONTEXT_INV_CF_SHIFT 4
+#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF (0x3<<6)
+#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_SHIFT 6
+	u8 agg_vars2;
+#define USTORM_FCOE_AG_CONTEXT_TX_CF (0x3<<0)
+#define USTORM_FCOE_AG_CONTEXT_TX_CF_SHIFT 0
+#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF (0x3<<2)
+#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_SHIFT 2
+#define USTORM_FCOE_AG_CONTEXT_AGG_MISC4_RULE (0x7<<4)
+#define USTORM_FCOE_AG_CONTEXT_AGG_MISC4_RULE_SHIFT 4
+#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL2_MASK (0x1<<7)
+#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL2_MASK_SHIFT 7
+	u8 __aux_counter_flags;
 #endif
 #if defined(__BIG_ENDIAN)
-	u16 rsrv1;
-	u16 flags;
+	u8 cdu_usage;
+	u8 agg_misc2;
+	u16 pbf_tx_seq_ack;
 #elif defined(__LITTLE_ENDIAN)
-	u16 flags;
-	u16 rsrv1;
+	u16 pbf_tx_seq_ack;
+	u8 agg_misc2;
+	u8 cdu_usage;
 #endif
-};
-
-/*
- * Parameters required for placement according to SGL
- */
-struct ustorm_fcoe_data_place {
+	u32 agg_misc4;
 #if defined(__BIG_ENDIAN)
-	u16 cached_sge_off;
-	u8 cached_num_sges;
-	u8 cached_sge_idx;
+	u8 agg_val3_th;
+	u8 agg_val3;
+	u16 agg_misc3;
 #elif defined(__LITTLE_ENDIAN)
-	u8 cached_sge_idx;
-	u8 cached_num_sges;
-	u16 cached_sge_off;
+	u16 agg_misc3;
+	u8 agg_val3;
+	u8 agg_val3_th;
 #endif
-	struct fcoe_bd_ctx cached_sge[3];
-};
-
-struct fcoe_task_ctx_entry_txwr_rxrd {
+	u32 expired_task_id;
+	u32 agg_misc4_th;
 #if defined(__BIG_ENDIAN)
-	u16 verify_tx_seq;
-	u8 init_flags;
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE (0x7<<0)
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT 0
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE (0x1<<3)
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT 3
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE (0x1<<4)
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT 4
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE (0x1<<5)
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE_SHIFT 5
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5 (0x3<<6)
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5_SHIFT 6
-	u8 tx_flags;
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE (0xF<<0)
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT 0
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4 (0xF<<4)
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4_SHIFT 4
-#elif defined(__LITTLE_ENDIAN)
-	u8 tx_flags;
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE (0xF<<0)
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT 0
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4 (0xF<<4)
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4_SHIFT 4
-	u8 init_flags;
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE (0x7<<0)
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT 0
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE (0x1<<3)
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT 3
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE (0x1<<4)
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT 4
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE (0x1<<5)
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE_SHIFT 5
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5 (0x3<<6)
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5_SHIFT 6
-	u16 verify_tx_seq;
-#endif
-};
-
-struct fcoe_fcp_cmd_payload {
-	u32 opaque[8];
-};
-
-struct fcoe_fc_hdr {
-#if defined(__BIG_ENDIAN)
-	u8 cs_ctl;
-	u8 s_id[3];
-#elif defined(__LITTLE_ENDIAN)
-	u8 s_id[3];
-	u8 cs_ctl;
-#endif
-#if defined(__BIG_ENDIAN)
-	u8 r_ctl;
-	u8 d_id[3];
-#elif defined(__LITTLE_ENDIAN)
-	u8 d_id[3];
-	u8 r_ctl;
-#endif
-#if defined(__BIG_ENDIAN)
-	u8 seq_id;
-	u8 df_ctl;
-	u16 seq_cnt;
-#elif defined(__LITTLE_ENDIAN)
-	u16 seq_cnt;
-	u8 df_ctl;
-	u8 seq_id;
-#endif
-#if defined(__BIG_ENDIAN)
-	u8 type;
-	u8 f_ctl[3];
-#elif defined(__LITTLE_ENDIAN)
-	u8 f_ctl[3];
-	u8 type;
-#endif
-	u32 parameters;
-#if defined(__BIG_ENDIAN)
-	u16 ox_id;
-	u16 rx_id;
-#elif defined(__LITTLE_ENDIAN)
-	u16 rx_id;
-	u16 ox_id;
-#endif
-};
-
-struct fcoe_fc_frame {
-	struct fcoe_fc_hdr fc_hdr;
-	u32 reserved0[2];
-};
-
-union fcoe_cmd_flow_info {
-	struct fcoe_fcp_cmd_payload fcp_cmd_payload;
-	struct fcoe_fc_frame mp_fc_frame;
-};
-
-struct fcoe_read_flow_info {
-	struct fcoe_fc_hdr fc_data_in_hdr;
-	u32 reserved[2];
-};
-
-struct fcoe_fcp_xfr_rdy_payload {
-	u32 burst_len;
-	u32 data_ro;
-};
-
-struct fcoe_write_flow_info {
-	struct fcoe_fc_hdr fc_data_out_hdr;
-	struct fcoe_fcp_xfr_rdy_payload fcp_xfr_payload;
-};
-
-struct fcoe_fcp_rsp_flags {
-	u8 flags;
-#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID (0x1<<0)
-#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID_SHIFT 0
-#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID (0x1<<1)
-#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID_SHIFT 1
-#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER (0x1<<2)
-#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER_SHIFT 2
-#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER (0x1<<3)
-#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER_SHIFT 3
-#define FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ (0x1<<4)
-#define FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ_SHIFT 4
-#define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS (0x7<<5)
-#define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS_SHIFT 5
-};
-
-struct fcoe_fcp_rsp_payload {
-	struct regpair reserved0;
-	u32 fcp_resid;
-#if defined(__BIG_ENDIAN)
-	u16 retry_delay_timer;
-	struct fcoe_fcp_rsp_flags fcp_flags;
-	u8 scsi_status_code;
-#elif defined(__LITTLE_ENDIAN)
-	u8 scsi_status_code;
-	struct fcoe_fcp_rsp_flags fcp_flags;
-	u16 retry_delay_timer;
-#endif
-	u32 fcp_rsp_len;
-	u32 fcp_sns_len;
-};
-
-/*
- * Fixed size structure in order to plant it in Union structure
- */
-struct fcoe_fcp_rsp_union {
-	struct fcoe_fcp_rsp_payload payload;
-	struct regpair reserved0;
-};
-
-/*
- * Fixed size structure in order to plant it in Union structure
- */
-struct fcoe_abts_rsp_union {
-	u32 r_ctl;
-	u32 abts_rsp_payload[7];
-};
-
-union fcoe_rsp_flow_info {
-	struct fcoe_fcp_rsp_union fcp_rsp;
-	struct fcoe_abts_rsp_union abts_rsp;
-};
-
-struct fcoe_cleanup_flow_info {
-#if defined(__BIG_ENDIAN)
-	u16 reserved1;
-	u16 task_id;
-#elif defined(__LITTLE_ENDIAN)
-	u16 task_id;
-	u16 reserved1;
-#endif
-	u32 reserved2[7];
-};
-
-/*
- * 32 bytes used for general purposes
- */
-union fcoe_general_task_ctx {
-	union fcoe_cmd_flow_info cmd_info;
-	struct fcoe_read_flow_info read_info;
-	struct fcoe_write_flow_info write_info;
-	union fcoe_rsp_flow_info rsp_info;
-	struct fcoe_cleanup_flow_info cleanup_info;
-	u32 comp_info[8];
-};
-
-struct fcoe_s_stat_ctx {
-	u8 flags;
-#define FCOE_S_STAT_CTX_ACTIVE (0x1<<0)
-#define FCOE_S_STAT_CTX_ACTIVE_SHIFT 0
-#define FCOE_S_STAT_CTX_ACK_ABORT_SEQ_COND (0x1<<1)
-#define FCOE_S_STAT_CTX_ACK_ABORT_SEQ_COND_SHIFT 1
-#define FCOE_S_STAT_CTX_ABTS_PERFORMED (0x1<<2)
-#define FCOE_S_STAT_CTX_ABTS_PERFORMED_SHIFT 2
-#define FCOE_S_STAT_CTX_SEQ_TIMEOUT (0x1<<3)
-#define FCOE_S_STAT_CTX_SEQ_TIMEOUT_SHIFT 3
-#define FCOE_S_STAT_CTX_P_RJT (0x1<<4)
-#define FCOE_S_STAT_CTX_P_RJT_SHIFT 4
-#define FCOE_S_STAT_CTX_ACK_EOFT (0x1<<5)
-#define FCOE_S_STAT_CTX_ACK_EOFT_SHIFT 5
-#define FCOE_S_STAT_CTX_RSRV1 (0x3<<6)
-#define FCOE_S_STAT_CTX_RSRV1_SHIFT 6
-};
-
-/*
- * Common section; both TX and RX processing may read and write it in their respective flows
- */
-struct fcoe_task_ctx_entry_tx_rx_cmn {
-	u32 data_2_trns;
-	union fcoe_general_task_ctx general;
-#if defined(__BIG_ENDIAN)
-	u16 tx_low_seq_cnt;
-	struct fcoe_s_stat_ctx tx_s_stat;
-	u8 tx_seq_id;
-#elif defined(__LITTLE_ENDIAN)
-	u8 tx_seq_id;
-	struct fcoe_s_stat_ctx tx_s_stat;
-	u16 tx_low_seq_cnt;
-#endif
-	u32 common_flags;
-#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID (0xFFFFFF<<0)
-#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT 0
-#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID (0x1<<24)
-#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID_SHIFT 24
-#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_SEQ_INIT (0x1<<25)
-#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_SEQ_INIT_SHIFT 25
-#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_XFER (0x1<<26)
-#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_XFER_SHIFT 26
-#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_CONF (0x1<<27)
-#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_CONF_SHIFT 27
-#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME (0x1<<28)
-#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME_SHIFT 28
-#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_RSRV (0x7<<29)
-#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_RSRV_SHIFT 29
-};
-
-struct fcoe_task_ctx_entry_rxwr_txrd {
-#if defined(__BIG_ENDIAN)
-	u16 rx_id;
-	u16 rx_flags;
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE (0xF<<0)
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE_SHIFT 0
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE (0x7<<4)
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE_SHIFT 4
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ (0x1<<7)
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ_SHIFT 7
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME (0x1<<8)
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME_SHIFT 8
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0 (0x7F<<9)
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0_SHIFT 9
-#elif defined(__LITTLE_ENDIAN)
-	u16 rx_flags;
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE (0xF<<0)
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE_SHIFT 0
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE (0x7<<4)
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE_SHIFT 4
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ (0x1<<7)
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ_SHIFT 7
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME (0x1<<8)
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME_SHIFT 8
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0 (0x7F<<9)
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0_SHIFT 9
-	u16 rx_id;
-#endif
-};
-
-struct fcoe_seq_ctx {
-#if defined(__BIG_ENDIAN)
-	u16 low_seq_cnt;
-	struct fcoe_s_stat_ctx s_stat;
-	u8 seq_id;
-#elif defined(__LITTLE_ENDIAN)
-	u8 seq_id;
-	struct fcoe_s_stat_ctx s_stat;
-	u16 low_seq_cnt;
-#endif
-#if defined(__BIG_ENDIAN)
-	u16 err_seq_cnt;
-	u16 high_seq_cnt;
-#elif defined(__LITTLE_ENDIAN)
-	u16 high_seq_cnt;
-	u16 err_seq_cnt;
-#endif
-	u32 low_exp_ro;
-	u32 high_exp_ro;
-};
-
-struct fcoe_single_sge_ctx {
-	struct regpair cur_buf_addr;
-#if defined(__BIG_ENDIAN)
-	u16 reserved0;
-	u16 cur_buf_rem;
-#elif defined(__LITTLE_ENDIAN)
-	u16 cur_buf_rem;
-	u16 reserved0;
-#endif
-};
-
-struct fcoe_mul_sges_ctx {
-	struct regpair cur_sge_addr;
-#if defined(__BIG_ENDIAN)
-	u8 sgl_size;
-	u8 cur_sge_idx;
-	u16 cur_sge_off;
-#elif defined(__LITTLE_ENDIAN)
-	u16 cur_sge_off;
-	u8 cur_sge_idx;
-	u8 sgl_size;
-#endif
-};
-
-union fcoe_sgl_ctx {
-	struct fcoe_single_sge_ctx single_sge;
-	struct fcoe_mul_sges_ctx mul_sges;
-};
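union fcoe_sgl_ctx overlays the single-buffer and scatter-gather descriptors in the same context bytes; which interpretation applies is decided elsewhere in the task context. A sketch of filling the single-SGE form, assuming struct regpair exposes lo/hi 32-bit halves as it does elsewhere in these HSI headers (the helper and its arguments are invented):

```c
/* Illustrative: describe one contiguous DMA buffer through the
 * single_sge member; 'dma' and 'len' are assumed caller inputs. */
static void fcoe_sgl_set_single(union fcoe_sgl_ctx *sgl, u64 dma, u16 len)
{
	sgl->single_sge.cur_buf_addr.lo = (u32)dma;
	sgl->single_sge.cur_buf_addr.hi = (u32)(dma >> 32);
	sgl->single_sge.cur_buf_rem = len;
}
```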
-
-struct fcoe_task_ctx_entry_rx_only {
-	struct fcoe_seq_ctx seq_ctx;
-	struct fcoe_seq_ctx ooo_seq_ctx;
-	u32 rsrv3;
-	union fcoe_sgl_ctx sgl_ctx;
-};
-
-struct ustorm_fcoe_task_ctx_entry_rd {
-	struct fcoe_task_ctx_entry_txwr_rxrd tx_wr_rx_rd;
-	struct fcoe_task_ctx_entry_tx_rx_cmn cmn;
-	struct fcoe_task_ctx_entry_rxwr_txrd rx_wr_tx_rd;
-	struct fcoe_task_ctx_entry_rx_only rx_wr;
-	u32 reserved;
-};
-
-/*
- * Ustorm FCoE Storm Context
- */
-struct ustorm_fcoe_st_context {
-	struct ustorm_fcoe_params fcoe_params;
-	struct regpair task_addr;
-	struct regpair cq_base_addr;
-	struct regpair rq_pbl_base;
-	struct regpair rq_cur_page_addr;
-	struct regpair confq_pbl_base_addr;
-	struct regpair conn_db_base;
-	struct regpair xfrq_base_addr;
-	struct regpair lcq_base_addr;
-#if defined(__BIG_ENDIAN)
-	union fcoe_idx16_field_union rq_cons;
-	union fcoe_idx16_field_union rq_prod;
-#elif defined(__LITTLE_ENDIAN)
-	union fcoe_idx16_field_union rq_prod;
-	union fcoe_idx16_field_union rq_cons;
-#endif
-#if defined(__BIG_ENDIAN)
-	u16 xfrq_prod;
+	u16 cq_prod;
 	u16 cq_cons;
 #elif defined(__LITTLE_ENDIAN)
 	u16 cq_cons;
-	u16 xfrq_prod;
+	u16 cq_prod;
 #endif
 #if defined(__BIG_ENDIAN)
-	u16 lcq_cons;
-	u16 hc_cram_address;
+	u16 __reserved2;
+	u8 decision_rules;
+#define USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE (0x7<<0)
+#define USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE_SHIFT 0
+#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3)
+#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3
+#define USTORM_FCOE_AG_CONTEXT_CQ_ARM_N_FLAG (0x1<<6)
+#define USTORM_FCOE_AG_CONTEXT_CQ_ARM_N_FLAG_SHIFT 6
+#define __USTORM_FCOE_AG_CONTEXT_RESERVED1 (0x1<<7)
+#define __USTORM_FCOE_AG_CONTEXT_RESERVED1_SHIFT 7
+	u8 decision_rule_enable_bits;
+#define __USTORM_FCOE_AG_CONTEXT_RESERVED_INV_CF_EN (0x1<<0)
+#define __USTORM_FCOE_AG_CONTEXT_RESERVED_INV_CF_EN_SHIFT 0
+#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_EN (0x1<<1)
+#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_EN_SHIFT 1
+#define USTORM_FCOE_AG_CONTEXT_TX_CF_EN (0x1<<2)
+#define USTORM_FCOE_AG_CONTEXT_TX_CF_EN_SHIFT 2
+#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_EN (0x1<<3)
+#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_EN_SHIFT 3
+#define __USTORM_FCOE_AG_CONTEXT_AUX1_CF_EN (0x1<<4)
+#define __USTORM_FCOE_AG_CONTEXT_AUX1_CF_EN_SHIFT 4
+#define __USTORM_FCOE_AG_CONTEXT_QUEUE0_CF_EN (0x1<<5)
+#define __USTORM_FCOE_AG_CONTEXT_QUEUE0_CF_EN_SHIFT 5
+#define __USTORM_FCOE_AG_CONTEXT_AUX3_CF_EN (0x1<<6)
+#define __USTORM_FCOE_AG_CONTEXT_AUX3_CF_EN_SHIFT 6
+#define __USTORM_FCOE_AG_CONTEXT_DQ_CF_EN (0x1<<7)
+#define __USTORM_FCOE_AG_CONTEXT_DQ_CF_EN_SHIFT 7
 #elif defined(__LITTLE_ENDIAN)
-	u16 hc_cram_address;
-	u16 lcq_cons;
+	u8 decision_rule_enable_bits;
+#define __USTORM_FCOE_AG_CONTEXT_RESERVED_INV_CF_EN (0x1<<0)
+#define __USTORM_FCOE_AG_CONTEXT_RESERVED_INV_CF_EN_SHIFT 0
+#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_EN (0x1<<1)
+#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_EN_SHIFT 1
+#define USTORM_FCOE_AG_CONTEXT_TX_CF_EN (0x1<<2)
+#define USTORM_FCOE_AG_CONTEXT_TX_CF_EN_SHIFT 2
+#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_EN (0x1<<3)
+#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_EN_SHIFT 3
+#define __USTORM_FCOE_AG_CONTEXT_AUX1_CF_EN (0x1<<4)
+#define __USTORM_FCOE_AG_CONTEXT_AUX1_CF_EN_SHIFT 4
+#define __USTORM_FCOE_AG_CONTEXT_QUEUE0_CF_EN (0x1<<5)
+#define __USTORM_FCOE_AG_CONTEXT_QUEUE0_CF_EN_SHIFT 5
+#define __USTORM_FCOE_AG_CONTEXT_AUX3_CF_EN (0x1<<6)
+#define __USTORM_FCOE_AG_CONTEXT_AUX3_CF_EN_SHIFT 6
+#define __USTORM_FCOE_AG_CONTEXT_DQ_CF_EN (0x1<<7)
+#define __USTORM_FCOE_AG_CONTEXT_DQ_CF_EN_SHIFT 7
+	u8 decision_rules;
+#define USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE (0x7<<0)
+#define USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE_SHIFT 0
+#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3)
+#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3
+#define USTORM_FCOE_AG_CONTEXT_CQ_ARM_N_FLAG (0x1<<6)
+#define USTORM_FCOE_AG_CONTEXT_CQ_ARM_N_FLAG_SHIFT 6
+#define __USTORM_FCOE_AG_CONTEXT_RESERVED1 (0x1<<7)
+#define __USTORM_FCOE_AG_CONTEXT_RESERVED1_SHIFT 7
+	u16 __reserved2;
 #endif
-#if defined(__BIG_ENDIAN)
-	u16 sq_xfrq_lcq_confq_size;
-	u16 confq_prod;
-#elif defined(__LITTLE_ENDIAN)
-	u16 confq_prod;
-	u16 sq_xfrq_lcq_confq_size;
-#endif
-#if defined(__BIG_ENDIAN)
-	u8 hc_csdm_agg_int;
-	u8 flags;
-#define USTORM_FCOE_ST_CONTEXT_MID_SEQ_PROC_FLAG (0x1<<0)
-#define USTORM_FCOE_ST_CONTEXT_MID_SEQ_PROC_FLAG_SHIFT 0
-#define USTORM_FCOE_ST_CONTEXT_CACHED_CONN_FLAG (0x1<<1)
-#define USTORM_FCOE_ST_CONTEXT_CACHED_CONN_FLAG_SHIFT 1
-#define USTORM_FCOE_ST_CONTEXT_CACHED_TCE_FLAG (0x1<<2)
-#define USTORM_FCOE_ST_CONTEXT_CACHED_TCE_FLAG_SHIFT 2
-#define USTORM_FCOE_ST_CONTEXT_RSRV1 (0x1F<<3)
-#define USTORM_FCOE_ST_CONTEXT_RSRV1_SHIFT 3
-	u8 available_rqes;
-	u8 sp_q_flush_cnt;
-#elif defined(__LITTLE_ENDIAN)
-	u8 sp_q_flush_cnt;
-	u8 available_rqes;
-	u8 flags;
-#define USTORM_FCOE_ST_CONTEXT_MID_SEQ_PROC_FLAG (0x1<<0)
-#define USTORM_FCOE_ST_CONTEXT_MID_SEQ_PROC_FLAG_SHIFT 0
-#define USTORM_FCOE_ST_CONTEXT_CACHED_CONN_FLAG (0x1<<1)
-#define USTORM_FCOE_ST_CONTEXT_CACHED_CONN_FLAG_SHIFT 1
-#define USTORM_FCOE_ST_CONTEXT_CACHED_TCE_FLAG (0x1<<2)
-#define USTORM_FCOE_ST_CONTEXT_CACHED_TCE_FLAG_SHIFT 2
-#define USTORM_FCOE_ST_CONTEXT_RSRV1 (0x1F<<3)
-#define USTORM_FCOE_ST_CONTEXT_RSRV1_SHIFT 3
-	u8 hc_csdm_agg_int;
-#endif
-	struct ustorm_fcoe_data_place data_place;
-	struct ustorm_fcoe_task_ctx_entry_rd tce;
 };
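The decision_rules byte introduced on the added lines above packs a 3-bit rule selector beside single-bit flags, so updates must preserve the neighbouring bits. A minimal read-modify-write sketch using the new macros (the helper itself is illustrative, not driver code):

```c
/* Illustrative: install a 3-bit CQ decision rule without disturbing
 * the ARM_N flag or the other rule field sharing the byte. */
static inline void ustorm_fcoe_set_cq_dec_rule(u8 *rules, u8 val)
{
	*rules &= ~USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE;
	*rules |= (val << USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE_SHIFT) &
		  USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE;
}
```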
 
+
 /*
- * The FCoE non-aggregative context of Tstorm
+ * The iSCSI aggregative context of Ustorm
  */
-struct tstorm_fcoe_st_context {
-	struct regpair reserved0;
-	struct regpair reserved1;
+struct ustorm_iscsi_ag_context {
+#if defined(__BIG_ENDIAN)
+	u8 __aux_counter_flags;
+	u8 agg_vars2;
+#define USTORM_ISCSI_AG_CONTEXT_TX_CF (0x3<<0)
+#define USTORM_ISCSI_AG_CONTEXT_TX_CF_SHIFT 0
+#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF (0x3<<2)
+#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_SHIFT 2
+#define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE (0x7<<4)
+#define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE_SHIFT 4
+#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK (0x1<<7)
+#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK_SHIFT 7
+	u8 agg_vars1;
+#define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define USTORM_ISCSI_AG_CONTEXT_INV_CF (0x3<<4)
+#define USTORM_ISCSI_AG_CONTEXT_INV_CF_SHIFT 4
+#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF (0x3<<6)
+#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_SHIFT 6
+	u8 state;
+#elif defined(__LITTLE_ENDIAN)
+	u8 state;
+	u8 agg_vars1;
+#define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define USTORM_ISCSI_AG_CONTEXT_INV_CF (0x3<<4)
+#define USTORM_ISCSI_AG_CONTEXT_INV_CF_SHIFT 4
+#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF (0x3<<6)
+#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_SHIFT 6
+	u8 agg_vars2;
+#define USTORM_ISCSI_AG_CONTEXT_TX_CF (0x3<<0)
+#define USTORM_ISCSI_AG_CONTEXT_TX_CF_SHIFT 0
+#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF (0x3<<2)
+#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_SHIFT 2
+#define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE (0x7<<4)
+#define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE_SHIFT 4
+#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK (0x1<<7)
+#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK_SHIFT 7
+	u8 __aux_counter_flags;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 cdu_usage;
+	u8 agg_misc2;
+	u16 __cq_local_comp_itt_val;
+#elif defined(__LITTLE_ENDIAN)
+	u16 __cq_local_comp_itt_val;
+	u8 agg_misc2;
+	u8 cdu_usage;
+#endif
+	u32 agg_misc4;
+#if defined(__BIG_ENDIAN)
+	u8 agg_val3_th;
+	u8 agg_val3;
+	u16 agg_misc3;
+#elif defined(__LITTLE_ENDIAN)
+	u16 agg_misc3;
+	u8 agg_val3;
+	u8 agg_val3_th;
+#endif
+	u32 agg_val1;
+	u32 agg_misc4_th;
+#if defined(__BIG_ENDIAN)
+	u16 agg_val2_th;
+	u16 agg_val2;
+#elif defined(__LITTLE_ENDIAN)
+	u16 agg_val2;
+	u16 agg_val2_th;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 __reserved2;
+	u8 decision_rules;
+#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE (0x7<<0)
+#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE_SHIFT 0
+#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3)
+#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3
+#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG (0x1<<6)
+#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG_SHIFT 6
+#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1 (0x1<<7)
+#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1_SHIFT 7
+	u8 decision_rule_enable_bits;
+#define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN (0x1<<0)
+#define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN_SHIFT 0
+#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN (0x1<<1)
+#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN_SHIFT 1
+#define USTORM_ISCSI_AG_CONTEXT_TX_CF_EN (0x1<<2)
+#define USTORM_ISCSI_AG_CONTEXT_TX_CF_EN_SHIFT 2
+#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN (0x1<<3)
+#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN_SHIFT 3
+#define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN (0x1<<4)
+#define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN_SHIFT 4
+#define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<5)
+#define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 5
+#define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN (0x1<<6)
+#define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN_SHIFT 6
+#define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN (0x1<<7)
+#define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN_SHIFT 7
+#elif defined(__LITTLE_ENDIAN)
+	u8 decision_rule_enable_bits;
+#define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN (0x1<<0)
+#define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN_SHIFT 0
+#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN (0x1<<1)
+#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN_SHIFT 1
+#define USTORM_ISCSI_AG_CONTEXT_TX_CF_EN (0x1<<2)
+#define USTORM_ISCSI_AG_CONTEXT_TX_CF_EN_SHIFT 2
+#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN (0x1<<3)
+#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN_SHIFT 3
+#define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN (0x1<<4)
+#define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN_SHIFT 4
+#define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<5)
+#define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 5
+#define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN (0x1<<6)
+#define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN_SHIFT 6
+#define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN (0x1<<7)
+#define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN_SHIFT 7
+	u8 decision_rules;
+#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE (0x7<<0)
+#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE_SHIFT 0
+#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3)
+#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3
+#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG (0x1<<6)
+#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG_SHIFT 6
+#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1 (0x1<<7)
+#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1_SHIFT 7
+	u16 __reserved2;
+#endif
 };
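Note the declaration pattern repeated in this struct and throughout the file: each multi-byte cluster appears twice with member order reversed between __BIG_ENDIAN and __LITTLE_ENDIAN. Assuming the DMA path moves these contexts as whole 32-bit words (as is usual for this hardware family), the reversal keeps every field in the same bit range of its containing word on either host. A standalone illustration, not driver code:

```c
/* Read the containing 4 bytes as a native u32 and 'hi' lands in bits
 * 31:16 on both big- and little-endian hosts. */
struct demo_word {
#if defined(__BIG_ENDIAN)
	u16 hi;		/* bits 31:16 of the word */
	u16 lo;		/* bits 15:0  of the word */
#elif defined(__LITTLE_ENDIAN)
	u16 lo;		/* bits 15:0  of the word */
	u16 hi;		/* bits 31:16 of the word */
#endif
};
```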
 
+
 /*
  * The fcoe aggregative context section of Xstorm
  */
@@ -1272,8 +1366,8 @@
 #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED51_SHIFT 0
 #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED (0x3<<2)
 #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_SHIFT 2
-#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_PBF_TX_SEQ_ACK_CF (0x3<<4)
-#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_PBF_TX_SEQ_ACK_CF_SHIFT 4
+#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF (0x3<<4)
+#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_SHIFT 4
 #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_CLEAR_DA_TIMER_EN (0x1<<6)
 #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_CLEAR_DA_TIMER_EN_SHIFT 6
 #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_DA_EXPIRATION_FLAG (0x1<<7)
@@ -1288,20 +1382,20 @@
 #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED51_SHIFT 0
 #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED (0x3<<2)
 #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_SHIFT 2
-#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_PBF_TX_SEQ_ACK_CF (0x3<<4)
-#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_PBF_TX_SEQ_ACK_CF_SHIFT 4
+#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF (0x3<<4)
+#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_SHIFT 4
 #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_CLEAR_DA_TIMER_EN (0x1<<6)
 #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_CLEAR_DA_TIMER_EN_SHIFT 6
 #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_DA_EXPIRATION_FLAG (0x1<<7)
 #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_DA_EXPIRATION_FLAG_SHIFT 7
 #endif
-	u32 __task_addr_lo;
-	u32 __task_addr_hi;
+	u32 snd_nxt;
+	u32 tx_wnd;
 	u32 __reserved55;
-	u32 __tx_prods;
+	u32 local_adv_wnd;
 #if defined(__BIG_ENDIAN)
 	u8 __agg_val8_th;
-	u8 __agg_val8;
+	u8 __tx_dest;
 	u16 tcp_agg_vars2;
 #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED57 (0x1<<0)
 #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED57_SHIFT 0
@@ -1317,8 +1411,8 @@
 #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED60_SHIFT 5
 #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_ACK_TO_FE_UPDATED_EN (0x1<<6)
 #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_ACK_TO_FE_UPDATED_EN_SHIFT 6
-#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_PBF_TX_SEQ_ACK_CF_EN (0x1<<7)
-#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_PBF_TX_SEQ_ACK_CF_EN_SHIFT 7
+#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN (0x1<<7)
+#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN_SHIFT 7
 #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_TX_FIN_FLAG_EN (0x1<<8)
 #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_TX_FIN_FLAG_EN_SHIFT 8
 #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<9)
@@ -1327,8 +1421,8 @@
 #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SET_RTO_CF_SHIFT 10
 #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF (0x3<<12)
 #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF_SHIFT 12
-#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX8_CF (0x3<<14)
-#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX8_CF_SHIFT 14
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF (0x3<<14)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF_SHIFT 14
 #elif defined(__LITTLE_ENDIAN)
 	u16 tcp_agg_vars2;
 #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED57 (0x1<<0)
@@ -1345,8 +1439,8 @@
 #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED60_SHIFT 5
 #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_ACK_TO_FE_UPDATED_EN (0x1<<6)
 #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_ACK_TO_FE_UPDATED_EN_SHIFT 6
-#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_PBF_TX_SEQ_ACK_CF_EN (0x1<<7)
-#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_PBF_TX_SEQ_ACK_CF_EN_SHIFT 7
+#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN (0x1<<7)
+#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN_SHIFT 7
 #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_TX_FIN_FLAG_EN (0x1<<8)
 #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_TX_FIN_FLAG_EN_SHIFT 8
 #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<9)
@@ -1355,9 +1449,9 @@
 #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SET_RTO_CF_SHIFT 10
 #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF (0x3<<12)
 #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF_SHIFT 12
-#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX8_CF (0x3<<14)
-#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX8_CF_SHIFT 14
-	u8 __agg_val8;
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF (0x3<<14)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF_SHIFT 14
+	u8 __tx_dest;
 	u8 __agg_val8_th;
 #endif
 	u32 __sq_base_addr_lo;
@@ -1591,9 +1685,9 @@
 #if defined(__BIG_ENDIAN)
 	u8 __reserved1;
 	u8 __agg_val6_th;
-	u16 __confq_tx_prod;
+	u16 __agg_val9;
 #elif defined(__LITTLE_ENDIAN)
-	u16 __confq_tx_prod;
+	u16 __agg_val9;
 	u8 __agg_val6_th;
 	u8 __reserved1;
 #endif
@@ -1605,16 +1699,16 @@
 	u16 confq_cons;
 #endif
 	u32 agg_vars8;
-#define __XSTORM_FCOE_AG_CONTEXT_CACHE_WQE_IDX (0xFFFFFF<<0)
-#define __XSTORM_FCOE_AG_CONTEXT_CACHE_WQE_IDX_SHIFT 0
+#define XSTORM_FCOE_AG_CONTEXT_AGG_MISC2 (0xFFFFFF<<0)
+#define XSTORM_FCOE_AG_CONTEXT_AGG_MISC2_SHIFT 0
 #define XSTORM_FCOE_AG_CONTEXT_AGG_MISC3 (0xFF<<24)
 #define XSTORM_FCOE_AG_CONTEXT_AGG_MISC3_SHIFT 24
 #if defined(__BIG_ENDIAN)
-	u16 ox_id;
+	u16 agg_misc0;
 	u16 sq_prod;
 #elif defined(__LITTLE_ENDIAN)
 	u16 sq_prod;
-	u16 ox_id;
+	u16 agg_misc0;
 #endif
 #if defined(__BIG_ENDIAN)
 	u8 agg_val3;
@@ -1628,986 +1722,51 @@
 	u8 agg_val3;
 #endif
 #if defined(__BIG_ENDIAN)
-	u16 __pbf_tx_seq_ack;
+	u16 __agg_misc1;
 	u16 agg_limit1;
 #elif defined(__LITTLE_ENDIAN)
 	u16 agg_limit1;
-	u16 __pbf_tx_seq_ack;
+	u16 __agg_misc1;
 #endif
 	u32 completion_seq;
 	u32 confq_pbl_base_lo;
 	u32 confq_pbl_base_hi;
 };
 
-/*
- * The fcoe extra aggregative context section of Tstorm
- */
-struct tstorm_fcoe_extra_ag_context_section {
-	u32 __agg_val1;
-#if defined(__BIG_ENDIAN)
-	u8 __tcp_agg_vars2;
-	u8 __agg_val3;
-	u16 __agg_val2;
-#elif defined(__LITTLE_ENDIAN)
-	u16 __agg_val2;
-	u8 __agg_val3;
-	u8 __tcp_agg_vars2;
-#endif
-#if defined(__BIG_ENDIAN)
-	u16 __agg_val5;
-	u8 __agg_val6;
-	u8 __tcp_agg_vars3;
-#elif defined(__LITTLE_ENDIAN)
-	u8 __tcp_agg_vars3;
-	u8 __agg_val6;
-	u16 __agg_val5;
-#endif
-	u32 __lcq_prod;
-	u32 rtt_seq;
-	u32 rtt_time;
-	u32 __reserved66;
-	u32 wnd_right_edge;
-	u32 tcp_agg_vars1;
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<0)
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_FIN_SENT_FLAG_SHIFT 0
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_LAST_PACKET_FIN_FLAG (0x1<<1)
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_LAST_PACKET_FIN_FLAG_SHIFT 1
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_WND_UPD_CF (0x3<<2)
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_WND_UPD_CF_SHIFT 2
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TIMEOUT_CF (0x3<<4)
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TIMEOUT_CF_SHIFT 4
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_WND_UPD_CF_EN (0x1<<6)
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_WND_UPD_CF_EN_SHIFT 6
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TIMEOUT_CF_EN (0x1<<7)
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TIMEOUT_CF_EN_SHIFT 7
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_SEQ_EN (0x1<<8)
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_SEQ_EN_SHIFT 8
-#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_LCQ_SND_EN (0x1<<9)
-#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_LCQ_SND_EN_SHIFT 9
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<10)
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 10
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_FLAG (0x1<<11)
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_FLAG_SHIFT 11
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_CF_EN (0x1<<12)
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_CF_EN_SHIFT 12
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_CF_EN (0x1<<13)
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_CF_EN_SHIFT 13
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_CF (0x3<<14)
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_CF_SHIFT 14
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_CF (0x3<<16)
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_CF_SHIFT 16
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_BLOCKED (0x1<<18)
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_BLOCKED_SHIFT 18
-#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX10_CF_EN (0x1<<19)
-#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX10_CF_EN_SHIFT 19
-#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX11_CF_EN (0x1<<20)
-#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX11_CF_EN_SHIFT 20
-#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX12_CF_EN (0x1<<21)
-#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX12_CF_EN_SHIFT 21
-#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED1 (0x3<<22)
-#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED1_SHIFT 22
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_PEND_SEQ (0xF<<24)
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_PEND_SEQ_SHIFT 24
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_DONE_SEQ (0xF<<28)
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_DONE_SEQ_SHIFT 28
-	u32 snd_max;
-	u32 __lcq_cons;
-	u32 __reserved2;
-};
 
-/*
- * The fcoe aggregative context of Tstorm
- */
-struct tstorm_fcoe_ag_context {
-#if defined(__BIG_ENDIAN)
-	u16 ulp_credit;
-	u8 agg_vars1;
-#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
-#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
-#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
-#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
-#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
-#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
-#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
-#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
-#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF (0x3<<4)
-#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_SHIFT 4
-#define __TSTORM_FCOE_AG_CONTEXT_AUX3_FLAG (0x1<<6)
-#define __TSTORM_FCOE_AG_CONTEXT_AUX3_FLAG_SHIFT 6
-#define __TSTORM_FCOE_AG_CONTEXT_AUX4_FLAG (0x1<<7)
-#define __TSTORM_FCOE_AG_CONTEXT_AUX4_FLAG_SHIFT 7
-	u8 state;
-#elif defined(__LITTLE_ENDIAN)
-	u8 state;
-	u8 agg_vars1;
-#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
-#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
-#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
-#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
-#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
-#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
-#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
-#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
-#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF (0x3<<4)
-#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_SHIFT 4
-#define __TSTORM_FCOE_AG_CONTEXT_AUX3_FLAG (0x1<<6)
-#define __TSTORM_FCOE_AG_CONTEXT_AUX3_FLAG_SHIFT 6
-#define __TSTORM_FCOE_AG_CONTEXT_AUX4_FLAG (0x1<<7)
-#define __TSTORM_FCOE_AG_CONTEXT_AUX4_FLAG_SHIFT 7
-	u16 ulp_credit;
-#endif
-#if defined(__BIG_ENDIAN)
-	u16 __agg_val4;
-	u16 agg_vars2;
-#define __TSTORM_FCOE_AG_CONTEXT_AUX5_FLAG (0x1<<0)
-#define __TSTORM_FCOE_AG_CONTEXT_AUX5_FLAG_SHIFT 0
-#define __TSTORM_FCOE_AG_CONTEXT_AUX6_FLAG (0x1<<1)
-#define __TSTORM_FCOE_AG_CONTEXT_AUX6_FLAG_SHIFT 1
-#define __TSTORM_FCOE_AG_CONTEXT_AUX4_CF (0x3<<2)
-#define __TSTORM_FCOE_AG_CONTEXT_AUX4_CF_SHIFT 2
-#define __TSTORM_FCOE_AG_CONTEXT_AUX5_CF (0x3<<4)
-#define __TSTORM_FCOE_AG_CONTEXT_AUX5_CF_SHIFT 4
-#define __TSTORM_FCOE_AG_CONTEXT_AUX6_CF (0x3<<6)
-#define __TSTORM_FCOE_AG_CONTEXT_AUX6_CF_SHIFT 6
-#define __TSTORM_FCOE_AG_CONTEXT_AUX7_CF (0x3<<8)
-#define __TSTORM_FCOE_AG_CONTEXT_AUX7_CF_SHIFT 8
-#define __TSTORM_FCOE_AG_CONTEXT_AUX7_FLAG (0x1<<10)
-#define __TSTORM_FCOE_AG_CONTEXT_AUX7_FLAG_SHIFT 10
-#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_EN (0x1<<11)
-#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_EN_SHIFT 11
-#define TSTORM_FCOE_AG_CONTEXT_AUX4_CF_EN (0x1<<12)
-#define TSTORM_FCOE_AG_CONTEXT_AUX4_CF_EN_SHIFT 12
-#define TSTORM_FCOE_AG_CONTEXT_AUX5_CF_EN (0x1<<13)
-#define TSTORM_FCOE_AG_CONTEXT_AUX5_CF_EN_SHIFT 13
-#define TSTORM_FCOE_AG_CONTEXT_AUX6_CF_EN (0x1<<14)
-#define TSTORM_FCOE_AG_CONTEXT_AUX6_CF_EN_SHIFT 14
-#define TSTORM_FCOE_AG_CONTEXT_AUX7_CF_EN (0x1<<15)
-#define TSTORM_FCOE_AG_CONTEXT_AUX7_CF_EN_SHIFT 15
-#elif defined(__LITTLE_ENDIAN)
-	u16 agg_vars2;
-#define __TSTORM_FCOE_AG_CONTEXT_AUX5_FLAG (0x1<<0)
-#define __TSTORM_FCOE_AG_CONTEXT_AUX5_FLAG_SHIFT 0
-#define __TSTORM_FCOE_AG_CONTEXT_AUX6_FLAG (0x1<<1)
-#define __TSTORM_FCOE_AG_CONTEXT_AUX6_FLAG_SHIFT 1
-#define __TSTORM_FCOE_AG_CONTEXT_AUX4_CF (0x3<<2)
-#define __TSTORM_FCOE_AG_CONTEXT_AUX4_CF_SHIFT 2
-#define __TSTORM_FCOE_AG_CONTEXT_AUX5_CF (0x3<<4)
-#define __TSTORM_FCOE_AG_CONTEXT_AUX5_CF_SHIFT 4
-#define __TSTORM_FCOE_AG_CONTEXT_AUX6_CF (0x3<<6)
-#define __TSTORM_FCOE_AG_CONTEXT_AUX6_CF_SHIFT 6
-#define __TSTORM_FCOE_AG_CONTEXT_AUX7_CF (0x3<<8)
-#define __TSTORM_FCOE_AG_CONTEXT_AUX7_CF_SHIFT 8
-#define __TSTORM_FCOE_AG_CONTEXT_AUX7_FLAG (0x1<<10)
-#define __TSTORM_FCOE_AG_CONTEXT_AUX7_FLAG_SHIFT 10
-#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_EN (0x1<<11)
-#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_EN_SHIFT 11
-#define TSTORM_FCOE_AG_CONTEXT_AUX4_CF_EN (0x1<<12)
-#define TSTORM_FCOE_AG_CONTEXT_AUX4_CF_EN_SHIFT 12
-#define TSTORM_FCOE_AG_CONTEXT_AUX5_CF_EN (0x1<<13)
-#define TSTORM_FCOE_AG_CONTEXT_AUX5_CF_EN_SHIFT 13
-#define TSTORM_FCOE_AG_CONTEXT_AUX6_CF_EN (0x1<<14)
-#define TSTORM_FCOE_AG_CONTEXT_AUX6_CF_EN_SHIFT 14
-#define TSTORM_FCOE_AG_CONTEXT_AUX7_CF_EN (0x1<<15)
-#define TSTORM_FCOE_AG_CONTEXT_AUX7_CF_EN_SHIFT 15
-	u16 __agg_val4;
-#endif
-	struct tstorm_fcoe_extra_ag_context_section __extra_section;
-};
-
-/*
- * The fcoe aggregative context of Ustorm
- */
-struct ustorm_fcoe_ag_context {
-#if defined(__BIG_ENDIAN)
-	u8 __aux_counter_flags;
-	u8 agg_vars2;
-#define USTORM_FCOE_AG_CONTEXT_TX_CF (0x3<<0)
-#define USTORM_FCOE_AG_CONTEXT_TX_CF_SHIFT 0
-#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF (0x3<<2)
-#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_SHIFT 2
-#define USTORM_FCOE_AG_CONTEXT_AGG_MISC4_RULE (0x7<<4)
-#define USTORM_FCOE_AG_CONTEXT_AGG_MISC4_RULE_SHIFT 4
-#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL2_MASK (0x1<<7)
-#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL2_MASK_SHIFT 7
-	u8 agg_vars1;
-#define __USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
-#define __USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
-#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
-#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
-#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
-#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
-#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
-#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
-#define USTORM_FCOE_AG_CONTEXT_INV_CF (0x3<<4)
-#define USTORM_FCOE_AG_CONTEXT_INV_CF_SHIFT 4
-#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF (0x3<<6)
-#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_SHIFT 6
-	u8 state;
-#elif defined(__LITTLE_ENDIAN)
-	u8 state;
-	u8 agg_vars1;
-#define __USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
-#define __USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
-#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
-#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
-#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
-#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
-#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
-#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
-#define USTORM_FCOE_AG_CONTEXT_INV_CF (0x3<<4)
-#define USTORM_FCOE_AG_CONTEXT_INV_CF_SHIFT 4
-#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF (0x3<<6)
-#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_SHIFT 6
-	u8 agg_vars2;
-#define USTORM_FCOE_AG_CONTEXT_TX_CF (0x3<<0)
-#define USTORM_FCOE_AG_CONTEXT_TX_CF_SHIFT 0
-#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF (0x3<<2)
-#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_SHIFT 2
-#define USTORM_FCOE_AG_CONTEXT_AGG_MISC4_RULE (0x7<<4)
-#define USTORM_FCOE_AG_CONTEXT_AGG_MISC4_RULE_SHIFT 4
-#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL2_MASK (0x1<<7)
-#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL2_MASK_SHIFT 7
-	u8 __aux_counter_flags;
-#endif
-#if defined(__BIG_ENDIAN)
-	u8 cdu_usage;
-	u8 agg_misc2;
-	u16 pbf_tx_seq_ack;
-#elif defined(__LITTLE_ENDIAN)
-	u16 pbf_tx_seq_ack;
-	u8 agg_misc2;
-	u8 cdu_usage;
-#endif
-	u32 agg_misc4;
-#if defined(__BIG_ENDIAN)
-	u8 agg_val3_th;
-	u8 agg_val3;
-	u16 agg_misc3;
-#elif defined(__LITTLE_ENDIAN)
-	u16 agg_misc3;
-	u8 agg_val3;
-	u8 agg_val3_th;
-#endif
-	u32 expired_task_id;
-	u32 agg_misc4_th;
-#if defined(__BIG_ENDIAN)
-	u16 cq_prod;
-	u16 cq_cons;
-#elif defined(__LITTLE_ENDIAN)
-	u16 cq_cons;
-	u16 cq_prod;
-#endif
-#if defined(__BIG_ENDIAN)
-	u16 __reserved2;
-	u8 decision_rules;
-#define USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE (0x7<<0)
-#define USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE_SHIFT 0
-#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3)
-#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3
-#define USTORM_FCOE_AG_CONTEXT_CQ_ARM_N_FLAG (0x1<<6)
-#define USTORM_FCOE_AG_CONTEXT_CQ_ARM_N_FLAG_SHIFT 6
-#define __USTORM_FCOE_AG_CONTEXT_RESERVED1 (0x1<<7)
-#define __USTORM_FCOE_AG_CONTEXT_RESERVED1_SHIFT 7
-	u8 decision_rule_enable_bits;
-#define __USTORM_FCOE_AG_CONTEXT_RESERVED_INV_CF_EN (0x1<<0)
-#define __USTORM_FCOE_AG_CONTEXT_RESERVED_INV_CF_EN_SHIFT 0
-#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_EN (0x1<<1)
-#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_EN_SHIFT 1
-#define USTORM_FCOE_AG_CONTEXT_TX_CF_EN (0x1<<2)
-#define USTORM_FCOE_AG_CONTEXT_TX_CF_EN_SHIFT 2
-#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_EN (0x1<<3)
-#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_EN_SHIFT 3
-#define __USTORM_FCOE_AG_CONTEXT_AUX1_CF_EN (0x1<<4)
-#define __USTORM_FCOE_AG_CONTEXT_AUX1_CF_EN_SHIFT 4
-#define __USTORM_FCOE_AG_CONTEXT_QUEUE0_CF_EN (0x1<<5)
-#define __USTORM_FCOE_AG_CONTEXT_QUEUE0_CF_EN_SHIFT 5
-#define __USTORM_FCOE_AG_CONTEXT_AUX3_CF_EN (0x1<<6)
-#define __USTORM_FCOE_AG_CONTEXT_AUX3_CF_EN_SHIFT 6
-#define __USTORM_FCOE_AG_CONTEXT_DQ_CF_EN (0x1<<7)
-#define __USTORM_FCOE_AG_CONTEXT_DQ_CF_EN_SHIFT 7
-#elif defined(__LITTLE_ENDIAN)
-	u8 decision_rule_enable_bits;
-#define __USTORM_FCOE_AG_CONTEXT_RESERVED_INV_CF_EN (0x1<<0)
-#define __USTORM_FCOE_AG_CONTEXT_RESERVED_INV_CF_EN_SHIFT 0
-#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_EN (0x1<<1)
-#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_EN_SHIFT 1
-#define USTORM_FCOE_AG_CONTEXT_TX_CF_EN (0x1<<2)
-#define USTORM_FCOE_AG_CONTEXT_TX_CF_EN_SHIFT 2
-#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_EN (0x1<<3)
-#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_EN_SHIFT 3
-#define __USTORM_FCOE_AG_CONTEXT_AUX1_CF_EN (0x1<<4)
-#define __USTORM_FCOE_AG_CONTEXT_AUX1_CF_EN_SHIFT 4
-#define __USTORM_FCOE_AG_CONTEXT_QUEUE0_CF_EN (0x1<<5)
-#define __USTORM_FCOE_AG_CONTEXT_QUEUE0_CF_EN_SHIFT 5
-#define __USTORM_FCOE_AG_CONTEXT_AUX3_CF_EN (0x1<<6)
-#define __USTORM_FCOE_AG_CONTEXT_AUX3_CF_EN_SHIFT 6
-#define __USTORM_FCOE_AG_CONTEXT_DQ_CF_EN (0x1<<7)
-#define __USTORM_FCOE_AG_CONTEXT_DQ_CF_EN_SHIFT 7
-	u8 decision_rules;
-#define USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE (0x7<<0)
-#define USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE_SHIFT 0
-#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3)
-#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3
-#define USTORM_FCOE_AG_CONTEXT_CQ_ARM_N_FLAG (0x1<<6)
-#define USTORM_FCOE_AG_CONTEXT_CQ_ARM_N_FLAG_SHIFT 6
-#define __USTORM_FCOE_AG_CONTEXT_RESERVED1 (0x1<<7)
-#define __USTORM_FCOE_AG_CONTEXT_RESERVED1_SHIFT 7
-	u16 __reserved2;
-#endif
-};
-
-/*
- * Ethernet context section
- */
-struct xstorm_fcoe_eth_context_section {
-#if defined(__BIG_ENDIAN)
-	u8 remote_addr_4;
-	u8 remote_addr_5;
-	u8 local_addr_0;
-	u8 local_addr_1;
-#elif defined(__LITTLE_ENDIAN)
-	u8 local_addr_1;
-	u8 local_addr_0;
-	u8 remote_addr_5;
-	u8 remote_addr_4;
-#endif
-#if defined(__BIG_ENDIAN)
-	u8 remote_addr_0;
-	u8 remote_addr_1;
-	u8 remote_addr_2;
-	u8 remote_addr_3;
-#elif defined(__LITTLE_ENDIAN)
-	u8 remote_addr_3;
-	u8 remote_addr_2;
-	u8 remote_addr_1;
-	u8 remote_addr_0;
-#endif
-#if defined(__BIG_ENDIAN)
-	u16 reserved_vlan_type;
-	u16 params;
-#define XSTORM_FCOE_ETH_CONTEXT_SECTION_VLAN_ID (0xFFF<<0)
-#define XSTORM_FCOE_ETH_CONTEXT_SECTION_VLAN_ID_SHIFT 0
-#define XSTORM_FCOE_ETH_CONTEXT_SECTION_CFI (0x1<<12)
-#define XSTORM_FCOE_ETH_CONTEXT_SECTION_CFI_SHIFT 12
-#define XSTORM_FCOE_ETH_CONTEXT_SECTION_PRIORITY (0x7<<13)
-#define XSTORM_FCOE_ETH_CONTEXT_SECTION_PRIORITY_SHIFT 13
-#elif defined(__LITTLE_ENDIAN)
-	u16 params;
-#define XSTORM_FCOE_ETH_CONTEXT_SECTION_VLAN_ID (0xFFF<<0)
-#define XSTORM_FCOE_ETH_CONTEXT_SECTION_VLAN_ID_SHIFT 0
-#define XSTORM_FCOE_ETH_CONTEXT_SECTION_CFI (0x1<<12)
-#define XSTORM_FCOE_ETH_CONTEXT_SECTION_CFI_SHIFT 12
-#define XSTORM_FCOE_ETH_CONTEXT_SECTION_PRIORITY (0x7<<13)
-#define XSTORM_FCOE_ETH_CONTEXT_SECTION_PRIORITY_SHIFT 13
-	u16 reserved_vlan_type;
-#endif
-#if defined(__BIG_ENDIAN)
-	u8 local_addr_2;
-	u8 local_addr_3;
-	u8 local_addr_4;
-	u8 local_addr_5;
-#elif defined(__LITTLE_ENDIAN)
-	u8 local_addr_5;
-	u8 local_addr_4;
-	u8 local_addr_3;
-	u8 local_addr_2;
-#endif
-};
-
-/*
- * Flags used in FCoE context section - 1 byte
- */
-struct xstorm_fcoe_context_flags {
-	u8 flags;
-#define XSTORM_FCOE_CONTEXT_FLAGS_B_PROC_Q (0x3<<0)
-#define XSTORM_FCOE_CONTEXT_FLAGS_B_PROC_Q_SHIFT 0
-#define XSTORM_FCOE_CONTEXT_FLAGS_B_MID_SEQ (0x1<<2)
-#define XSTORM_FCOE_CONTEXT_FLAGS_B_MID_SEQ_SHIFT 2
-#define XSTORM_FCOE_CONTEXT_FLAGS_B_EXCHANGE_CLEANUP_DEFFERED (0x1<<3)
-#define XSTORM_FCOE_CONTEXT_FLAGS_B_EXCHANGE_CLEANUP_DEFFERED_SHIFT 3
-#define XSTORM_FCOE_CONTEXT_FLAGS_B_REC_SUPPORT (0x1<<4)
-#define XSTORM_FCOE_CONTEXT_FLAGS_B_REC_SUPPORT_SHIFT 4
-#define XSTORM_FCOE_CONTEXT_FLAGS_B_SQ_TOGGLE (0x1<<5)
-#define XSTORM_FCOE_CONTEXT_FLAGS_B_SQ_TOGGLE_SHIFT 5
-#define XSTORM_FCOE_CONTEXT_FLAGS_B_XFRQ_TOGGLE (0x1<<6)
-#define XSTORM_FCOE_CONTEXT_FLAGS_B_XFRQ_TOGGLE_SHIFT 6
-#define XSTORM_FCOE_CONTEXT_FLAGS_B_ABTS_DEFFERED (0x1<<7)
-#define XSTORM_FCOE_CONTEXT_FLAGS_B_ABTS_DEFFERED_SHIFT 7
-};
-
-/*
- * FCoE SQ element
- */
-struct fcoe_sqe {
-	u16 wqe;
-#define FCOE_SQE_TASK_ID (0x7FFF<<0)
-#define FCOE_SQE_TASK_ID_SHIFT 0
-#define FCOE_SQE_TOGGLE_BIT (0x1<<15)
-#define FCOE_SQE_TOGGLE_BIT_SHIFT 15
-};
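A SQ element is one 16-bit word: a 15-bit task id plus a toggle bit, which by convention flips each time the producer wraps the ring so firmware can distinguish fresh entries from stale ones. A hedged sketch of composing one (the helper is invented for illustration):

```c
/* Illustrative: pack a task id and the current toggle state into the
 * 16-bit SQ WQE layout declared above. */
static inline u16 fcoe_sqe_build(u16 task_id, int toggle)
{
	u16 wqe = task_id & FCOE_SQE_TASK_ID;

	if (toggle)
		wqe |= FCOE_SQE_TOGGLE_BIT;
	return wqe;
}
```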
-
-/*
- * FCoE XFRQ element
- */
-struct fcoe_xfrqe {
-	u16 wqe;
-#define FCOE_XFRQE_TASK_ID (0x7FFF<<0)
-#define FCOE_XFRQE_TASK_ID_SHIFT 0
-#define FCOE_XFRQE_TOGGLE_BIT (0x1<<15)
-#define FCOE_XFRQE_TOGGLE_BIT_SHIFT 15
-};
-
-/*
- * FCoE SQ/XFRQ element
- */
-struct fcoe_cached_wqe {
-#if defined(__BIG_ENDIAN)
-	struct fcoe_xfrqe xfrqe;
-	struct fcoe_sqe sqe;
-#elif defined(__LITTLE_ENDIAN)
-	struct fcoe_sqe sqe;
-	struct fcoe_xfrqe xfrqe;
-#endif
-};
-
-struct fcoe_task_ctx_entry_tx_only {
-	union fcoe_sgl_ctx sgl_ctx;
-};
-
-struct xstorm_fcoe_task_ctx_entry_rd {
-	struct fcoe_task_ctx_entry_tx_only tx_wr;
-	struct fcoe_task_ctx_entry_txwr_rxrd tx_wr_rx_rd;
-	struct fcoe_task_ctx_entry_tx_rx_cmn cmn;
-	struct fcoe_task_ctx_entry_rxwr_txrd rx_wr_tx_rd;
-};
-
-/*
- * Cached SGEs
- */
-struct common_fcoe_sgl {
-	struct fcoe_bd_ctx sge[2];
-};
-
-/*
- * FCP_DATA parameters required for transmission
- */
-struct xstorm_fcoe_fcp_data {
-	u32 io_rem;
-#if defined(__BIG_ENDIAN)
-	u16 cached_sge_off;
-	u8 cached_num_sges;
-	u8 cached_sge_idx;
-#elif defined(__LITTLE_ENDIAN)
-	u8 cached_sge_idx;
-	u8 cached_num_sges;
-	u16 cached_sge_off;
-#endif
-	struct common_fcoe_sgl cached_sgl;
-};
-
-/*
- * FCoE context section
- */
-struct xstorm_fcoe_context_section {
-#if defined(__BIG_ENDIAN)
-	u8 vlan_flag;
-	u8 s_id[3];
-#elif defined(__LITTLE_ENDIAN)
-	u8 s_id[3];
-	u8 vlan_flag;
-#endif
-#if defined(__BIG_ENDIAN)
-	u8 func_id;
-	u8 d_id[3];
-#elif defined(__LITTLE_ENDIAN)
-	u8 d_id[3];
-	u8 func_id;
-#endif
-#if defined(__BIG_ENDIAN)
-	u16 sq_xfrq_lcq_confq_size;
-	u16 tx_max_fc_pay_len;
-#elif defined(__LITTLE_ENDIAN)
-	u16 tx_max_fc_pay_len;
-	u16 sq_xfrq_lcq_confq_size;
-#endif
-	u32 lcq_prod;
-#if defined(__BIG_ENDIAN)
-	u8 port_id;
-	u8 tx_max_conc_seqs_c3;
-	u8 seq_id;
-	struct xstorm_fcoe_context_flags tx_flags;
-#elif defined(__LITTLE_ENDIAN)
-	struct xstorm_fcoe_context_flags tx_flags;
-	u8 seq_id;
-	u8 tx_max_conc_seqs_c3;
-	u8 port_id;
-#endif
-#if defined(__BIG_ENDIAN)
-	u16 verify_tx_seq;
-	u8 func_mode;
-	u8 vnic_id;
-#elif defined(__LITTLE_ENDIAN)
-	u8 vnic_id;
-	u8 func_mode;
-	u16 verify_tx_seq;
-#endif
-	struct regpair confq_curr_page_addr;
-	struct fcoe_cached_wqe cached_wqe[8];
-	struct regpair lcq_base_addr;
-	struct xstorm_fcoe_task_ctx_entry_rd tce;
-	struct xstorm_fcoe_fcp_data fcp_data;
-#if defined(__BIG_ENDIAN)
-	u16 fcoe_tx_stat_params_ram_addr;
-	u16 cmng_port_ram_addr;
-#elif defined(__LITTLE_ENDIAN)
-	u16 cmng_port_ram_addr;
-	u16 fcoe_tx_stat_params_ram_addr;
-#endif
-#if defined(__BIG_ENDIAN)
-	u8 fcp_cmd_pb_cmd_size;
-	u8 eth_hdr_size;
-	u16 pbf_addr;
-#elif defined(__LITTLE_ENDIAN)
-	u16 pbf_addr;
-	u8 eth_hdr_size;
-	u8 fcp_cmd_pb_cmd_size;
-#endif
-#if defined(__BIG_ENDIAN)
-	u8 reserved2[2];
-	u8 cos;
-	u8 dcb_version;
-#elif defined(__LITTLE_ENDIAN)
-	u8 dcb_version;
-	u8 cos;
-	u8 reserved2[2];
-#endif
-	u32 reserved3;
-	struct regpair reserved4[2];
-};
-
-/*
- * Xstorm FCoE Storm Context
- */
-struct xstorm_fcoe_st_context {
-	struct xstorm_fcoe_eth_context_section eth;
-	struct xstorm_fcoe_context_section fcoe;
-};
-
-/*
- * Fcoe connection context
- */
-struct fcoe_context {
-	struct ustorm_fcoe_st_context ustorm_st_context;
-	struct tstorm_fcoe_st_context tstorm_st_context;
-	struct xstorm_fcoe_ag_context xstorm_ag_context;
-	struct tstorm_fcoe_ag_context tstorm_ag_context;
-	struct ustorm_fcoe_ag_context ustorm_ag_context;
-	struct timers_block_context timers_context;
-	struct xstorm_fcoe_st_context xstorm_st_context;
-};
-
-/*
- * iSCSI context region, used only in iSCSI
- */
-struct ustorm_iscsi_rq_db {
-	struct regpair pbl_base;
-	struct regpair curr_pbe;
-};
-
-/*
- * iSCSI context region, used only in iSCSI
- */
-struct ustorm_iscsi_r2tq_db {
-	struct regpair pbl_base;
-	struct regpair curr_pbe;
-};
-
-/*
- * iSCSI context region, used only in iSCSI
- */
-struct ustorm_iscsi_cq_db {
-#if defined(__BIG_ENDIAN)
-	u16 cq_sn;
-	u16 prod;
-#elif defined(__LITTLE_ENDIAN)
-	u16 prod;
-	u16 cq_sn;
-#endif
-	struct regpair curr_pbe;
-};
-
-/*
- * iSCSI context region, used only in iSCSI
- */
-struct rings_db {
-	struct ustorm_iscsi_rq_db rq;
-	struct ustorm_iscsi_r2tq_db r2tq;
-	struct ustorm_iscsi_cq_db cq[8];
-#if defined(__BIG_ENDIAN)
-	u16 rq_prod;
-	u16 r2tq_prod;
-#elif defined(__LITTLE_ENDIAN)
-	u16 r2tq_prod;
-	u16 rq_prod;
-#endif
-	struct regpair cq_pbl_base;
-};
-
-/*
- * iSCSI context region, used only in iSCSI
- */
-struct ustorm_iscsi_placement_db {
-	u32 sgl_base_lo;
-	u32 sgl_base_hi;
-	u32 local_sge_0_address_hi;
-	u32 local_sge_0_address_lo;
-#if defined(__BIG_ENDIAN)
-	u16 curr_sge_offset;
-	u16 local_sge_0_size;
-#elif defined(__LITTLE_ENDIAN)
-	u16 local_sge_0_size;
-	u16 curr_sge_offset;
-#endif
-	u32 local_sge_1_address_hi;
-	u32 local_sge_1_address_lo;
-#if defined(__BIG_ENDIAN)
-	u16 reserved6;
-	u16 local_sge_1_size;
-#elif defined(__LITTLE_ENDIAN)
-	u16 local_sge_1_size;
-	u16 reserved6;
-#endif
-#if defined(__BIG_ENDIAN)
-	u8 sgl_size;
-	u8 local_sge_index_2b;
-	u16 reserved7;
-#elif defined(__LITTLE_ENDIAN)
-	u16 reserved7;
-	u8 local_sge_index_2b;
-	u8 sgl_size;
-#endif
-	u32 rem_pdu;
-	u32 place_db_bitfield_1;
-#define USTORM_ISCSI_PLACEMENT_DB_REM_PDU_PAYLOAD (0xFFFFFF<<0)
-#define USTORM_ISCSI_PLACEMENT_DB_REM_PDU_PAYLOAD_SHIFT 0
-#define USTORM_ISCSI_PLACEMENT_DB_CQ_ID (0xFF<<24)
-#define USTORM_ISCSI_PLACEMENT_DB_CQ_ID_SHIFT 24
-	u32 place_db_bitfield_2;
-#define USTORM_ISCSI_PLACEMENT_DB_BYTES_2_TRUNCATE (0xFFFFFF<<0)
-#define USTORM_ISCSI_PLACEMENT_DB_BYTES_2_TRUNCATE_SHIFT 0
-#define USTORM_ISCSI_PLACEMENT_DB_HOST_SGE_INDEX (0xFF<<24)
-#define USTORM_ISCSI_PLACEMENT_DB_HOST_SGE_INDEX_SHIFT 24
-	u32 nal;
-#define USTORM_ISCSI_PLACEMENT_DB_REM_SGE_SIZE (0xFFFFFF<<0)
-#define USTORM_ISCSI_PLACEMENT_DB_REM_SGE_SIZE_SHIFT 0
-#define USTORM_ISCSI_PLACEMENT_DB_EXP_PADDING_2B (0x3<<24)
-#define USTORM_ISCSI_PLACEMENT_DB_EXP_PADDING_2B_SHIFT 24
-#define USTORM_ISCSI_PLACEMENT_DB_EXP_DIGEST_3B (0x7<<26)
-#define USTORM_ISCSI_PLACEMENT_DB_EXP_DIGEST_3B_SHIFT 26
-#define USTORM_ISCSI_PLACEMENT_DB_NAL_LEN_3B (0x7<<29)
-#define USTORM_ISCSI_PLACEMENT_DB_NAL_LEN_3B_SHIFT 29
-};
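Several placement fields above pack a 24-bit quantity and an 8-bit quantity into a single u32; extraction is the same mask-then-shift idiom used everywhere else in the file. A brief sketch (accessor name invented):

```c
/* Illustrative: pull the 8-bit CQ id out of place_db_bitfield_1. */
static inline u8 ustorm_iscsi_cq_id(const struct ustorm_iscsi_placement_db *db)
{
	return (db->place_db_bitfield_1 & USTORM_ISCSI_PLACEMENT_DB_CQ_ID) >>
		USTORM_ISCSI_PLACEMENT_DB_CQ_ID_SHIFT;
}
```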
-
-/*
- * Ustorm iSCSI Storm Context
- */
-struct ustorm_iscsi_st_context {
-	u32 exp_stat_sn;
-	u32 exp_data_sn;
-	struct rings_db ring;
-	struct regpair task_pbl_base;
-	struct regpair tce_phy_addr;
-	struct ustorm_iscsi_placement_db place_db;
-	u32 reserved8;
-	u32 rem_rcv_len;
-#if defined(__BIG_ENDIAN)
-	u16 hdr_itt;
-	u16 iscsi_conn_id;
-#elif defined(__LITTLE_ENDIAN)
-	u16 iscsi_conn_id;
-	u16 hdr_itt;
-#endif
-	u32 nal_bytes;
-#if defined(__BIG_ENDIAN)
-	u8 hdr_second_byte_union;
-	u8 bitfield_0;
-#define USTORM_ISCSI_ST_CONTEXT_BMIDDLEOFPDU (0x1<<0)
-#define USTORM_ISCSI_ST_CONTEXT_BMIDDLEOFPDU_SHIFT 0
-#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE (0x1<<1)
-#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE_SHIFT 1
-#define USTORM_ISCSI_ST_CONTEXT_BRESETCRC (0x1<<2)
-#define USTORM_ISCSI_ST_CONTEXT_BRESETCRC_SHIFT 2
-#define USTORM_ISCSI_ST_CONTEXT_RESERVED1 (0x1F<<3)
-#define USTORM_ISCSI_ST_CONTEXT_RESERVED1_SHIFT 3
-	u8 task_pdu_cache_index;
-	u8 task_pbe_cache_index;
-#elif defined(__LITTLE_ENDIAN)
-	u8 task_pbe_cache_index;
-	u8 task_pdu_cache_index;
-	u8 bitfield_0;
-#define USTORM_ISCSI_ST_CONTEXT_BMIDDLEOFPDU (0x1<<0)
-#define USTORM_ISCSI_ST_CONTEXT_BMIDDLEOFPDU_SHIFT 0
-#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE (0x1<<1)
-#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE_SHIFT 1
-#define USTORM_ISCSI_ST_CONTEXT_BRESETCRC (0x1<<2)
-#define USTORM_ISCSI_ST_CONTEXT_BRESETCRC_SHIFT 2
-#define USTORM_ISCSI_ST_CONTEXT_RESERVED1 (0x1F<<3)
-#define USTORM_ISCSI_ST_CONTEXT_RESERVED1_SHIFT 3
-	u8 hdr_second_byte_union;
-#endif
-#if defined(__BIG_ENDIAN)
-	u16 reserved3;
-	u8 reserved2;
-	u8 acDecrement;
-#elif defined(__LITTLE_ENDIAN)
-	u8 acDecrement;
-	u8 reserved2;
-	u16 reserved3;
-#endif
-	u32 task_stat;
-#if defined(__BIG_ENDIAN)
-	u8 hdr_opcode;
-	u8 num_cqs;
-	u16 reserved5;
-#elif defined(__LITTLE_ENDIAN)
-	u16 reserved5;
-	u8 num_cqs;
-	u8 hdr_opcode;
-#endif
-	u32 negotiated_rx;
-#define USTORM_ISCSI_ST_CONTEXT_MAX_RECV_PDU_LENGTH (0xFFFFFF<<0)
-#define USTORM_ISCSI_ST_CONTEXT_MAX_RECV_PDU_LENGTH_SHIFT 0
-#define USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS (0xFF<<24)
-#define USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT 24
-	u32 negotiated_rx_and_flags;
-#define USTORM_ISCSI_ST_CONTEXT_MAX_BURST_LENGTH (0xFFFFFF<<0)
-#define USTORM_ISCSI_ST_CONTEXT_MAX_BURST_LENGTH_SHIFT 0
-#define USTORM_ISCSI_ST_CONTEXT_B_CQE_POSTED_OR_HEADER_CACHED (0x1<<24)
-#define USTORM_ISCSI_ST_CONTEXT_B_CQE_POSTED_OR_HEADER_CACHED_SHIFT 24
-#define USTORM_ISCSI_ST_CONTEXT_B_HDR_DIGEST_EN (0x1<<25)
-#define USTORM_ISCSI_ST_CONTEXT_B_HDR_DIGEST_EN_SHIFT 25
-#define USTORM_ISCSI_ST_CONTEXT_B_DATA_DIGEST_EN (0x1<<26)
-#define USTORM_ISCSI_ST_CONTEXT_B_DATA_DIGEST_EN_SHIFT 26
-#define USTORM_ISCSI_ST_CONTEXT_B_PROTOCOL_ERROR (0x1<<27)
-#define USTORM_ISCSI_ST_CONTEXT_B_PROTOCOL_ERROR_SHIFT 27
-#define USTORM_ISCSI_ST_CONTEXT_B_TASK_VALID (0x1<<28)
-#define USTORM_ISCSI_ST_CONTEXT_B_TASK_VALID_SHIFT 28
-#define USTORM_ISCSI_ST_CONTEXT_TASK_TYPE (0x3<<29)
-#define USTORM_ISCSI_ST_CONTEXT_TASK_TYPE_SHIFT 29
-#define USTORM_ISCSI_ST_CONTEXT_B_ALL_DATA_ACKED (0x1<<31)
-#define USTORM_ISCSI_ST_CONTEXT_B_ALL_DATA_ACKED_SHIFT 31
-};
-
-/*
- * TCP context region, shared in TOE, RDMA and iSCSI
- */
-struct tstorm_tcp_st_context_section {
-	u32 flags1;
-#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_SRTT (0xFFFFFF<<0)
-#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_SRTT_SHIFT 0
-#define TSTORM_TCP_ST_CONTEXT_SECTION_PAWS_INVALID (0x1<<24)
-#define TSTORM_TCP_ST_CONTEXT_SECTION_PAWS_INVALID_SHIFT 24
-#define TSTORM_TCP_ST_CONTEXT_SECTION_TIMESTAMP_EXISTS (0x1<<25)
-#define TSTORM_TCP_ST_CONTEXT_SECTION_TIMESTAMP_EXISTS_SHIFT 25
-#define TSTORM_TCP_ST_CONTEXT_SECTION_RESERVED0 (0x1<<26)
-#define TSTORM_TCP_ST_CONTEXT_SECTION_RESERVED0_SHIFT 26
-#define TSTORM_TCP_ST_CONTEXT_SECTION_STOP_RX_PAYLOAD (0x1<<27)
-#define TSTORM_TCP_ST_CONTEXT_SECTION_STOP_RX_PAYLOAD_SHIFT 27
-#define TSTORM_TCP_ST_CONTEXT_SECTION_KA_ENABLED (0x1<<28)
-#define TSTORM_TCP_ST_CONTEXT_SECTION_KA_ENABLED_SHIFT 28
-#define TSTORM_TCP_ST_CONTEXT_SECTION_FIRST_RTO_ESTIMATE (0x1<<29)
-#define TSTORM_TCP_ST_CONTEXT_SECTION_FIRST_RTO_ESTIMATE_SHIFT 29
-#define TSTORM_TCP_ST_CONTEXT_SECTION_MAX_SEG_RETRANSMIT_EN (0x1<<30)
-#define TSTORM_TCP_ST_CONTEXT_SECTION_MAX_SEG_RETRANSMIT_EN_SHIFT 30
-#define TSTORM_TCP_ST_CONTEXT_SECTION_LAST_ISLE_HAS_FIN (0x1<<31)
-#define TSTORM_TCP_ST_CONTEXT_SECTION_LAST_ISLE_HAS_FIN_SHIFT 31
-	u32 flags2;
-#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_VARIATION (0xFFFFFF<<0)
-#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_VARIATION_SHIFT 0
-#define TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN (0x1<<24)
-#define TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN_SHIFT 24
-#define TSTORM_TCP_ST_CONTEXT_SECTION_DA_COUNTER_EN (0x1<<25)
-#define TSTORM_TCP_ST_CONTEXT_SECTION_DA_COUNTER_EN_SHIFT 25
-#define __TSTORM_TCP_ST_CONTEXT_SECTION_KA_PROBE_SENT (0x1<<26)
-#define __TSTORM_TCP_ST_CONTEXT_SECTION_KA_PROBE_SENT_SHIFT 26
-#define __TSTORM_TCP_ST_CONTEXT_SECTION_PERSIST_PROBE_SENT (0x1<<27)
-#define __TSTORM_TCP_ST_CONTEXT_SECTION_PERSIST_PROBE_SENT_SHIFT 27
-#define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L2_STATSTICS (0x1<<28)
-#define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L2_STATSTICS_SHIFT 28
-#define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L4_STATSTICS (0x1<<29)
-#define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT 29
-#define __TSTORM_TCP_ST_CONTEXT_SECTION_IN_WINDOW_RST_ATTACK (0x1<<30)
-#define __TSTORM_TCP_ST_CONTEXT_SECTION_IN_WINDOW_RST_ATTACK_SHIFT 30
-#define __TSTORM_TCP_ST_CONTEXT_SECTION_IN_WINDOW_SYN_ATTACK (0x1<<31)
-#define __TSTORM_TCP_ST_CONTEXT_SECTION_IN_WINDOW_SYN_ATTACK_SHIFT 31
-#if defined(__BIG_ENDIAN)
-	u16 mss;
-	u8 tcp_sm_state;
-	u8 rto_exp;
-#elif defined(__LITTLE_ENDIAN)
-	u8 rto_exp;
-	u8 tcp_sm_state;
-	u16 mss;
-#endif
-	u32 rcv_nxt;
-	u32 timestamp_recent;
-	u32 timestamp_recent_time;
-	u32 cwnd;
-	u32 ss_thresh;
-	u32 cwnd_accum;
-	u32 prev_seg_seq;
-	u32 expected_rel_seq;
-	u32 recover;
-#if defined(__BIG_ENDIAN)
-	u8 retransmit_count;
-	u8 ka_max_probe_count;
-	u8 persist_probe_count;
-	u8 ka_probe_count;
-#elif defined(__LITTLE_ENDIAN)
-	u8 ka_probe_count;
-	u8 persist_probe_count;
-	u8 ka_max_probe_count;
-	u8 retransmit_count;
-#endif
-#if defined(__BIG_ENDIAN)
-	u8 statistics_counter_id;
-	u8 ooo_support_mode;
-	u8 snd_wnd_scale;
-	u8 dup_ack_count;
-#elif defined(__LITTLE_ENDIAN)
-	u8 dup_ack_count;
-	u8 snd_wnd_scale;
-	u8 ooo_support_mode;
-	u8 statistics_counter_id;
-#endif
-	u32 retransmit_start_time;
-	u32 ka_timeout;
-	u32 ka_interval;
-	u32 isle_start_seq;
-	u32 isle_end_seq;
-#if defined(__BIG_ENDIAN)
-	u16 second_isle_address;
-	u16 recent_seg_wnd;
-#elif defined(__LITTLE_ENDIAN)
-	u16 recent_seg_wnd;
-	u16 second_isle_address;
-#endif
-#if defined(__BIG_ENDIAN)
-	u8 max_isles_ever_happened;
-	u8 isles_number;
-	u16 last_isle_address;
-#elif defined(__LITTLE_ENDIAN)
-	u16 last_isle_address;
-	u8 isles_number;
-	u8 max_isles_ever_happened;
-#endif
-	u32 max_rt_time;
-#if defined(__BIG_ENDIAN)
-	u16 lsb_mac_address;
-	u16 vlan_id;
-#elif defined(__LITTLE_ENDIAN)
-	u16 vlan_id;
-	u16 lsb_mac_address;
-#endif
-	u32 msb_mac_address;
-	u32 rightmost_received_seq;
-};
-
-/*
- * Termination variables
- */
-struct iscsi_term_vars {
-	u8 BitMap;
-#define ISCSI_TERM_VARS_TCP_STATE (0xF<<0)
-#define ISCSI_TERM_VARS_TCP_STATE_SHIFT 0
-#define ISCSI_TERM_VARS_FIN_RECEIVED_SBIT (0x1<<4)
-#define ISCSI_TERM_VARS_FIN_RECEIVED_SBIT_SHIFT 4
-#define ISCSI_TERM_VARS_ACK_ON_FIN_RECEIVED_SBIT (0x1<<5)
-#define ISCSI_TERM_VARS_ACK_ON_FIN_RECEIVED_SBIT_SHIFT 5
-#define ISCSI_TERM_VARS_TERM_ON_CHIP (0x1<<6)
-#define ISCSI_TERM_VARS_TERM_ON_CHIP_SHIFT 6
-#define ISCSI_TERM_VARS_RSRV (0x1<<7)
-#define ISCSI_TERM_VARS_RSRV_SHIFT 7
-};
-
-/*
- * iSCSI context region, used only in iSCSI
- */
-struct tstorm_iscsi_st_context_section {
-#if defined(__BIG_ENDIAN)
-	u16 rem_tcp_data_len;
-	u16 brb_offset;
-#elif defined(__LITTLE_ENDIAN)
-	u16 brb_offset;
-	u16 rem_tcp_data_len;
-#endif
-	u32 b2nh;
-#if defined(__BIG_ENDIAN)
-	u16 rq_cons;
-	u8 flags;
-#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_HDR_DIGEST_EN (0x1<<0)
-#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_HDR_DIGEST_EN_SHIFT 0
-#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DATA_DIGEST_EN (0x1<<1)
-#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DATA_DIGEST_EN_SHIFT 1
-#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_PARTIAL_HEADER (0x1<<2)
-#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_PARTIAL_HEADER_SHIFT 2
-#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_FULL_FEATURE (0x1<<3)
-#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_FULL_FEATURE_SHIFT 3
-#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DROP_ALL_PDUS (0x1<<4)
-#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DROP_ALL_PDUS_SHIFT 4
-#define TSTORM_ISCSI_ST_CONTEXT_SECTION_FLAGS_RSRV (0x7<<5)
-#define TSTORM_ISCSI_ST_CONTEXT_SECTION_FLAGS_RSRV_SHIFT 5
-	u8 hdr_bytes_2_fetch;
-#elif defined(__LITTLE_ENDIAN)
-	u8 hdr_bytes_2_fetch;
-	u8 flags;
-#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_HDR_DIGEST_EN (0x1<<0)
-#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_HDR_DIGEST_EN_SHIFT 0
-#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DATA_DIGEST_EN (0x1<<1)
-#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DATA_DIGEST_EN_SHIFT 1
-#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_PARTIAL_HEADER (0x1<<2)
-#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_PARTIAL_HEADER_SHIFT 2
-#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_FULL_FEATURE (0x1<<3)
-#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_FULL_FEATURE_SHIFT 3
-#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DROP_ALL_PDUS (0x1<<4)
-#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DROP_ALL_PDUS_SHIFT 4
-#define TSTORM_ISCSI_ST_CONTEXT_SECTION_FLAGS_RSRV (0x7<<5)
-#define TSTORM_ISCSI_ST_CONTEXT_SECTION_FLAGS_RSRV_SHIFT 5
-	u16 rq_cons;
-#endif
-	struct regpair rq_db_phy_addr;
-#if defined(__BIG_ENDIAN)
-	struct iscsi_term_vars term_vars;
-	u8 scratchpad_idx;
-	u16 iscsi_conn_id;
-#elif defined(__LITTLE_ENDIAN)
-	u16 iscsi_conn_id;
-	u8 scratchpad_idx;
-	struct iscsi_term_vars term_vars;
-#endif
-	u32 process_nxt;
-};
-
-/*
- * The iSCSI non-aggregative context of Tstorm
- */
-struct tstorm_iscsi_st_context {
-	struct tstorm_tcp_st_context_section tcp;
-	struct tstorm_iscsi_st_context_section iscsi;
-};
 
 /*
  * The tcp aggregative context section of Xstorm
  */
 struct xstorm_tcp_tcp_ag_context_section {
 #if defined(__BIG_ENDIAN)
-	u8 __tcp_agg_vars1;
+	u8 tcp_agg_vars1;
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_DA_TIMER_CF (0x3<<0)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_DA_TIMER_CF_SHIFT 0
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED (0x3<<2)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_SHIFT 2
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF (0x3<<4)
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_SHIFT 4
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_CLEAR_DA_TIMER_EN (0x1<<6)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_CLEAR_DA_TIMER_EN_SHIFT 6
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_EXPIRATION_FLAG (0x1<<7)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_EXPIRATION_FLAG_SHIFT 7
 	u8 __da_cnt;
 	u16 mss;
 #elif defined(__LITTLE_ENDIAN)
 	u16 mss;
 	u8 __da_cnt;
-	u8 __tcp_agg_vars1;
+	u8 tcp_agg_vars1;
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_DA_TIMER_CF (0x3<<0)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_DA_TIMER_CF_SHIFT 0
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED (0x3<<2)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_SHIFT 2
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF (0x3<<4)
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_SHIFT 4
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_CLEAR_DA_TIMER_EN (0x1<<6)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_CLEAR_DA_TIMER_EN_SHIFT 6
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_EXPIRATION_FLAG (0x1<<7)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_EXPIRATION_FLAG_SHIFT 7
 #endif
 	u32 snd_nxt;
 	u32 tx_wnd;
@@ -2615,7 +1774,7 @@
 	u32 local_adv_wnd;
 #if defined(__BIG_ENDIAN)
 	u8 __agg_val8_th;
-	u8 __agg_val8;
+	u8 __tx_dest;
 	u16 tcp_agg_vars2;
 #define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG (0x1<<0)
 #define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_SHIFT 0
@@ -2631,8 +1790,8 @@
 #define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_ENABLE_SHIFT 5
 #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_EN (0x1<<6)
 #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_EN_SHIFT 6
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_CF_EN (0x1<<7)
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_CF_EN_SHIFT 7
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN (0x1<<7)
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN_SHIFT 7
 #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_EN (0x1<<8)
 #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_EN_SHIFT 8
 #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<9)
@@ -2641,8 +1800,8 @@
 #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_RTO_CF_SHIFT 10
 #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF (0x3<<12)
 #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF_SHIFT 12
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX8_CF (0x3<<14)
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX8_CF_SHIFT 14
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF (0x3<<14)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF_SHIFT 14
 #elif defined(__LITTLE_ENDIAN)
 	u16 tcp_agg_vars2;
 #define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG (0x1<<0)
@@ -2659,8 +1818,8 @@
 #define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_ENABLE_SHIFT 5
 #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_EN (0x1<<6)
 #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_EN_SHIFT 6
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_CF_EN (0x1<<7)
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_CF_EN_SHIFT 7
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN (0x1<<7)
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN_SHIFT 7
 #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_EN (0x1<<8)
 #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_EN_SHIFT 8
 #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<9)
@@ -2669,9 +1828,9 @@
 #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_RTO_CF_SHIFT 10
 #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF (0x3<<12)
 #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF_SHIFT 12
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX8_CF (0x3<<14)
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX8_CF_SHIFT 14
-	u8 __agg_val8;
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF (0x3<<14)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF_SHIFT 14
+	u8 __tx_dest;
 	u8 __agg_val8_th;
 #endif
 	u32 ack_to_far_end;
@@ -2699,8 +1858,8 @@
 	u32 tcp_agg_vars6;
 #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_CF_EN (0x1<<0)
 #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_CF_EN_SHIFT 0
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX8_CF_EN (0x1<<1)
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX8_CF_EN_SHIFT 1
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF_EN (0x1<<1)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF_EN_SHIFT 1
 #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX9_CF_EN (0x1<<2)
 #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX9_CF_EN_SHIFT 2
 #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_EN (0x1<<3)
@@ -2997,321 +2156,2116 @@
 	u32 rst_seq_num;
 };
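Every flags word in these aggregative sections pairs a pre-shifted MASK macro with a companion _SHIFT constant: a field is read by masking and then shifting down, and written by clearing the mask and OR-ing in the shifted value. A minimal sketch of that idiom against the SIDEBAND_SENT_CF field above, as it might appear in a driver file that includes this header (the helper names are illustrative, not part of the driver):

static inline u8
agg_get_sideband_sent_cf(const struct xstorm_tcp_tcp_ag_context_section *s)
{
	/* MASK macros are pre-shifted: mask first, then shift down */
	return (s->tcp_agg_vars1 &
		XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF) >>
		XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_SHIFT;
}

static inline void
agg_set_sideband_sent_cf(struct xstorm_tcp_tcp_ag_context_section *s, u8 val)
{
	/* clear the field, then OR in the shifted (re-masked) value */
	s->tcp_agg_vars1 &= ~XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF;
	s->tcp_agg_vars1 |=
		(val << XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_SHIFT) &
		XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF;
}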
 
-/*
- * The tcp aggregative context section of Tstorm
- */
-struct tstorm_tcp_tcp_ag_context_section {
-	u32 __agg_val1;
-#if defined(__BIG_ENDIAN)
-	u8 __tcp_agg_vars2;
-	u8 __agg_val3;
-	u16 __agg_val2;
-#elif defined(__LITTLE_ENDIAN)
-	u16 __agg_val2;
-	u8 __agg_val3;
-	u8 __tcp_agg_vars2;
-#endif
-#if defined(__BIG_ENDIAN)
-	u16 __agg_val5;
-	u8 __agg_val6;
-	u8 __tcp_agg_vars3;
-#elif defined(__LITTLE_ENDIAN)
-	u8 __tcp_agg_vars3;
-	u8 __agg_val6;
-	u16 __agg_val5;
-#endif
-	u32 snd_nxt;
-	u32 rtt_seq;
-	u32 rtt_time;
-	u32 __reserved66;
-	u32 wnd_right_edge;
-	u32 tcp_agg_vars1;
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<0)
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_FIN_SENT_FLAG_SHIFT 0
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_LAST_PACKET_FIN_FLAG (0x1<<1)
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_LAST_PACKET_FIN_FLAG_SHIFT 1
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF (0x3<<2)
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF_SHIFT 2
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF (0x3<<4)
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF_SHIFT 4
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF_EN (0x1<<6)
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF_EN_SHIFT 6
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF_EN (0x1<<7)
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF_EN_SHIFT 7
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_SEQ_EN (0x1<<8)
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_SEQ_EN_SHIFT 8
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_SND_NXT_EN (0x1<<9)
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_SND_NXT_EN_SHIFT 9
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<10)
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 10
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_FLAG (0x1<<11)
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_FLAG_SHIFT 11
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF_EN (0x1<<12)
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF_EN_SHIFT 12
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF_EN (0x1<<13)
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF_EN_SHIFT 13
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF (0x3<<14)
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF_SHIFT 14
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF (0x3<<16)
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF_SHIFT 16
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_BLOCKED (0x1<<18)
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_BLOCKED_SHIFT 18
-#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_EN (0x1<<19)
-#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_EN_SHIFT 19
-#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX11_CF_EN (0x1<<20)
-#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX11_CF_EN_SHIFT 20
-#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX12_CF_EN (0x1<<21)
-#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX12_CF_EN_SHIFT 21
-#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RESERVED1 (0x3<<22)
-#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RESERVED1_SHIFT 22
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_PEND_SEQ (0xF<<24)
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_PEND_SEQ_SHIFT 24
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_DONE_SEQ (0xF<<28)
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_DONE_SEQ_SHIFT 28
-	u32 snd_max;
-	u32 snd_una;
-	u32 __reserved2;
-};
 
 /*
- * The iscsi aggregative context of Tstorm
+ * The L5cm aggregative context of Xstorm
  */
-struct tstorm_iscsi_ag_context {
+struct xstorm_l5cm_ag_context {
 #if defined(__BIG_ENDIAN)
-	u16 ulp_credit;
+	u16 agg_val1;
 	u8 agg_vars1;
-#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
-#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
-#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
-#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
-#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
-#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
-#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
-#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
-#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF (0x3<<4)
-#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_SHIFT 4
-#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG (0x1<<6)
-#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG_SHIFT 6
-#define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG (0x1<<7)
-#define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG_SHIFT 7
+#define __XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define __XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define __XSTORM_L5CM_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4)
+#define __XSTORM_L5CM_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4
+#define XSTORM_L5CM_AG_CONTEXT_NAGLE_EN (0x1<<5)
+#define XSTORM_L5CM_AG_CONTEXT_NAGLE_EN_SHIFT 5
+#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6)
+#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6
+#define __XSTORM_L5CM_AG_CONTEXT_UNA_GT_NXT_EN (0x1<<7)
+#define __XSTORM_L5CM_AG_CONTEXT_UNA_GT_NXT_EN_SHIFT 7
 	u8 state;
 #elif defined(__LITTLE_ENDIAN)
 	u8 state;
 	u8 agg_vars1;
-#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
-#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
-#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
-#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
-#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
-#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
-#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
-#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
-#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF (0x3<<4)
-#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_SHIFT 4
-#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG (0x1<<6)
-#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG_SHIFT 6
-#define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG (0x1<<7)
-#define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG_SHIFT 7
-	u16 ulp_credit;
+#define __XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define __XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define __XSTORM_L5CM_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4)
+#define __XSTORM_L5CM_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4
+#define XSTORM_L5CM_AG_CONTEXT_NAGLE_EN (0x1<<5)
+#define XSTORM_L5CM_AG_CONTEXT_NAGLE_EN_SHIFT 5
+#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6)
+#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6
+#define __XSTORM_L5CM_AG_CONTEXT_UNA_GT_NXT_EN (0x1<<7)
+#define __XSTORM_L5CM_AG_CONTEXT_UNA_GT_NXT_EN_SHIFT 7
+	u16 agg_val1;
 #endif
 #if defined(__BIG_ENDIAN)
-	u16 __agg_val4;
-	u16 agg_vars2;
-#define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG (0x1<<0)
-#define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG_SHIFT 0
-#define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG (0x1<<1)
-#define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG_SHIFT 1
-#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF (0x3<<2)
-#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_SHIFT 2
-#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF (0x3<<4)
-#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_SHIFT 4
-#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF (0x3<<6)
-#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_SHIFT 6
-#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF (0x3<<8)
-#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_SHIFT 8
-#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG (0x1<<10)
-#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG_SHIFT 10
-#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<11)
-#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 11
-#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN (0x1<<12)
-#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN_SHIFT 12
-#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN (0x1<<13)
-#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN_SHIFT 13
-#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN (0x1<<14)
-#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN_SHIFT 14
-#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN (0x1<<15)
-#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN_SHIFT 15
-#elif defined(__LITTLE_ENDIAN)
-	u16 agg_vars2;
-#define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG (0x1<<0)
-#define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG_SHIFT 0
-#define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG (0x1<<1)
-#define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG_SHIFT 1
-#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF (0x3<<2)
-#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_SHIFT 2
-#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF (0x3<<4)
-#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_SHIFT 4
-#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF (0x3<<6)
-#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_SHIFT 6
-#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF (0x3<<8)
-#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_SHIFT 8
-#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG (0x1<<10)
-#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG_SHIFT 10
-#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<11)
-#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 11
-#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN (0x1<<12)
-#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN_SHIFT 12
-#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN (0x1<<13)
-#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN_SHIFT 13
-#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN (0x1<<14)
-#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN_SHIFT 14
-#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN (0x1<<15)
-#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN_SHIFT 15
-	u16 __agg_val4;
-#endif
-	struct tstorm_tcp_tcp_ag_context_section tcp;
-};
-
-/*
- * The iscsi aggregative context of Ustorm
- */
-struct ustorm_iscsi_ag_context {
-#if defined(__BIG_ENDIAN)
-	u8 __aux_counter_flags;
+	u8 cdu_reserved;
+	u8 __agg_vars4;
+	u8 agg_vars3;
+#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0)
+#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0
+#define __XSTORM_L5CM_AG_CONTEXT_RX_TS_EN_CF (0x3<<6)
+#define __XSTORM_L5CM_AG_CONTEXT_RX_TS_EN_CF_SHIFT 6
 	u8 agg_vars2;
-#define USTORM_ISCSI_AG_CONTEXT_TX_CF (0x3<<0)
-#define USTORM_ISCSI_AG_CONTEXT_TX_CF_SHIFT 0
-#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF (0x3<<2)
-#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_SHIFT 2
-#define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE (0x7<<4)
-#define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE_SHIFT 4
-#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK (0x1<<7)
-#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK_SHIFT 7
-	u8 agg_vars1;
-#define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
-#define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
-#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
-#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
-#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
-#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
-#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
-#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
-#define USTORM_ISCSI_AG_CONTEXT_INV_CF (0x3<<4)
-#define USTORM_ISCSI_AG_CONTEXT_INV_CF_SHIFT 4
-#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF (0x3<<6)
-#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_SHIFT 6
-	u8 state;
+#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF (0x3<<0)
+#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF_SHIFT 0
+#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2)
+#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2
+#define __XSTORM_L5CM_AG_CONTEXT_AUX8_FLAG (0x1<<3)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX8_FLAG_SHIFT 3
+#define __XSTORM_L5CM_AG_CONTEXT_AUX9_FLAG (0x1<<4)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX9_FLAG_SHIFT 4
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE1 (0x3<<5)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE1_SHIFT 5
+#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF_EN (0x1<<7)
+#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF_EN_SHIFT 7
 #elif defined(__LITTLE_ENDIAN)
-	u8 state;
-	u8 agg_vars1;
-#define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
-#define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
-#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
-#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
-#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
-#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
-#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
-#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
-#define USTORM_ISCSI_AG_CONTEXT_INV_CF (0x3<<4)
-#define USTORM_ISCSI_AG_CONTEXT_INV_CF_SHIFT 4
-#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF (0x3<<6)
-#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_SHIFT 6
 	u8 agg_vars2;
-#define USTORM_ISCSI_AG_CONTEXT_TX_CF (0x3<<0)
-#define USTORM_ISCSI_AG_CONTEXT_TX_CF_SHIFT 0
-#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF (0x3<<2)
-#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_SHIFT 2
-#define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE (0x7<<4)
-#define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE_SHIFT 4
-#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK (0x1<<7)
-#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK_SHIFT 7
-	u8 __aux_counter_flags;
+#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF (0x3<<0)
+#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF_SHIFT 0
+#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2)
+#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2
+#define __XSTORM_L5CM_AG_CONTEXT_AUX8_FLAG (0x1<<3)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX8_FLAG_SHIFT 3
+#define __XSTORM_L5CM_AG_CONTEXT_AUX9_FLAG (0x1<<4)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX9_FLAG_SHIFT 4
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE1 (0x3<<5)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE1_SHIFT 5
+#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF_EN (0x1<<7)
+#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF_EN_SHIFT 7
+	u8 agg_vars3;
+#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0)
+#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0
+#define __XSTORM_L5CM_AG_CONTEXT_RX_TS_EN_CF (0x3<<6)
+#define __XSTORM_L5CM_AG_CONTEXT_RX_TS_EN_CF_SHIFT 6
+	u8 __agg_vars4;
+	u8 cdu_reserved;
+#endif
+	u32 more_to_send;
+#if defined(__BIG_ENDIAN)
+	u16 agg_vars5;
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE5 (0x3<<0)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE5_SHIFT 0
+#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2)
+#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2
+#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8)
+#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE2 (0x3<<14)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE2_SHIFT 14
+	u16 agg_val4_th;
+#elif defined(__LITTLE_ENDIAN)
+	u16 agg_val4_th;
+	u16 agg_vars5;
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE5 (0x3<<0)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE5_SHIFT 0
+#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2)
+#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2
+#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8)
+#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE2 (0x3<<14)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE2_SHIFT 14
+#endif
+	struct xstorm_tcp_tcp_ag_context_section tcp;
+#if defined(__BIG_ENDIAN)
+	u16 agg_vars7;
+#define __XSTORM_L5CM_AG_CONTEXT_AGG_VAL11_DECISION_RULE (0x7<<0)
+#define __XSTORM_L5CM_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0
+#define __XSTORM_L5CM_AG_CONTEXT_AUX13_FLAG (0x1<<3)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX13_FLAG_SHIFT 3
+#define __XSTORM_L5CM_AG_CONTEXT_STORMS_SYNC_CF (0x3<<4)
+#define __XSTORM_L5CM_AG_CONTEXT_STORMS_SYNC_CF_SHIFT 4
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE3 (0x3<<6)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE3_SHIFT 6
+#define XSTORM_L5CM_AG_CONTEXT_AUX1_CF (0x3<<8)
+#define XSTORM_L5CM_AG_CONTEXT_AUX1_CF_SHIFT 8
+#define __XSTORM_L5CM_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK (0x1<<10)
+#define __XSTORM_L5CM_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK_SHIFT 10
+#define __XSTORM_L5CM_AG_CONTEXT_AUX1_CF_EN (0x1<<11)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX1_CF_EN_SHIFT 11
+#define __XSTORM_L5CM_AG_CONTEXT_AUX10_FLAG (0x1<<12)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX10_FLAG_SHIFT 12
+#define __XSTORM_L5CM_AG_CONTEXT_AUX11_FLAG (0x1<<13)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX11_FLAG_SHIFT 13
+#define __XSTORM_L5CM_AG_CONTEXT_AUX12_FLAG (0x1<<14)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX12_FLAG_SHIFT 14
+#define __XSTORM_L5CM_AG_CONTEXT_RX_WND_SCL_EN (0x1<<15)
+#define __XSTORM_L5CM_AG_CONTEXT_RX_WND_SCL_EN_SHIFT 15
+	u8 agg_val3_th;
+	u8 agg_vars6;
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE6 (0x7<<0)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE6_SHIFT 0
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE7 (0x7<<3)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE7_SHIFT 3
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE4 (0x3<<6)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE4_SHIFT 6
+#elif defined(__LITTLE_ENDIAN)
+	u8 agg_vars6;
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE6 (0x7<<0)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE6_SHIFT 0
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE7 (0x7<<3)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE7_SHIFT 3
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE4 (0x3<<6)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE4_SHIFT 6
+	u8 agg_val3_th;
+	u16 agg_vars7;
+#define __XSTORM_L5CM_AG_CONTEXT_AGG_VAL11_DECISION_RULE (0x7<<0)
+#define __XSTORM_L5CM_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0
+#define __XSTORM_L5CM_AG_CONTEXT_AUX13_FLAG (0x1<<3)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX13_FLAG_SHIFT 3
+#define __XSTORM_L5CM_AG_CONTEXT_STORMS_SYNC_CF (0x3<<4)
+#define __XSTORM_L5CM_AG_CONTEXT_STORMS_SYNC_CF_SHIFT 4
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE3 (0x3<<6)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE3_SHIFT 6
+#define XSTORM_L5CM_AG_CONTEXT_AUX1_CF (0x3<<8)
+#define XSTORM_L5CM_AG_CONTEXT_AUX1_CF_SHIFT 8
+#define __XSTORM_L5CM_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK (0x1<<10)
+#define __XSTORM_L5CM_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK_SHIFT 10
+#define __XSTORM_L5CM_AG_CONTEXT_AUX1_CF_EN (0x1<<11)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX1_CF_EN_SHIFT 11
+#define __XSTORM_L5CM_AG_CONTEXT_AUX10_FLAG (0x1<<12)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX10_FLAG_SHIFT 12
+#define __XSTORM_L5CM_AG_CONTEXT_AUX11_FLAG (0x1<<13)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX11_FLAG_SHIFT 13
+#define __XSTORM_L5CM_AG_CONTEXT_AUX12_FLAG (0x1<<14)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX12_FLAG_SHIFT 14
+#define __XSTORM_L5CM_AG_CONTEXT_RX_WND_SCL_EN (0x1<<15)
+#define __XSTORM_L5CM_AG_CONTEXT_RX_WND_SCL_EN_SHIFT 15
 #endif
 #if defined(__BIG_ENDIAN)
-	u8 cdu_usage;
-	u8 agg_misc2;
-	u16 __cq_local_comp_itt_val;
+	u16 __agg_val11_th;
+	u16 __gen_data;
 #elif defined(__LITTLE_ENDIAN)
-	u16 __cq_local_comp_itt_val;
-	u8 agg_misc2;
-	u8 cdu_usage;
+	u16 __gen_data;
+	u16 __agg_val11_th;
 #endif
+#if defined(__BIG_ENDIAN)
+	u8 __reserved1;
+	u8 __agg_val6_th;
+	u16 __agg_val9;
+#elif defined(__LITTLE_ENDIAN)
+	u16 __agg_val9;
+	u8 __agg_val6_th;
+	u8 __reserved1;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 agg_val2_th;
+	u16 agg_val2;
+#elif defined(__LITTLE_ENDIAN)
+	u16 agg_val2;
+	u16 agg_val2_th;
+#endif
+	u32 agg_vars8;
+#define XSTORM_L5CM_AG_CONTEXT_AGG_MISC2 (0xFFFFFF<<0)
+#define XSTORM_L5CM_AG_CONTEXT_AGG_MISC2_SHIFT 0
+#define XSTORM_L5CM_AG_CONTEXT_AGG_MISC3 (0xFF<<24)
+#define XSTORM_L5CM_AG_CONTEXT_AGG_MISC3_SHIFT 24
+#if defined(__BIG_ENDIAN)
+	u16 agg_misc0;
+	u16 agg_val4;
+#elif defined(__LITTLE_ENDIAN)
+	u16 agg_val4;
+	u16 agg_misc0;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 agg_val3;
+	u8 agg_val6;
+	u8 agg_val5_th;
+	u8 agg_val5;
+#elif defined(__LITTLE_ENDIAN)
+	u8 agg_val5;
+	u8 agg_val5_th;
+	u8 agg_val6;
+	u8 agg_val3;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 __agg_misc1;
+	u16 agg_limit1;
+#elif defined(__LITTLE_ENDIAN)
+	u16 agg_limit1;
+	u16 __agg_misc1;
+#endif
+	u32 completion_seq;
 	u32 agg_misc4;
+	u32 rst_seq_num;
+};
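Throughout these context structures the #if defined(__BIG_ENDIAN) / #elif defined(__LITTLE_ENDIAN) branches declare the same sub-word fields in opposite order, so each field occupies the same bit range of its 32-bit register image on either host and the chip sees a single layout. A standalone illustration of why the order must be mirrored (not driver code; assumes GCC/Clang's __BYTE_ORDER__ predefine):

#include <stdint.h>
#include <stdio.h>

struct reg0 {			/* models the first register above */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	uint16_t agg_val1;	/* bytes 0-1 */
	uint8_t  agg_vars1;	/* byte 2 */
	uint8_t  state;		/* byte 3: LSB of a big-endian word */
#else
	uint8_t  state;		/* byte 0: LSB of a little-endian word */
	uint8_t  agg_vars1;
	uint16_t agg_val1;
#endif
};

int main(void)
{
	union { struct reg0 r; uint32_t word; } u = { .word = 0 };

	u.r.state = 0xAB;
	/* prints 0x000000ab on both byte orders: 'state' always lands
	 * in bits 0-7 of the register value */
	printf("word = 0x%08x\n", (unsigned)u.word);
	return 0;
}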
+
+/*
+ * ABTS info $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_abts_info {
+	__le16 aborted_task_id;
+	__le16 reserved0;
+	__le32 reserved1;
+};
+
+
+/*
+ * Fixed-size structure so that it can be embedded in a union structure
+ * $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_abts_rsp_union {
+	u8 r_ctl;
+	u8 rsrv[3];
+	__le32 abts_rsp_payload[7];
+};
+
+
+/*
+ * 4 regs in size $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_bd_ctx {
+	__le32 buf_addr_hi;
+	__le32 buf_addr_lo;
+	__le16 buf_len;
+	__le16 rsrv0;
+	__le16 flags;
+	__le16 rsrv1;
+};
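Since the descriptor is tagged $$KEEP_ENDIANNESS$$, its multi-byte fields are stored little-endian and should be written through the cpu_to_le*() converters. A sketch of filling one descriptor from a DMA mapping (the helper name is illustrative):

static void fcoe_fill_bd(struct fcoe_bd_ctx *bd, dma_addr_t dma,
			 u16 len, u16 flags)
{
	bd->buf_addr_hi = cpu_to_le32(upper_32_bits(dma));
	bd->buf_addr_lo = cpu_to_le32(lower_32_bits(dma));
	bd->buf_len = cpu_to_le16(len);
	bd->flags = cpu_to_le16(flags);
	bd->rsrv0 = 0;
	bd->rsrv1 = 0;
}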
+
+
+/*
+ * FCoE cached SGEs context $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_cached_sge_ctx {
+	struct regpair cur_buf_addr;
+	__le16 cur_buf_rem;
+	__le16 second_buf_rem;
+	struct regpair second_buf_addr;
+};
+
+
+/*
+ * Cleanup info $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_cleanup_info {
+	__le16 cleaned_task_id;
+	__le16 rolled_tx_seq_cnt;
+	__le32 rolled_tx_data_offset;
+};
+
+
+/*
+ * FCP RSP flags $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_fcp_rsp_flags {
+	u8 flags;
+#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID (0x1<<0)
+#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID_SHIFT 0
+#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID (0x1<<1)
+#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID_SHIFT 1
+#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER (0x1<<2)
+#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER_SHIFT 2
+#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER (0x1<<3)
+#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER_SHIFT 3
+#define FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ (0x1<<4)
+#define FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ_SHIFT 4
+#define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS (0x7<<5)
+#define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS_SHIFT 5
+};
+
+/*
+ * FCP RSP payload $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_fcp_rsp_payload {
+	struct regpair reserved0;
+	__le32 fcp_resid;
+	u8 scsi_status_code;
+	struct fcoe_fcp_rsp_flags fcp_flags;
+	__le16 retry_delay_timer;
+	__le32 fcp_rsp_len;
+	__le32 fcp_sns_len;
+};
+
+/*
+ * Fixed-size structure so that it can be embedded in a union structure
+ * $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_fcp_rsp_union {
+	struct fcoe_fcp_rsp_payload payload;
+	struct regpair reserved0;
+};
+
+/*
+ * FC header $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_fc_hdr {
+	u8 s_id[3];
+	u8 cs_ctl;
+	u8 d_id[3];
+	u8 r_ctl;
+	__le16 seq_cnt;
+	u8 df_ctl;
+	u8 seq_id;
+	u8 f_ctl[3];
+	u8 type;
+	__le32 parameters;
+	__le16 rx_id;
+	__le16 ox_id;
+};
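The s_id/d_id members hold 24-bit FC_IDs as three bytes in wire (big-endian) order, while seq_cnt, parameters, rx_id and ox_id are little-endian per the $$KEEP_ENDIANNESS$$ convention. A small sketch of packing and unpacking the 3-byte IDs; libfc ships equivalent hton24()/ntoh24() helpers:

static inline u32 fc_id_get(const u8 *p)
{
	return (p[0] << 16) | (p[1] << 8) | p[2];
}

static inline void fc_id_set(u8 *p, u32 id)
{
	p[0] = (id >> 16) & 0xff;
	p[1] = (id >> 8) & 0xff;
	p[2] = id & 0xff;
}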
+
+/*
+ * MP response union: FC header plus payload length $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_mp_rsp_union {
+	struct fcoe_fc_hdr fc_hdr;
+	__le32 mp_payload_len;
+	__le32 rsrv;
+};
+
+/*
+ * Completion information $$KEEP_ENDIANNESS$$
+ */
+union fcoe_comp_flow_info {
+	struct fcoe_fcp_rsp_union fcp_rsp;
+	struct fcoe_abts_rsp_union abts_rsp;
+	struct fcoe_mp_rsp_union mp_rsp;
+	__le32 opaque[8];
+};
+
+
+/*
+ * External ABTS info $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_ext_abts_info {
+	__le32 rsrv0[6];
+	struct fcoe_abts_info ctx;
+};
+
+
+/*
+ * External cleanup info $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_ext_cleanup_info {
+	__le32 rsrv0[6];
+	struct fcoe_cleanup_info ctx;
+};
+
+
+/*
+ * FCoE FW Tx sequence context $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_fw_tx_seq_ctx {
+	__le32 data_offset;
+	__le16 seq_cnt;
+	__le16 rsrv0;
+};
+
+/*
+ * FCoE external FW Tx sequence context $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_ext_fw_tx_seq_ctx {
+	__le32 rsrv0[6];
+	struct fcoe_fw_tx_seq_ctx ctx;
+};
+
+
+/*
+ * FCoE multiple SGEs context $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_mul_sges_ctx {
+	struct regpair cur_sge_addr;
+	__le16 cur_sge_off;
+	u8 cur_sge_idx;
+	u8 sgl_size;
+};
+
+/*
+ * FCoE external multiple SGEs context $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_ext_mul_sges_ctx {
+	struct fcoe_mul_sges_ctx mul_sgl;
+	struct regpair rsrv0;
+};
+
+
+/*
+ * FCP CMD payload $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_fcp_cmd_payload {
+	__le32 opaque[8];
+};
+
+
+
+
+
+/*
+ * FCP XFR_RDY payload $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_fcp_xfr_rdy_payload {
+	__le32 burst_len;
+	__le32 data_ro;
+};
+
+
+/*
+ * FC frame $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_fc_frame {
+	struct fcoe_fc_hdr fc_hdr;
+	__le32 reserved0[2];
+};
+
+
+
+
+/*
+ * FCoE KCQ CQE parameters $$KEEP_ENDIANNESS$$
+ */
+union fcoe_kcqe_params {
+	__le32 reserved0[4];
+};
+
+/*
+ * FCoE KCQ CQE $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kcqe {
+	__le32 fcoe_conn_id;
+	__le32 completion_status;
+	__le32 fcoe_conn_context_id;
+	union fcoe_kcqe_params params;
+	__le16 qe_self_seq;
+	u8 op_code;
+	u8 flags;
+#define FCOE_KCQE_RESERVED0 (0x7<<0)
+#define FCOE_KCQE_RESERVED0_SHIFT 0
+#define FCOE_KCQE_RAMROD_COMPLETION (0x1<<3)
+#define FCOE_KCQE_RAMROD_COMPLETION_SHIFT 3
+#define FCOE_KCQE_LAYER_CODE (0x7<<4)
+#define FCOE_KCQE_LAYER_CODE_SHIFT 4
+#define FCOE_KCQE_LINKED_WITH_NEXT (0x1<<7)
+#define FCOE_KCQE_LINKED_WITH_NEXT_SHIFT 7
+};
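Consuming a KCQE comes down to converting the little-endian words and picking apart the flags byte with the mask/shift pairs. A minimal decode sketch; the function name and debug print are illustrative:

static void fcoe_process_kcqe(const struct fcoe_kcqe *kcqe)
{
	u8 layer = (kcqe->flags & FCOE_KCQE_LAYER_CODE) >>
		   FCOE_KCQE_LAYER_CODE_SHIFT;
	u32 cid = le32_to_cpu(kcqe->fcoe_conn_context_id);
	u32 status = le32_to_cpu(kcqe->completion_status);

	if (kcqe->flags & FCOE_KCQE_RAMROD_COMPLETION)
		pr_debug("ramrod done: layer %u cid 0x%x status 0x%x\n",
			 layer, cid, status);
}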
+
+
+
+/*
+ * FCoE KWQE header $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_header {
+	u8 op_code;
+	u8 flags;
+#define FCOE_KWQE_HEADER_RESERVED0 (0xF<<0)
+#define FCOE_KWQE_HEADER_RESERVED0_SHIFT 0
+#define FCOE_KWQE_HEADER_LAYER_CODE (0x7<<4)
+#define FCOE_KWQE_HEADER_LAYER_CODE_SHIFT 4
+#define FCOE_KWQE_HEADER_RESERVED1 (0x1<<7)
+#define FCOE_KWQE_HEADER_RESERVED1_SHIFT 7
+};
+
+/*
+ * FCoE firmware init request 1 $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_init1 {
+	__le16 num_tasks;
+	struct fcoe_kwqe_header hdr;
+	__le32 task_list_pbl_addr_lo;
+	__le32 task_list_pbl_addr_hi;
+	__le32 dummy_buffer_addr_lo;
+	__le32 dummy_buffer_addr_hi;
+	__le16 sq_num_wqes;
+	__le16 rq_num_wqes;
+	__le16 rq_buffer_log_size;
+	__le16 cq_num_wqes;
+	__le16 mtu;
+	u8 num_sessions_log;
+	u8 flags;
+#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE (0xF<<0)
+#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT 0
+#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC (0x7<<4)
+#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC_SHIFT 4
+#define FCOE_KWQE_INIT1_RESERVED1 (0x1<<7)
+#define FCOE_KWQE_INIT1_RESERVED1_SHIFT 7
+};
+
+/*
+ * FCoE firmware init request 2 $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_init2 {
+	u8 hsi_major_version;
+	u8 hsi_minor_version;
+	struct fcoe_kwqe_header hdr;
+	__le32 hash_tbl_pbl_addr_lo;
+	__le32 hash_tbl_pbl_addr_hi;
+	__le32 t2_hash_tbl_addr_lo;
+	__le32 t2_hash_tbl_addr_hi;
+	__le32 t2_ptr_hash_tbl_addr_lo;
+	__le32 t2_ptr_hash_tbl_addr_hi;
+	__le32 free_list_count;
+};
+
+/*
+ * FCoE firmware init request 3 $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_init3 {
+	__le16 reserved0;
+	struct fcoe_kwqe_header hdr;
+	__le32 error_bit_map_lo;
+	__le32 error_bit_map_hi;
+	u8 perf_config;
+	u8 reserved21[3];
+	__le32 reserved2[4];
+};
+
+/*
+ * FCoE connection offload request 1 $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_conn_offload1 {
+	__le16 fcoe_conn_id;
+	struct fcoe_kwqe_header hdr;
+	__le32 sq_addr_lo;
+	__le32 sq_addr_hi;
+	__le32 rq_pbl_addr_lo;
+	__le32 rq_pbl_addr_hi;
+	__le32 rq_first_pbe_addr_lo;
+	__le32 rq_first_pbe_addr_hi;
+	__le16 rq_prod;
+	__le16 reserved0;
+};
+
+/*
+ * FCoE connection offload request 2 $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_conn_offload2 {
+	__le16 tx_max_fc_pay_len;
+	struct fcoe_kwqe_header hdr;
+	__le32 cq_addr_lo;
+	__le32 cq_addr_hi;
+	__le32 xferq_addr_lo;
+	__le32 xferq_addr_hi;
+	__le32 conn_db_addr_lo;
+	__le32 conn_db_addr_hi;
+	__le32 reserved1;
+};
+
+/*
+ * FCoE connection offload request 3 $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_conn_offload3 {
+	__le16 vlan_tag;
+#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID (0xFFF<<0)
+#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT 0
+#define FCOE_KWQE_CONN_OFFLOAD3_CFI (0x1<<12)
+#define FCOE_KWQE_CONN_OFFLOAD3_CFI_SHIFT 12
+#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY (0x7<<13)
+#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT 13
+	struct fcoe_kwqe_header hdr;
+	u8 s_id[3];
+	u8 tx_max_conc_seqs_c3;
+	u8 d_id[3];
+	u8 flags;
+#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS (0x1<<0)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT 0
+#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES (0x1<<1)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT 1
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT (0x1<<2)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT 2
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ (0x1<<3)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT 3
+#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID (0x1<<4)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT 4
+#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID (0x1<<5)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID_SHIFT 5
+#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0 (0x1<<6)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0_SHIFT 6
+#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG (0x1<<7)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT 7
+	__le32 reserved;
+	__le32 confq_first_pbe_addr_lo;
+	__le32 confq_first_pbe_addr_hi;
+	__le16 tx_total_conc_seqs;
+	__le16 rx_max_fc_pay_len;
+	__le16 rx_total_conc_seqs;
+	u8 rx_max_conc_seqs_c3;
+	u8 rx_open_seqs_exch_c3;
+};
+
+/*
+ * FCoE connection offload request 4 $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_conn_offload4 {
+	u8 e_d_tov_timer_val;
+	u8 reserved2;
+	struct fcoe_kwqe_header hdr;
+	u8 src_mac_addr_lo[2];
+	u8 src_mac_addr_mid[2];
+	u8 src_mac_addr_hi[2];
+	u8 dst_mac_addr_hi[2];
+	u8 dst_mac_addr_lo[2];
+	u8 dst_mac_addr_mid[2];
+	__le32 lcq_addr_lo;
+	__le32 lcq_addr_hi;
+	__le32 confq_pbl_base_addr_lo;
+	__le32 confq_pbl_base_addr_hi;
+};
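The firmware takes each MAC address as three two-byte chunks. A sketch of one plausible fill from a conventional 6-byte array, assuming the bnx2fc convention that the _lo pair carries the least-significant bytes (the byte ordering is firmware-defined, so treat this mapping as an assumption):

static void fcoe_set_src_mac(struct fcoe_kwqe_conn_offload4 *ofld4,
			     const u8 *mac)
{
	/* assumed: _lo holds the two least-significant address bytes */
	ofld4->src_mac_addr_lo[0] = mac[5];
	ofld4->src_mac_addr_lo[1] = mac[4];
	ofld4->src_mac_addr_mid[0] = mac[3];
	ofld4->src_mac_addr_mid[1] = mac[2];
	ofld4->src_mac_addr_hi[0] = mac[1];
	ofld4->src_mac_addr_hi[1] = mac[0];
}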
+
+/*
+ * FCoE connection enable/disable request $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_conn_enable_disable {
+	__le16 reserved0;
+	struct fcoe_kwqe_header hdr;
+	u8 src_mac_addr_lo[2];
+	u8 src_mac_addr_mid[2];
+	u8 src_mac_addr_hi[2];
+	u16 vlan_tag;
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID (0xFFF<<0)
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT 0
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI (0x1<<12)
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI_SHIFT 12
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY (0x7<<13)
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT 13
+	u8 dst_mac_addr_lo[2];
+	u8 dst_mac_addr_mid[2];
+	u8 dst_mac_addr_hi[2];
+	__le16 reserved1;
+	u8 s_id[3];
+	u8 vlan_flag;
+	u8 d_id[3];
+	u8 reserved3;
+	__le32 context_id;
+	__le32 conn_id;
+	__le32 reserved4;
+};
+
+/*
+ * FCoE connection destroy request $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_conn_destroy {
+	__le16 reserved0;
+	struct fcoe_kwqe_header hdr;
+	__le32 context_id;
+	__le32 conn_id;
+	__le32 reserved1[5];
+};
+
+/*
+ * FCoE destroy request $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_destroy {
+	__le16 reserved0;
+	struct fcoe_kwqe_header hdr;
+	__le32 reserved1[7];
+};
+
+/*
+ * FCoE statistics request $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_stat {
+	__le16 reserved0;
+	struct fcoe_kwqe_header hdr;
+	__le32 stat_params_addr_lo;
+	__le32 stat_params_addr_hi;
+	__le32 reserved1[5];
+};
+
+/*
+ * FCoE KWQ WQE $$KEEP_ENDIANNESS$$
+ */
+union fcoe_kwqe {
+	struct fcoe_kwqe_init1 init1;
+	struct fcoe_kwqe_init2 init2;
+	struct fcoe_kwqe_init3 init3;
+	struct fcoe_kwqe_conn_offload1 conn_offload1;
+	struct fcoe_kwqe_conn_offload2 conn_offload2;
+	struct fcoe_kwqe_conn_offload3 conn_offload3;
+	struct fcoe_kwqe_conn_offload4 conn_offload4;
+	struct fcoe_kwqe_conn_enable_disable conn_enable_disable;
+	struct fcoe_kwqe_conn_destroy conn_destroy;
+	struct fcoe_kwqe_destroy destroy;
+	struct fcoe_kwqe_stat statistics;
+};
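Every variant in the union places its struct fcoe_kwqe_header after a single leading 16-bit field, so the header sits at the same offset in all members and a dispatcher may read the opcode through any of them. Only the extraction is shown here, since the opcode constants are defined outside this hunk:

static u8 fcoe_kwqe_opcode(const union fcoe_kwqe *kwqe)
{
	/* hdr is at offset 2 in every member of the union */
	return kwqe->init1.hdr.op_code;
}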
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+/*
+ * TX SGL context $$KEEP_ENDIANNESS$$
+ */
+union fcoe_sgl_union_ctx {
+	struct fcoe_cached_sge_ctx cached_sge;
+	struct fcoe_ext_mul_sges_ctx sgl;
+	__le32 opaque[5];
+};
+
+/*
+ * Data-In/ELS/BLS information $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_read_flow_info {
+	union fcoe_sgl_union_ctx sgl_ctx;
+	__le32 rsrv0[3];
+};
+
+
+/*
+ * FCoE stat context $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_s_stat_ctx {
+	u8 flags;
+#define FCOE_S_STAT_CTX_ACTIVE (0x1<<0)
+#define FCOE_S_STAT_CTX_ACTIVE_SHIFT 0
+#define FCOE_S_STAT_CTX_ACK_ABORT_SEQ_COND (0x1<<1)
+#define FCOE_S_STAT_CTX_ACK_ABORT_SEQ_COND_SHIFT 1
+#define FCOE_S_STAT_CTX_ABTS_PERFORMED (0x1<<2)
+#define FCOE_S_STAT_CTX_ABTS_PERFORMED_SHIFT 2
+#define FCOE_S_STAT_CTX_SEQ_TIMEOUT (0x1<<3)
+#define FCOE_S_STAT_CTX_SEQ_TIMEOUT_SHIFT 3
+#define FCOE_S_STAT_CTX_P_RJT (0x1<<4)
+#define FCOE_S_STAT_CTX_P_RJT_SHIFT 4
+#define FCOE_S_STAT_CTX_ACK_EOFT (0x1<<5)
+#define FCOE_S_STAT_CTX_ACK_EOFT_SHIFT 5
+#define FCOE_S_STAT_CTX_RSRV1 (0x3<<6)
+#define FCOE_S_STAT_CTX_RSRV1_SHIFT 6
+};
+
+/*
+ * FCoE rx seq context $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_rx_seq_ctx {
+	u8 seq_id;
+	struct fcoe_s_stat_ctx s_stat;
+	__le16 seq_cnt;
+	__le32 low_exp_ro;
+	__le32 high_exp_ro;
+};
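A sketch of how the expected-offset window in this context might be checked against a received relative offset; the window semantics of low_exp_ro/high_exp_ro are an assumption based only on the field names:

static bool fcoe_ro_in_window(const struct fcoe_rx_seq_ctx *rx, u32 ro)
{
	return ro >= le32_to_cpu(rx->low_exp_ro) &&
	       ro <= le32_to_cpu(rx->high_exp_ro);
}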
+
+
+/*
+ * FCoE rx_wr union context $$KEEP_ENDIANNESS$$
+ */
+union fcoe_rx_wr_union_ctx {
+	struct fcoe_read_flow_info read_info;
+	union fcoe_comp_flow_info comp_info;
+	__le32 opaque[8];
+};
+
+
+
+/*
+ * FCoE SQ element $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_sqe {
+	__le16 wqe;
+#define FCOE_SQE_TASK_ID (0x7FFF<<0)
+#define FCOE_SQE_TASK_ID_SHIFT 0
+#define FCOE_SQE_TOGGLE_BIT (0x1<<15)
+#define FCOE_SQE_TOGGLE_BIT_SHIFT 15
+};
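Posting a work request packs a 15-bit task id together with the toggle bit; in rings of this family the toggle bit conventionally flips on each producer wrap so the firmware can tell fresh entries from stale ones (the wrap semantics and the helper name are assumptions here):

static void fcoe_build_sqe(struct fcoe_sqe *sqe, u16 xid, u8 toggle)
{
	u16 wqe = xid & FCOE_SQE_TASK_ID;

	wqe |= (toggle << FCOE_SQE_TOGGLE_BIT_SHIFT) & FCOE_SQE_TOGGLE_BIT;
	sqe->wqe = cpu_to_le16(wqe);
}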
+
+
+
+/*
+ * 14 regs $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_tce_tx_only {
+	union fcoe_sgl_union_ctx sgl_ctx;
+	__le32 rsrv0;
+};
+
+/*
+ * 32 bytes (8 regs) used for TX only purposes $$KEEP_ENDIANNESS$$
+ */
+union fcoe_tx_wr_rx_rd_union_ctx {
+	struct fcoe_fc_frame tx_frame;
+	struct fcoe_fcp_cmd_payload fcp_cmd;
+	struct fcoe_ext_cleanup_info cleanup;
+	struct fcoe_ext_abts_info abts;
+	struct fcoe_ext_fw_tx_seq_ctx tx_seq;
+	__le32 opaque[8];
+};
+
+/*
+ * tce_tx_wr_rx_rd_const $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_tce_tx_wr_rx_rd_const {
+	u8 init_flags;
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE (0x7<<0)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT 0
+#define FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE (0x1<<3)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT 3
+#define FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE (0x1<<4)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT 4
+#define FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE (0x3<<5)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT 5
+#define FCOE_TCE_TX_WR_RX_RD_CONST_SUPPORT_REC_TOV (0x1<<7)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_SUPPORT_REC_TOV_SHIFT 7
+	u8 tx_flags;
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_VALID (0x1<<0)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_VALID_SHIFT 0
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE (0xF<<1)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT 1
+#define FCOE_TCE_TX_WR_RX_RD_CONST_RSRV1 (0x1<<5)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_RSRV1_SHIFT 5
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_SEQ_INIT (0x1<<6)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_SEQ_INIT_SHIFT 6
+#define FCOE_TCE_TX_WR_RX_RD_CONST_RSRV2 (0x1<<7)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_RSRV2_SHIFT 7
+	__le16 rsrv3;
+	__le32 verify_tx_seq;
+};
+
+/*
+ * tce_tx_wr_rx_rd $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_tce_tx_wr_rx_rd {
+	union fcoe_tx_wr_rx_rd_union_ctx union_ctx;
+	struct fcoe_tce_tx_wr_rx_rd_const const_ctx;
+};
+
+/*
+ * tce_rx_wr_tx_rd_const $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_tce_rx_wr_tx_rd_const {
+	__le32 data_2_trns;
+	__le32 init_flags;
+#define FCOE_TCE_RX_WR_TX_RD_CONST_CID (0xFFFFFF<<0)
+#define FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT 0
+#define FCOE_TCE_RX_WR_TX_RD_CONST_RSRV0 (0xFF<<24)
+#define FCOE_TCE_RX_WR_TX_RD_CONST_RSRV0_SHIFT 24
+};
+
+/*
+ * tce_rx_wr_tx_rd_var $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_tce_rx_wr_tx_rd_var {
+	__le16 rx_flags;
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV1 (0xF<<0)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV1_SHIFT 0
+#define FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE (0x7<<4)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE_SHIFT 4
+#define FCOE_TCE_RX_WR_TX_RD_VAR_CONF_REQ (0x1<<7)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_CONF_REQ_SHIFT 7
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE (0xF<<8)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE_SHIFT 8
+#define FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME (0x1<<12)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT 12
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_SEQ_INIT (0x1<<13)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_SEQ_INIT_SHIFT 13
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV2 (0x1<<14)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV2_SHIFT 14
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_VALID (0x1<<15)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_VALID_SHIFT 15
+	__le16 rx_id;
+	struct fcoe_fcp_xfr_rdy_payload fcp_xfr_rdy;
+};
+
+/*
+ * tce_rx_wr_tx_rd $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_tce_rx_wr_tx_rd {
+	struct fcoe_tce_rx_wr_tx_rd_const const_ctx;
+	struct fcoe_tce_rx_wr_tx_rd_var var_ctx;
+};
+
+/*
+ * tce_rx_only $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_tce_rx_only {
+	struct fcoe_rx_seq_ctx rx_seq_ctx;
+	union fcoe_rx_wr_union_ctx union_ctx;
+};
+
+/*
+ * task_ctx_entry $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_task_ctx_entry {
+	struct fcoe_tce_tx_only txwr_only;
+	struct fcoe_tce_tx_wr_rx_rd txwr_rxrd;
+	struct fcoe_tce_rx_wr_tx_rd rxwr_txrd;
+	struct fcoe_tce_rx_only rxwr_only;
+};
+
+
+
+
+
+
+
+
+
+
+/*
+ * FCoE XFRQ element $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_xfrqe {
+	__le16 wqe;
+#define FCOE_XFRQE_TASK_ID (0x7FFF<<0)
+#define FCOE_XFRQE_TASK_ID_SHIFT 0
+#define FCOE_XFRQE_TOGGLE_BIT (0x1<<15)
+#define FCOE_XFRQE_TOGGLE_BIT_SHIFT 15
+};
+
+
+/*
+ * Cached SGEs $$KEEP_ENDIANNESS$$
+ */
+struct common_fcoe_sgl {
+	struct fcoe_bd_ctx sge[3];
+};
+
+
+/*
+ * FCoE SQ/XFRQ element
+ */
+struct fcoe_cached_wqe {
+	struct fcoe_sqe sqe;
+	struct fcoe_xfrqe xfrqe;
+};
+
+
+/*
+ * FCoE connection enable/disable params passed by driver to FW in FCoE enable
+ * ramrod $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_conn_enable_disable_ramrod_params {
+	struct fcoe_kwqe_conn_enable_disable enable_disable_kwqe;
+};
+
+
+/*
+ * FCoE connection offload params passed by driver to FW in FCoE offload ramrod
+ * $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_conn_offload_ramrod_params {
+	struct fcoe_kwqe_conn_offload1 offload_kwqe1;
+	struct fcoe_kwqe_conn_offload2 offload_kwqe2;
+	struct fcoe_kwqe_conn_offload3 offload_kwqe3;
+	struct fcoe_kwqe_conn_offload4 offload_kwqe4;
+};
+
+
+struct ustorm_fcoe_mng_ctx {
 #if defined(__BIG_ENDIAN)
-	u8 agg_val3_th;
-	u8 agg_val3;
-	u16 agg_misc3;
+	u8 mid_seq_proc_flag;
+	u8 tce_in_cam_flag;
+	u8 tce_on_ior_flag;
+	u8 en_cached_tce_flag;
 #elif defined(__LITTLE_ENDIAN)
-	u16 agg_misc3;
-	u8 agg_val3;
-	u8 agg_val3_th;
-#endif
-	u32 agg_val1;
-	u32 agg_misc4_th;
-#if defined(__BIG_ENDIAN)
-	u16 agg_val2_th;
-	u16 agg_val2;
-#elif defined(__LITTLE_ENDIAN)
-	u16 agg_val2;
-	u16 agg_val2_th;
+	u8 en_cached_tce_flag;
+	u8 tce_on_ior_flag;
+	u8 tce_in_cam_flag;
+	u8 mid_seq_proc_flag;
 #endif
 #if defined(__BIG_ENDIAN)
-	u16 __reserved2;
-	u8 decision_rules;
-#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE (0x7<<0)
-#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE_SHIFT 0
-#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3)
-#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3
-#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG (0x1<<6)
-#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG_SHIFT 6
-#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1 (0x1<<7)
-#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1_SHIFT 7
-	u8 decision_rule_enable_bits;
-#define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN (0x1<<0)
-#define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN_SHIFT 0
-#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN (0x1<<1)
-#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN_SHIFT 1
-#define USTORM_ISCSI_AG_CONTEXT_TX_CF_EN (0x1<<2)
-#define USTORM_ISCSI_AG_CONTEXT_TX_CF_EN_SHIFT 2
-#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN (0x1<<3)
-#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN_SHIFT 3
-#define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN (0x1<<4)
-#define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN_SHIFT 4
-#define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<5)
-#define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 5
-#define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN (0x1<<6)
-#define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN_SHIFT 6
-#define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN (0x1<<7)
-#define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN_SHIFT 7
+	u8 tce_cam_addr;
+	u8 cached_conn_flag;
+	u16 rsrv0;
 #elif defined(__LITTLE_ENDIAN)
-	u8 decision_rule_enable_bits;
-#define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN (0x1<<0)
-#define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN_SHIFT 0
-#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN (0x1<<1)
-#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN_SHIFT 1
-#define USTORM_ISCSI_AG_CONTEXT_TX_CF_EN (0x1<<2)
-#define USTORM_ISCSI_AG_CONTEXT_TX_CF_EN_SHIFT 2
-#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN (0x1<<3)
-#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN_SHIFT 3
-#define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN (0x1<<4)
-#define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN_SHIFT 4
-#define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<5)
-#define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 5
-#define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN (0x1<<6)
-#define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN_SHIFT 6
-#define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN (0x1<<7)
-#define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN_SHIFT 7
-	u8 decision_rules;
-#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE (0x7<<0)
-#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE_SHIFT 0
-#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3)
-#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3
-#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG (0x1<<6)
-#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG_SHIFT 6
-#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1 (0x1<<7)
-#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1_SHIFT 7
-	u16 __reserved2;
+	u16 rsrv0;
+	u8 cached_conn_flag;
+	u8 tce_cam_addr;
 #endif
+#if defined(__BIG_ENDIAN)
+	u16 dma_tce_ram_addr;
+	u16 tce_ram_addr;
+#elif defined(__LITTLE_ENDIAN)
+	u16 tce_ram_addr;
+	u16 dma_tce_ram_addr;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 ox_id;
+	u16 wr_done_seq;
+#elif defined(__LITTLE_ENDIAN)
+	u16 wr_done_seq;
+	u16 ox_id;
+#endif
+	struct regpair task_addr;
+};
+
+/*
+ * Parameters initialized during offload according to FLOGI/PLOGI/PRLI and
+ * used in the FCoE context section
+ */
+struct ustorm_fcoe_params {
+#if defined(__BIG_ENDIAN)
+	u16 fcoe_conn_id;
+	u16 flags;
+#define USTORM_FCOE_PARAMS_B_MUL_N_PORT_IDS (0x1<<0)
+#define USTORM_FCOE_PARAMS_B_MUL_N_PORT_IDS_SHIFT 0
+#define USTORM_FCOE_PARAMS_B_E_D_TOV_RES (0x1<<1)
+#define USTORM_FCOE_PARAMS_B_E_D_TOV_RES_SHIFT 1
+#define USTORM_FCOE_PARAMS_B_CONT_INCR_SEQ_CNT (0x1<<2)
+#define USTORM_FCOE_PARAMS_B_CONT_INCR_SEQ_CNT_SHIFT 2
+#define USTORM_FCOE_PARAMS_B_CONF_REQ (0x1<<3)
+#define USTORM_FCOE_PARAMS_B_CONF_REQ_SHIFT 3
+#define USTORM_FCOE_PARAMS_B_REC_VALID (0x1<<4)
+#define USTORM_FCOE_PARAMS_B_REC_VALID_SHIFT 4
+#define USTORM_FCOE_PARAMS_B_CQ_TOGGLE_BIT (0x1<<5)
+#define USTORM_FCOE_PARAMS_B_CQ_TOGGLE_BIT_SHIFT 5
+#define USTORM_FCOE_PARAMS_B_XFRQ_TOGGLE_BIT (0x1<<6)
+#define USTORM_FCOE_PARAMS_B_XFRQ_TOGGLE_BIT_SHIFT 6
+#define USTORM_FCOE_PARAMS_RSRV0 (0x1FF<<7)
+#define USTORM_FCOE_PARAMS_RSRV0_SHIFT 7
+#elif defined(__LITTLE_ENDIAN)
+	u16 flags;
+#define USTORM_FCOE_PARAMS_B_MUL_N_PORT_IDS (0x1<<0)
+#define USTORM_FCOE_PARAMS_B_MUL_N_PORT_IDS_SHIFT 0
+#define USTORM_FCOE_PARAMS_B_E_D_TOV_RES (0x1<<1)
+#define USTORM_FCOE_PARAMS_B_E_D_TOV_RES_SHIFT 1
+#define USTORM_FCOE_PARAMS_B_CONT_INCR_SEQ_CNT (0x1<<2)
+#define USTORM_FCOE_PARAMS_B_CONT_INCR_SEQ_CNT_SHIFT 2
+#define USTORM_FCOE_PARAMS_B_CONF_REQ (0x1<<3)
+#define USTORM_FCOE_PARAMS_B_CONF_REQ_SHIFT 3
+#define USTORM_FCOE_PARAMS_B_REC_VALID (0x1<<4)
+#define USTORM_FCOE_PARAMS_B_REC_VALID_SHIFT 4
+#define USTORM_FCOE_PARAMS_B_CQ_TOGGLE_BIT (0x1<<5)
+#define USTORM_FCOE_PARAMS_B_CQ_TOGGLE_BIT_SHIFT 5
+#define USTORM_FCOE_PARAMS_B_XFRQ_TOGGLE_BIT (0x1<<6)
+#define USTORM_FCOE_PARAMS_B_XFRQ_TOGGLE_BIT_SHIFT 6
+#define USTORM_FCOE_PARAMS_RSRV0 (0x1FF<<7)
+#define USTORM_FCOE_PARAMS_RSRV0_SHIFT 7
+	u16 fcoe_conn_id;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 hc_csdm_byte_en;
+	u8 func_id;
+	u8 port_id;
+	u8 vnic_id;
+#elif defined(__LITTLE_ENDIAN)
+	u8 vnic_id;
+	u8 port_id;
+	u8 func_id;
+	u8 hc_csdm_byte_en;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 rx_total_conc_seqs;
+	u16 rx_max_fc_pay_len;
+#elif defined(__LITTLE_ENDIAN)
+	u16 rx_max_fc_pay_len;
+	u16 rx_total_conc_seqs;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 task_pbe_idx_off;
+	u8 task_in_page_log_size;
+	u16 rx_max_conc_seqs;
+#elif defined(__LITTLE_ENDIAN)
+	u16 rx_max_conc_seqs;
+	u8 task_in_page_log_size;
+	u8 task_pbe_idx_off;
+#endif
+};
+
+/*
+ * FCoE 16-bit index structure
+ */
+struct fcoe_idx16_fields {
+	u16 fields;
+#define FCOE_IDX16_FIELDS_IDX (0x7FFF<<0)
+#define FCOE_IDX16_FIELDS_IDX_SHIFT 0
+#define FCOE_IDX16_FIELDS_MSB (0x1<<15)
+#define FCOE_IDX16_FIELDS_MSB_SHIFT 15
+};
+
+/*
+ * FCoE 16-bit index union
+ */
+union fcoe_idx16_field_union {
+	struct fcoe_idx16_fields fields;
+	u16 val;
+};
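The union lets the same 16 bits be read either as a raw value or as a 15-bit index plus an MSB that can act as a phase/wrap marker. A sketch of advancing such an index, assuming the MSB flips on every wrap (an assumption based only on the field split above):

static void fcoe_idx16_inc(union fcoe_idx16_field_union *idx, u16 ring_size)
{
	u16 i = idx->val & FCOE_IDX16_FIELDS_IDX;
	u16 msb = idx->val & FCOE_IDX16_FIELDS_MSB;

	if (++i == ring_size) {		/* wrap: restart index, flip phase */
		i = 0;
		msb ^= FCOE_IDX16_FIELDS_MSB;
	}
	idx->val = msb | i;
}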
+
+/*
+ * Parameters required for placement according to the SGL
+ */
+struct ustorm_fcoe_data_place_mng {
+#if defined(__BIG_ENDIAN)
+	u16 sge_off;
+	u8 num_sges;
+	u8 sge_idx;
+#elif defined(__LITTLE_ENDIAN)
+	u8 sge_idx;
+	u8 num_sges;
+	u16 sge_off;
+#endif
+};
+
+/*
+ * Parameters required for placement according to the SGL
+ */
+struct ustorm_fcoe_data_place {
+	struct ustorm_fcoe_data_place_mng cached_mng;
+	struct fcoe_bd_ctx cached_sge[2];
+};
+
+/*
+ * TX processing shall write to, and RX processing shall read from, this section
+ */
+union fcoe_u_tce_tx_wr_rx_rd_union {
+	struct fcoe_abts_info abts;
+	struct fcoe_cleanup_info cleanup;
+	struct fcoe_fw_tx_seq_ctx tx_seq_ctx;
+	u32 opaque[2];
+};
+
+/*
+ * TX processing shall write to, and RX processing shall read from, this section
+ */
+struct fcoe_u_tce_tx_wr_rx_rd {
+	union fcoe_u_tce_tx_wr_rx_rd_union union_ctx;
+	struct fcoe_tce_tx_wr_rx_rd_const const_ctx;
+};
+
+struct ustorm_fcoe_tce {
+	struct fcoe_u_tce_tx_wr_rx_rd txwr_rxrd;
+	struct fcoe_tce_rx_wr_tx_rd rxwr_txrd;
+	struct fcoe_tce_rx_only rxwr;
+};
+
+struct ustorm_fcoe_cache_ctx {
+	u32 rsrv0;
+	struct ustorm_fcoe_data_place data_place;
+	struct ustorm_fcoe_tce tce;
+};
+
+/*
+ * Ustorm FCoE Storm Context
+ */
+struct ustorm_fcoe_st_context {
+	struct ustorm_fcoe_mng_ctx mng_ctx;
+	struct ustorm_fcoe_params fcoe_params;
+	struct regpair cq_base_addr;
+	struct regpair rq_pbl_base;
+	struct regpair rq_cur_page_addr;
+	struct regpair confq_pbl_base_addr;
+	struct regpair conn_db_base;
+	struct regpair xfrq_base_addr;
+	struct regpair lcq_base_addr;
+#if defined(__BIG_ENDIAN)
+	union fcoe_idx16_field_union rq_cons;
+	union fcoe_idx16_field_union rq_prod;
+#elif defined(__LITTLE_ENDIAN)
+	union fcoe_idx16_field_union rq_prod;
+	union fcoe_idx16_field_union rq_cons;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 xfrq_prod;
+	u16 cq_cons;
+#elif defined(__LITTLE_ENDIAN)
+	u16 cq_cons;
+	u16 xfrq_prod;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 lcq_cons;
+	u16 hc_cram_address;
+#elif defined(__LITTLE_ENDIAN)
+	u16 hc_cram_address;
+	u16 lcq_cons;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 sq_xfrq_lcq_confq_size;
+	u16 confq_prod;
+#elif defined(__LITTLE_ENDIAN)
+	u16 confq_prod;
+	u16 sq_xfrq_lcq_confq_size;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 hc_csdm_agg_int;
+	u8 rsrv2;
+	u8 available_rqes;
+	u8 sp_q_flush_cnt;
+#elif defined(__LITTLE_ENDIAN)
+	u8 sp_q_flush_cnt;
+	u8 available_rqes;
+	u8 rsrv2;
+	u8 hc_csdm_agg_int;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 num_pend_tasks;
+	u16 pbf_ack_ram_addr;
+#elif defined(__LITTLE_ENDIAN)
+	u16 pbf_ack_ram_addr;
+	u16 num_pend_tasks;
+#endif
+	struct ustorm_fcoe_cache_ctx cache_ctx;
+};
+
+/*
+ * The FCoE non-aggregative context of Tstorm
+ */
+struct tstorm_fcoe_st_context {
+	struct regpair reserved0;
+	struct regpair reserved1;
+};
+
+/*
+ * Ethernet context section
+ */
+struct xstorm_fcoe_eth_context_section {
+#if defined(__BIG_ENDIAN)
+	u8 remote_addr_4;
+	u8 remote_addr_5;
+	u8 local_addr_0;
+	u8 local_addr_1;
+#elif defined(__LITTLE_ENDIAN)
+	u8 local_addr_1;
+	u8 local_addr_0;
+	u8 remote_addr_5;
+	u8 remote_addr_4;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 remote_addr_0;
+	u8 remote_addr_1;
+	u8 remote_addr_2;
+	u8 remote_addr_3;
+#elif defined(__LITTLE_ENDIAN)
+	u8 remote_addr_3;
+	u8 remote_addr_2;
+	u8 remote_addr_1;
+	u8 remote_addr_0;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 reserved_vlan_type;
+	u16 params;
+#define XSTORM_FCOE_ETH_CONTEXT_SECTION_VLAN_ID (0xFFF<<0)
+#define XSTORM_FCOE_ETH_CONTEXT_SECTION_VLAN_ID_SHIFT 0
+#define XSTORM_FCOE_ETH_CONTEXT_SECTION_CFI (0x1<<12)
+#define XSTORM_FCOE_ETH_CONTEXT_SECTION_CFI_SHIFT 12
+#define XSTORM_FCOE_ETH_CONTEXT_SECTION_PRIORITY (0x7<<13)
+#define XSTORM_FCOE_ETH_CONTEXT_SECTION_PRIORITY_SHIFT 13
+#elif defined(__LITTLE_ENDIAN)
+	u16 params;
+#define XSTORM_FCOE_ETH_CONTEXT_SECTION_VLAN_ID (0xFFF<<0)
+#define XSTORM_FCOE_ETH_CONTEXT_SECTION_VLAN_ID_SHIFT 0
+#define XSTORM_FCOE_ETH_CONTEXT_SECTION_CFI (0x1<<12)
+#define XSTORM_FCOE_ETH_CONTEXT_SECTION_CFI_SHIFT 12
+#define XSTORM_FCOE_ETH_CONTEXT_SECTION_PRIORITY (0x7<<13)
+#define XSTORM_FCOE_ETH_CONTEXT_SECTION_PRIORITY_SHIFT 13
+	u16 reserved_vlan_type;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 local_addr_2;
+	u8 local_addr_3;
+	u8 local_addr_4;
+	u8 local_addr_5;
+#elif defined(__LITTLE_ENDIAN)
+	u8 local_addr_5;
+	u8 local_addr_4;
+	u8 local_addr_3;
+	u8 local_addr_2;
+#endif
+};
+
+/*
+ * Flags used in the FCoE context section - 1 byte
+ */
+struct xstorm_fcoe_context_flags {
+	u8 flags;
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_PROC_Q (0x3<<0)
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_PROC_Q_SHIFT 0
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_MID_SEQ (0x1<<2)
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_MID_SEQ_SHIFT 2
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_BLOCK_SQ (0x1<<3)
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_BLOCK_SQ_SHIFT 3
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_REC_SUPPORT (0x1<<4)
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_REC_SUPPORT_SHIFT 4
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_SQ_TOGGLE (0x1<<5)
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_SQ_TOGGLE_SHIFT 5
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_XFRQ_TOGGLE (0x1<<6)
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_XFRQ_TOGGLE_SHIFT 6
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_VNTAG_VLAN (0x1<<7)
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_VNTAG_VLAN_SHIFT 7
+};
+
+struct xstorm_fcoe_tce {
+	struct fcoe_tce_tx_only txwr;
+	struct fcoe_tce_tx_wr_rx_rd txwr_rxrd;
+};
+
+/*
+ * FCP_DATA parameters required for transmission
+ */
+struct xstorm_fcoe_fcp_data {
+	u32 io_rem;
+#if defined(__BIG_ENDIAN)
+	u16 cached_sge_off;
+	u8 cached_num_sges;
+	u8 cached_sge_idx;
+#elif defined(__LITTLE_ENDIAN)
+	u8 cached_sge_idx;
+	u8 cached_num_sges;
+	u16 cached_sge_off;
+#endif
+	u32 buf_addr_hi_0;
+	u32 buf_addr_lo_0;
+#if defined(__BIG_ENDIAN)
+	u16 num_of_pending_tasks;
+	u16 buf_len_0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 buf_len_0;
+	u16 num_of_pending_tasks;
+#endif
+	u32 buf_addr_hi_1;
+	u32 buf_addr_lo_1;
+#if defined(__BIG_ENDIAN)
+	u16 task_pbe_idx_off;
+	u16 buf_len_1;
+#elif defined(__LITTLE_ENDIAN)
+	u16 buf_len_1;
+	u16 task_pbe_idx_off;
+#endif
+	u32 buf_addr_hi_2;
+	u32 buf_addr_lo_2;
+#if defined(__BIG_ENDIAN)
+	u16 ox_id;
+	u16 buf_len_2;
+#elif defined(__LITTLE_ENDIAN)
+	u16 buf_len_2;
+	u16 ox_id;
+#endif
+};
+
+/*
+ * VLAN configuration
+ */
+struct xstorm_fcoe_vlan_conf {
+	u8 vlan_conf;
+#define XSTORM_FCOE_VLAN_CONF_PRIORITY (0x7<<0)
+#define XSTORM_FCOE_VLAN_CONF_PRIORITY_SHIFT 0
+#define XSTORM_FCOE_VLAN_CONF_INNER_VLAN_FLAG (0x1<<3)
+#define XSTORM_FCOE_VLAN_CONF_INNER_VLAN_FLAG_SHIFT 3
+#define XSTORM_FCOE_VLAN_CONF_RESERVED (0xF<<4)
+#define XSTORM_FCOE_VLAN_CONF_RESERVED_SHIFT 4
+};
+
+/*
+ * FCoE 16-bit VLAN structure
+ */
+struct fcoe_vlan_fields {
+	u16 fields;
+#define FCOE_VLAN_FIELDS_VID (0xFFF<<0)
+#define FCOE_VLAN_FIELDS_VID_SHIFT 0
+#define FCOE_VLAN_FIELDS_CLI (0x1<<12)
+#define FCOE_VLAN_FIELDS_CLI_SHIFT 12
+#define FCOE_VLAN_FIELDS_PRI (0x7<<13)
+#define FCOE_VLAN_FIELDS_PRI_SHIFT 13
+};
+
+/*
+ * FCoE 16-bit VLAN union
+ */
+union fcoe_vlan_field_union {
+	struct fcoe_vlan_fields fields;
+	u16 val;
+};
+
+/*
+ * FCoE 16-bit VLAN/VIF union
+ */
+union fcoe_vlan_vif_field_union {
+	union fcoe_vlan_field_union vlan;
+	u16 vif;
+};
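Composing the 16-bit VLAN word from its parts follows the same mask/shift idiom; VID and priority come from the caller, and the helper name is illustrative:

static u16 fcoe_make_vlan(u16 vid, u8 pri)
{
	return (vid & FCOE_VLAN_FIELDS_VID) |
	       ((pri << FCOE_VLAN_FIELDS_PRI_SHIFT) & FCOE_VLAN_FIELDS_PRI);
}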
+
+/*
+ * FCoE context section
+ */
+struct xstorm_fcoe_context_section {
+#if defined(__BIG_ENDIAN)
+	u8 cs_ctl;
+	u8 s_id[3];
+#elif defined(__LITTLE_ENDIAN)
+	u8 s_id[3];
+	u8 cs_ctl;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 rctl;
+	u8 d_id[3];
+#elif defined(__LITTLE_ENDIAN)
+	u8 d_id[3];
+	u8 rctl;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 sq_xfrq_lcq_confq_size;
+	u16 tx_max_fc_pay_len;
+#elif defined(__LITTLE_ENDIAN)
+	u16 tx_max_fc_pay_len;
+	u16 sq_xfrq_lcq_confq_size;
+#endif
+	u32 lcq_prod;
+#if defined(__BIG_ENDIAN)
+	u8 port_id;
+	u8 func_id;
+	u8 seq_id;
+	struct xstorm_fcoe_context_flags tx_flags;
+#elif defined(__LITTLE_ENDIAN)
+	struct xstorm_fcoe_context_flags tx_flags;
+	u8 seq_id;
+	u8 func_id;
+	u8 port_id;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 mtu;
+	u8 func_mode;
+	u8 vnic_id;
+#elif defined(__LITTLE_ENDIAN)
+	u8 vnic_id;
+	u8 func_mode;
+	u16 mtu;
+#endif
+	struct regpair confq_curr_page_addr;
+	struct fcoe_cached_wqe cached_wqe[8];
+	struct regpair lcq_base_addr;
+	struct xstorm_fcoe_tce tce;
+	struct xstorm_fcoe_fcp_data fcp_data;
+#if defined(__BIG_ENDIAN)
+	u8 tx_max_conc_seqs_c3;
+	u8 vlan_flag;
+	u8 dcb_val;
+	u8 data_pb_cmd_size;
+#elif defined(__LITTLE_ENDIAN)
+	u8 data_pb_cmd_size;
+	u8 dcb_val;
+	u8 vlan_flag;
+	u8 tx_max_conc_seqs_c3;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 fcoe_tx_stat_params_ram_addr;
+	u16 fcoe_tx_fc_seq_ram_addr;
+#elif defined(__LITTLE_ENDIAN)
+	u16 fcoe_tx_fc_seq_ram_addr;
+	u16 fcoe_tx_stat_params_ram_addr;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 fcp_cmd_line_credit;
+	u8 eth_hdr_size;
+	u16 pbf_addr;
+#elif defined(__LITTLE_ENDIAN)
+	u16 pbf_addr;
+	u8 eth_hdr_size;
+	u8 fcp_cmd_line_credit;
+#endif
+#if defined(__BIG_ENDIAN)
+	union fcoe_vlan_vif_field_union multi_func_val;
+	u8 page_log_size;
+	struct xstorm_fcoe_vlan_conf orig_vlan_conf;
+#elif defined(__LITTLE_ENDIAN)
+	struct xstorm_fcoe_vlan_conf orig_vlan_conf;
+	u8 page_log_size;
+	union fcoe_vlan_vif_field_union multi_func_val;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 fcp_cmd_frame_size;
+	u16 pbf_addr_ff;
+#elif defined(__LITTLE_ENDIAN)
+	u16 pbf_addr_ff;
+	u16 fcp_cmd_frame_size;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 vlan_num;
+	u8 cos;
+	u8 cache_xfrq_cons;
+	u8 cache_sq_cons;
+#elif defined(__LITTLE_ENDIAN)
+	u8 cache_sq_cons;
+	u8 cache_xfrq_cons;
+	u8 cos;
+	u8 vlan_num;
+#endif
+	u32 verify_tx_seq;
+};
+
+/*
+ * Xstorm FCoE Storm Context
+ */
+struct xstorm_fcoe_st_context {
+	struct xstorm_fcoe_eth_context_section eth;
+	struct xstorm_fcoe_context_section fcoe;
+};
+
+/*
+ * FCoE connection context
+ */
+struct fcoe_context {
+	struct ustorm_fcoe_st_context ustorm_st_context;
+	struct tstorm_fcoe_st_context tstorm_st_context;
+	struct xstorm_fcoe_ag_context xstorm_ag_context;
+	struct tstorm_fcoe_ag_context tstorm_ag_context;
+	struct ustorm_fcoe_ag_context ustorm_ag_context;
+	struct timers_block_context timers_context;
+	struct xstorm_fcoe_st_context xstorm_st_context;
+};
+
+/*
+ * FCoE init params passed by driver to FW in FCoE init ramrod
+ * $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_init_ramrod_params {
+	struct fcoe_kwqe_init1 init_kwqe1;
+	struct fcoe_kwqe_init2 init_kwqe2;
+	struct fcoe_kwqe_init3 init_kwqe3;
+	struct regpair eq_pbl_base;
+	__le32 eq_pbl_size;
+	__le32 reserved2;
+	__le16 eq_prod;
+	__le16 sb_num;
+	u8 sb_id;
+	u8 reserved0;
+	__le16 reserved1;
+};
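+
+/*
+ * Illustrative sketch, not part of this patch: $$KEEP_ENDIANNESS$$
+ * structures carry explicit __le16/__le32 members instead of the
+ * #if-swapped layout used elsewhere in this file, so the driver converts
+ * values itself. The helper below is hypothetical.
+ */
+static inline void fcoe_init_ramrod_set_eq(struct fcoe_init_ramrod_params *p,
+					   u32 pbl_size, u16 prod, u16 sb_num)
+{
+	p->eq_pbl_size = cpu_to_le32(pbl_size);
+	p->eq_prod = cpu_to_le16(prod);
+	p->sb_num = cpu_to_le16(sb_num);
+}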
+
+/*
+ * FCoE statistics params buffer passed by driver to FW in FCoE statistics
+ * ramrod $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_stat_ramrod_params {
+	struct fcoe_kwqe_stat stat_kwqe;
+};
+
+/*
+ * CQ DB CQ producer and pending completion counter
+ */
+struct iscsi_cq_db_prod_pnd_cmpltn_cnt {
+#if defined(__BIG_ENDIAN)
+	u16 cntr;
+	u16 prod;
+#elif defined(__LITTLE_ENDIAN)
+	u16 prod;
+	u16 cntr;
+#endif
+};
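+
+/*
+ * Illustrative note, not part of this patch: throughout this file,
+ * sub-word members are declared in opposite order under __BIG_ENDIAN and
+ * __LITTLE_ENDIAN so that each field keeps the same bit position within
+ * its 32-bit word once the word reaches the (little-endian) chip;
+ * big-endian hosts are assumed to byte-swap whole 32-bit words. On
+ * either host order, this hypothetical helper composes the same value:
+ */
+static inline u32 iscsi_cq_db_to_word(const struct iscsi_cq_db_prod_pnd_cmpltn_cnt *e)
+{
+	/* prod lands in bits 0-15, cntr in bits 16-31, regardless of host */
+	return ((u32)e->cntr << 16) | e->prod;
+}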
+
+/*
+ * CQ DB CQ producer and pending completion counter array
+ */
+struct iscsi_cq_db_prod_pnd_cmpltn_cnt_arr {
+	struct iscsi_cq_db_prod_pnd_cmpltn_cnt prod_pend_comp[8];
+};
+
+/*
+ * Cstorm CQ sequence to notify array, updated by driver
+ */
+struct iscsi_cq_db_sqn_2_notify_arr {
+	u16 sqn[8];
+};
+
+/*
+ * Cstorm iSCSI Storm Context
+ */
+struct cstorm_iscsi_st_context {
+	struct iscsi_cq_db_prod_pnd_cmpltn_cnt_arr cq_c_prod_pend_comp_ctr_arr;
+	struct iscsi_cq_db_sqn_2_notify_arr cq_c_prod_sqn_arr;
+	struct iscsi_cq_db_sqn_2_notify_arr cq_c_sqn_2_notify_arr;
+	struct regpair hq_pbl_base;
+	struct regpair hq_curr_pbe;
+	struct regpair task_pbl_base;
+	struct regpair cq_db_base;
+#if defined(__BIG_ENDIAN)
+	u16 hq_bd_itt;
+	u16 iscsi_conn_id;
+#elif defined(__LITTLE_ENDIAN)
+	u16 iscsi_conn_id;
+	u16 hq_bd_itt;
+#endif
+	u32 hq_bd_data_segment_len;
+	u32 hq_bd_buffer_offset;
+#if defined(__BIG_ENDIAN)
+	u8 rsrv;
+	u8 cq_proc_en_bit_map;
+	u8 cq_pend_comp_itt_valid_bit_map;
+	u8 hq_bd_opcode;
+#elif defined(__LITTLE_ENDIAN)
+	u8 hq_bd_opcode;
+	u8 cq_pend_comp_itt_valid_bit_map;
+	u8 cq_proc_en_bit_map;
+	u8 rsrv;
+#endif
+	u32 hq_tcp_seq;
+#if defined(__BIG_ENDIAN)
+	u16 flags;
+#define CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN (0x1<<0)
+#define CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN_SHIFT 0
+#define CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN (0x1<<1)
+#define CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN_SHIFT 1
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_CTXT_VALID (0x1<<2)
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_CTXT_VALID_SHIFT 2
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_LCL_CMPLN_FLG (0x1<<3)
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_LCL_CMPLN_FLG_SHIFT 3
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_WRITE_TASK (0x1<<4)
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_WRITE_TASK_SHIFT 4
+#define CSTORM_ISCSI_ST_CONTEXT_CTRL_FLAGS_RSRV (0x7FF<<5)
+#define CSTORM_ISCSI_ST_CONTEXT_CTRL_FLAGS_RSRV_SHIFT 5
+	u16 hq_cons;
+#elif defined(__LITTLE_ENDIAN)
+	u16 hq_cons;
+	u16 flags;
+#define CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN (0x1<<0)
+#define CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN_SHIFT 0
+#define CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN (0x1<<1)
+#define CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN_SHIFT 1
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_CTXT_VALID (0x1<<2)
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_CTXT_VALID_SHIFT 2
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_LCL_CMPLN_FLG (0x1<<3)
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_LCL_CMPLN_FLG_SHIFT 3
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_WRITE_TASK (0x1<<4)
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_WRITE_TASK_SHIFT 4
+#define CSTORM_ISCSI_ST_CONTEXT_CTRL_FLAGS_RSRV (0x7FF<<5)
+#define CSTORM_ISCSI_ST_CONTEXT_CTRL_FLAGS_RSRV_SHIFT 5
+#endif
+	struct regpair rsrv1;
+};
+
+
+/*
+ * SCSI read/write SQ WQE
+ */
+struct iscsi_cmd_pdu_hdr_little_endian {
+#if defined(__BIG_ENDIAN)
+	u8 opcode;
+	u8 op_attr;
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_ATTRIBUTES (0x7<<0)
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_ATTRIBUTES_SHIFT 0
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_RSRV1 (0x3<<3)
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 3
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_WRITE_FLAG (0x1<<5)
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_WRITE_FLAG_SHIFT 5
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_READ_FLAG (0x1<<6)
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_READ_FLAG_SHIFT 6
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG (0x1<<7)
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG_SHIFT 7
+	u16 rsrv0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 rsrv0;
+	u8 op_attr;
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_ATTRIBUTES (0x7<<0)
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_ATTRIBUTES_SHIFT 0
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_RSRV1 (0x3<<3)
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 3
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_WRITE_FLAG (0x1<<5)
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_WRITE_FLAG_SHIFT 5
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_READ_FLAG (0x1<<6)
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_READ_FLAG_SHIFT 6
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG (0x1<<7)
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG_SHIFT 7
+	u8 opcode;
+#endif
+	u32 data_fields;
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH (0xFFFFFF<<0)
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT 0
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH (0xFF<<24)
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT 24
+	struct regpair lun;
+	u32 itt;
+	u32 expected_data_transfer_length;
+	u32 cmd_sn;
+	u32 exp_stat_sn;
+	u32 scsi_command_block[4];
+};
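+
+/*
+ * Illustrative sketch, not part of this patch: encoding DataSegmentLength
+ * and TotalAHSLength into data_fields with the macros above; the helper
+ * name is hypothetical and byte-order handling is elided.
+ */
+static inline u32 iscsi_cmd_pack_data_fields(u32 data_seg_len, u8 ahs_len)
+{
+	u32 v = 0;
+
+	v |= (data_seg_len << ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT) &
+	     ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH;
+	v |= ((u32)ahs_len << ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT) &
+	     ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH;
+	return v;
+}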
+
+
+/*
+ * Buffer per connection, used in Tstorm
+ */
+struct iscsi_conn_buf {
+	struct regpair reserved[8];
+};
+
+
+/*
+ * Ustorm iSCSI RQ doorbell data
+ */
+struct ustorm_iscsi_rq_db {
+	struct regpair pbl_base;
+	struct regpair curr_pbe;
+};
+
+/*
+ * Ustorm iSCSI R2TQ doorbell data
+ */
+struct ustorm_iscsi_r2tq_db {
+	struct regpair pbl_base;
+	struct regpair curr_pbe;
+};
+
+/*
+ * Ustorm iSCSI CQ doorbell data
+ */
+struct ustorm_iscsi_cq_db {
+#if defined(__BIG_ENDIAN)
+	u16 cq_sn;
+	u16 prod;
+#elif defined(__LITTLE_ENDIAN)
+	u16 prod;
+	u16 cq_sn;
+#endif
+	struct regpair curr_pbe;
+};
+
+/*
+ * Ustorm iSCSI rings doorbell data
+ */
+struct rings_db {
+	struct ustorm_iscsi_rq_db rq;
+	struct ustorm_iscsi_r2tq_db r2tq;
+	struct ustorm_iscsi_cq_db cq[8];
+#if defined(__BIG_ENDIAN)
+	u16 rq_prod;
+	u16 r2tq_prod;
+#elif defined(__LITTLE_ENDIAN)
+	u16 r2tq_prod;
+	u16 rq_prod;
+#endif
+	struct regpair cq_pbl_base;
+};
+
+/*
+ * Ustorm iSCSI placement database
+ */
+struct ustorm_iscsi_placement_db {
+	u32 sgl_base_lo;
+	u32 sgl_base_hi;
+	u32 local_sge_0_address_hi;
+	u32 local_sge_0_address_lo;
+#if defined(__BIG_ENDIAN)
+	u16 curr_sge_offset;
+	u16 local_sge_0_size;
+#elif defined(__LITTLE_ENDIAN)
+	u16 local_sge_0_size;
+	u16 curr_sge_offset;
+#endif
+	u32 local_sge_1_address_hi;
+	u32 local_sge_1_address_lo;
+#if defined(__BIG_ENDIAN)
+	u8 exp_padding_2b;
+	u8 nal_len_3b;
+	u16 local_sge_1_size;
+#elif defined(__LITTLE_ENDIAN)
+	u16 local_sge_1_size;
+	u8 nal_len_3b;
+	u8 exp_padding_2b;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 sgl_size;
+	u8 local_sge_index_2b;
+	u16 reserved7;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved7;
+	u8 local_sge_index_2b;
+	u8 sgl_size;
+#endif
+	u32 rem_pdu;
+	u32 place_db_bitfield_1;
+#define USTORM_ISCSI_PLACEMENT_DB_REM_PDU_PAYLOAD (0xFFFFFF<<0)
+#define USTORM_ISCSI_PLACEMENT_DB_REM_PDU_PAYLOAD_SHIFT 0
+#define USTORM_ISCSI_PLACEMENT_DB_CQ_ID (0xFF<<24)
+#define USTORM_ISCSI_PLACEMENT_DB_CQ_ID_SHIFT 24
+	u32 place_db_bitfield_2;
+#define USTORM_ISCSI_PLACEMENT_DB_BYTES_2_TRUNCATE (0xFFFFFF<<0)
+#define USTORM_ISCSI_PLACEMENT_DB_BYTES_2_TRUNCATE_SHIFT 0
+#define USTORM_ISCSI_PLACEMENT_DB_HOST_SGE_INDEX (0xFF<<24)
+#define USTORM_ISCSI_PLACEMENT_DB_HOST_SGE_INDEX_SHIFT 24
+	u32 nal;
+#define USTORM_ISCSI_PLACEMENT_DB_REM_SGE_SIZE (0xFFFFFF<<0)
+#define USTORM_ISCSI_PLACEMENT_DB_REM_SGE_SIZE_SHIFT 0
+#define USTORM_ISCSI_PLACEMENT_DB_EXP_DIGEST_3B (0xFF<<24)
+#define USTORM_ISCSI_PLACEMENT_DB_EXP_DIGEST_3B_SHIFT 24
+};
+
+/*
+ * Ustorm iSCSI Storm Context
+ */
+struct ustorm_iscsi_st_context {
+	u32 exp_stat_sn;
+	u32 exp_data_sn;
+	struct rings_db ring;
+	struct regpair task_pbl_base;
+	struct regpair tce_phy_addr;
+	struct ustorm_iscsi_placement_db place_db;
+	u32 reserved8;
+	u32 rem_rcv_len;
+#if defined(__BIG_ENDIAN)
+	u16 hdr_itt;
+	u16 iscsi_conn_id;
+#elif defined(__LITTLE_ENDIAN)
+	u16 iscsi_conn_id;
+	u16 hdr_itt;
+#endif
+	u32 nal_bytes;
+#if defined(__BIG_ENDIAN)
+	u8 hdr_second_byte_union;
+	u8 bitfield_0;
+#define USTORM_ISCSI_ST_CONTEXT_BMIDDLEOFPDU (0x1<<0)
+#define USTORM_ISCSI_ST_CONTEXT_BMIDDLEOFPDU_SHIFT 0
+#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE (0x1<<1)
+#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE_SHIFT 1
+#define USTORM_ISCSI_ST_CONTEXT_BRESETCRC (0x1<<2)
+#define USTORM_ISCSI_ST_CONTEXT_BRESETCRC_SHIFT 2
+#define USTORM_ISCSI_ST_CONTEXT_RESERVED1 (0x1F<<3)
+#define USTORM_ISCSI_ST_CONTEXT_RESERVED1_SHIFT 3
+	u8 task_pdu_cache_index;
+	u8 task_pbe_cache_index;
+#elif defined(__LITTLE_ENDIAN)
+	u8 task_pbe_cache_index;
+	u8 task_pdu_cache_index;
+	u8 bitfield_0;
+#define USTORM_ISCSI_ST_CONTEXT_BMIDDLEOFPDU (0x1<<0)
+#define USTORM_ISCSI_ST_CONTEXT_BMIDDLEOFPDU_SHIFT 0
+#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE (0x1<<1)
+#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE_SHIFT 1
+#define USTORM_ISCSI_ST_CONTEXT_BRESETCRC (0x1<<2)
+#define USTORM_ISCSI_ST_CONTEXT_BRESETCRC_SHIFT 2
+#define USTORM_ISCSI_ST_CONTEXT_RESERVED1 (0x1F<<3)
+#define USTORM_ISCSI_ST_CONTEXT_RESERVED1_SHIFT 3
+	u8 hdr_second_byte_union;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 reserved3;
+	u8 reserved2;
+	u8 acDecrement;
+#elif defined(__LITTLE_ENDIAN)
+	u8 acDecrement;
+	u8 reserved2;
+	u16 reserved3;
+#endif
+	u32 task_stat;
+#if defined(__BIG_ENDIAN)
+	u8 hdr_opcode;
+	u8 num_cqs;
+	u16 reserved5;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved5;
+	u8 num_cqs;
+	u8 hdr_opcode;
+#endif
+	u32 negotiated_rx;
+#define USTORM_ISCSI_ST_CONTEXT_MAX_RECV_PDU_LENGTH (0xFFFFFF<<0)
+#define USTORM_ISCSI_ST_CONTEXT_MAX_RECV_PDU_LENGTH_SHIFT 0
+#define USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS (0xFF<<24)
+#define USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT 24
+	u32 negotiated_rx_and_flags;
+#define USTORM_ISCSI_ST_CONTEXT_MAX_BURST_LENGTH (0xFFFFFF<<0)
+#define USTORM_ISCSI_ST_CONTEXT_MAX_BURST_LENGTH_SHIFT 0
+#define USTORM_ISCSI_ST_CONTEXT_B_CQE_POSTED_OR_HEADER_CACHED (0x1<<24)
+#define USTORM_ISCSI_ST_CONTEXT_B_CQE_POSTED_OR_HEADER_CACHED_SHIFT 24
+#define USTORM_ISCSI_ST_CONTEXT_B_HDR_DIGEST_EN (0x1<<25)
+#define USTORM_ISCSI_ST_CONTEXT_B_HDR_DIGEST_EN_SHIFT 25
+#define USTORM_ISCSI_ST_CONTEXT_B_DATA_DIGEST_EN (0x1<<26)
+#define USTORM_ISCSI_ST_CONTEXT_B_DATA_DIGEST_EN_SHIFT 26
+#define USTORM_ISCSI_ST_CONTEXT_B_PROTOCOL_ERROR (0x1<<27)
+#define USTORM_ISCSI_ST_CONTEXT_B_PROTOCOL_ERROR_SHIFT 27
+#define USTORM_ISCSI_ST_CONTEXT_B_TASK_VALID (0x1<<28)
+#define USTORM_ISCSI_ST_CONTEXT_B_TASK_VALID_SHIFT 28
+#define USTORM_ISCSI_ST_CONTEXT_TASK_TYPE (0x3<<29)
+#define USTORM_ISCSI_ST_CONTEXT_TASK_TYPE_SHIFT 29
+#define USTORM_ISCSI_ST_CONTEXT_B_ALL_DATA_ACKED (0x1<<31)
+#define USTORM_ISCSI_ST_CONTEXT_B_ALL_DATA_ACKED_SHIFT 31
+};
+
+/*
+ * TCP context region, shared by TOE, RDMA and iSCSI
+ */
+struct tstorm_tcp_st_context_section {
+	u32 flags1;
+#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_SRTT (0xFFFFFF<<0)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_SRTT_SHIFT 0
+#define TSTORM_TCP_ST_CONTEXT_SECTION_PAWS_INVALID (0x1<<24)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_PAWS_INVALID_SHIFT 24
+#define TSTORM_TCP_ST_CONTEXT_SECTION_TIMESTAMP_EXISTS (0x1<<25)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_TIMESTAMP_EXISTS_SHIFT 25
+#define TSTORM_TCP_ST_CONTEXT_SECTION_RESERVED0 (0x1<<26)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_RESERVED0_SHIFT 26
+#define TSTORM_TCP_ST_CONTEXT_SECTION_STOP_RX_PAYLOAD (0x1<<27)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_STOP_RX_PAYLOAD_SHIFT 27
+#define TSTORM_TCP_ST_CONTEXT_SECTION_KA_ENABLED (0x1<<28)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_KA_ENABLED_SHIFT 28
+#define TSTORM_TCP_ST_CONTEXT_SECTION_FIRST_RTO_ESTIMATE (0x1<<29)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_FIRST_RTO_ESTIMATE_SHIFT 29
+#define TSTORM_TCP_ST_CONTEXT_SECTION_MAX_SEG_RETRANSMIT_EN (0x1<<30)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_MAX_SEG_RETRANSMIT_EN_SHIFT 30
+#define TSTORM_TCP_ST_CONTEXT_SECTION_LAST_ISLE_HAS_FIN (0x1<<31)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_LAST_ISLE_HAS_FIN_SHIFT 31
+	u32 flags2;
+#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_VARIATION (0xFFFFFF<<0)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_VARIATION_SHIFT 0
+#define TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN (0x1<<24)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN_SHIFT 24
+#define TSTORM_TCP_ST_CONTEXT_SECTION_DA_COUNTER_EN (0x1<<25)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_DA_COUNTER_EN_SHIFT 25
+#define __TSTORM_TCP_ST_CONTEXT_SECTION_KA_PROBE_SENT (0x1<<26)
+#define __TSTORM_TCP_ST_CONTEXT_SECTION_KA_PROBE_SENT_SHIFT 26
+#define __TSTORM_TCP_ST_CONTEXT_SECTION_PERSIST_PROBE_SENT (0x1<<27)
+#define __TSTORM_TCP_ST_CONTEXT_SECTION_PERSIST_PROBE_SENT_SHIFT 27
+#define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L2_STATSTICS (0x1<<28)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L2_STATSTICS_SHIFT 28
+#define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L4_STATSTICS (0x1<<29)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT 29
+#define __TSTORM_TCP_ST_CONTEXT_SECTION_IN_WINDOW_RST_ATTACK (0x1<<30)
+#define __TSTORM_TCP_ST_CONTEXT_SECTION_IN_WINDOW_RST_ATTACK_SHIFT 30
+#define __TSTORM_TCP_ST_CONTEXT_SECTION_IN_WINDOW_SYN_ATTACK (0x1<<31)
+#define __TSTORM_TCP_ST_CONTEXT_SECTION_IN_WINDOW_SYN_ATTACK_SHIFT 31
+#if defined(__BIG_ENDIAN)
+	u16 mss;
+	u8 tcp_sm_state;
+	u8 rto_exp;
+#elif defined(__LITTLE_ENDIAN)
+	u8 rto_exp;
+	u8 tcp_sm_state;
+	u16 mss;
+#endif
+	u32 rcv_nxt;
+	u32 timestamp_recent;
+	u32 timestamp_recent_time;
+	u32 cwnd;
+	u32 ss_thresh;
+	u32 cwnd_accum;
+	u32 prev_seg_seq;
+	u32 expected_rel_seq;
+	u32 recover;
+#if defined(__BIG_ENDIAN)
+	u8 retransmit_count;
+	u8 ka_max_probe_count;
+	u8 persist_probe_count;
+	u8 ka_probe_count;
+#elif defined(__LITTLE_ENDIAN)
+	u8 ka_probe_count;
+	u8 persist_probe_count;
+	u8 ka_max_probe_count;
+	u8 retransmit_count;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 statistics_counter_id;
+	u8 ooo_support_mode;
+	u8 snd_wnd_scale;
+	u8 dup_ack_count;
+#elif defined(__LITTLE_ENDIAN)
+	u8 dup_ack_count;
+	u8 snd_wnd_scale;
+	u8 ooo_support_mode;
+	u8 statistics_counter_id;
+#endif
+	u32 retransmit_start_time;
+	u32 ka_timeout;
+	u32 ka_interval;
+	u32 isle_start_seq;
+	u32 isle_end_seq;
+#if defined(__BIG_ENDIAN)
+	u16 second_isle_address;
+	u16 recent_seg_wnd;
+#elif defined(__LITTLE_ENDIAN)
+	u16 recent_seg_wnd;
+	u16 second_isle_address;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 max_isles_ever_happened;
+	u8 isles_number;
+	u16 last_isle_address;
+#elif defined(__LITTLE_ENDIAN)
+	u16 last_isle_address;
+	u8 isles_number;
+	u8 max_isles_ever_happened;
+#endif
+	u32 max_rt_time;
+#if defined(__BIG_ENDIAN)
+	u16 lsb_mac_address;
+	u16 vlan_id;
+#elif defined(__LITTLE_ENDIAN)
+	u16 vlan_id;
+	u16 lsb_mac_address;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 msb_mac_address;
+	u16 mid_mac_address;
+#elif defined(__LITTLE_ENDIAN)
+	u16 mid_mac_address;
+	u16 msb_mac_address;
+#endif
+	u32 rightmost_received_seq;
+};
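+
+/*
+ * Illustrative sketch, not part of this patch: the 24-bit smoothed RTT
+ * shares flags1 with the single-bit flags above and is recovered with
+ * the usual mask/shift pair. The helper name is hypothetical.
+ */
+static inline u32 tstorm_tcp_get_srtt(const struct tstorm_tcp_st_context_section *s)
+{
+	return (s->flags1 & TSTORM_TCP_ST_CONTEXT_SECTION_RTT_SRTT) >>
+	       TSTORM_TCP_ST_CONTEXT_SECTION_RTT_SRTT_SHIFT;
+}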
+
+/*
+ * Termination variables
+ */
+struct iscsi_term_vars {
+	u8 BitMap;
+#define ISCSI_TERM_VARS_TCP_STATE (0xF<<0)
+#define ISCSI_TERM_VARS_TCP_STATE_SHIFT 0
+#define ISCSI_TERM_VARS_FIN_RECEIVED_SBIT (0x1<<4)
+#define ISCSI_TERM_VARS_FIN_RECEIVED_SBIT_SHIFT 4
+#define ISCSI_TERM_VARS_ACK_ON_FIN_RECEIVED_SBIT (0x1<<5)
+#define ISCSI_TERM_VARS_ACK_ON_FIN_RECEIVED_SBIT_SHIFT 5
+#define ISCSI_TERM_VARS_TERM_ON_CHIP (0x1<<6)
+#define ISCSI_TERM_VARS_TERM_ON_CHIP_SHIFT 6
+#define ISCSI_TERM_VARS_RSRV (0x1<<7)
+#define ISCSI_TERM_VARS_RSRV_SHIFT 7
+};
+
+/*
+ * Tstorm iSCSI storm context section
+ */
+struct tstorm_iscsi_st_context_section {
+	u32 nalPayload;
+	u32 b2nh;
+#if defined(__BIG_ENDIAN)
+	u16 rq_cons;
+	u8 flags;
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_HDR_DIGEST_EN (0x1<<0)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_HDR_DIGEST_EN_SHIFT 0
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DATA_DIGEST_EN (0x1<<1)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DATA_DIGEST_EN_SHIFT 1
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_PARTIAL_HEADER (0x1<<2)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_PARTIAL_HEADER_SHIFT 2
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_FULL_FEATURE (0x1<<3)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_FULL_FEATURE_SHIFT 3
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DROP_ALL_PDUS (0x1<<4)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DROP_ALL_PDUS_SHIFT 4
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_NALLEN (0x3<<5)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_NALLEN_SHIFT 5
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_RSRV0 (0x1<<7)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_RSRV0_SHIFT 7
+	u8 hdr_bytes_2_fetch;
+#elif defined(__LITTLE_ENDIAN)
+	u8 hdr_bytes_2_fetch;
+	u8 flags;
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_HDR_DIGEST_EN (0x1<<0)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_HDR_DIGEST_EN_SHIFT 0
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DATA_DIGEST_EN (0x1<<1)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DATA_DIGEST_EN_SHIFT 1
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_PARTIAL_HEADER (0x1<<2)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_PARTIAL_HEADER_SHIFT 2
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_FULL_FEATURE (0x1<<3)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_FULL_FEATURE_SHIFT 3
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DROP_ALL_PDUS (0x1<<4)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DROP_ALL_PDUS_SHIFT 4
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_NALLEN (0x3<<5)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_NALLEN_SHIFT 5
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_RSRV0 (0x1<<7)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_RSRV0_SHIFT 7
+	u16 rq_cons;
+#endif
+	struct regpair rq_db_phy_addr;
+#if defined(__BIG_ENDIAN)
+	struct iscsi_term_vars term_vars;
+	u8 rsrv1;
+	u16 iscsi_conn_id;
+#elif defined(__LITTLE_ENDIAN)
+	u16 iscsi_conn_id;
+	u8 rsrv1;
+	struct iscsi_term_vars term_vars;
+#endif
+	u32 process_nxt;
+};
+
+/*
+ * The iSCSI non-aggregative context of Tstorm
+ */
+struct tstorm_iscsi_st_context {
+	struct tstorm_tcp_st_context_section tcp;
+	struct tstorm_iscsi_st_context_section iscsi;
 };
 
 /*
@@ -3509,7 +4463,27 @@
 	u16 window_scaling_factor;
 	u16 pseudo_csum;
 #endif
-	u32 reserved2;
+#if defined(__BIG_ENDIAN)
+	u16 reserved2;
+	u8 statistics_counter_id;
+	u8 statistics_params;
+#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L2_STATSTICS (0x1<<0)
+#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L2_STATSTICS_SHIFT 0
+#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L4_STATSTICS (0x1<<1)
+#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT 1
+#define XSTORM_TCP_CONTEXT_SECTION_RESERVED (0x3F<<2)
+#define XSTORM_TCP_CONTEXT_SECTION_RESERVED_SHIFT 2
+#elif defined(__LITTLE_ENDIAN)
+	u8 statistics_params;
+#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L2_STATSTICS (0x1<<0)
+#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L2_STATSTICS_SHIFT 0
+#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L4_STATSTICS (0x1<<1)
+#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT 1
+#define XSTORM_TCP_CONTEXT_SECTION_RESERVED (0x3F<<2)
+#define XSTORM_TCP_CONTEXT_SECTION_RESERVED_SHIFT 2
+	u8 statistics_counter_id;
+	u16 reserved2;
+#endif
 	u32 ts_time_diff;
 	u32 __next_timer_expir;
 };
@@ -3522,29 +4496,31 @@
 	union xstorm_ip_context_section_types ip_union;
 	struct xstorm_tcp_context_section tcp;
 #if defined(__BIG_ENDIAN)
-	u16 reserved;
-	u8 statistics_params;
-#define XSTORM_COMMON_CONTEXT_SECTION_UPDATE_L2_STATSTICS (0x1<<0)
-#define XSTORM_COMMON_CONTEXT_SECTION_UPDATE_L2_STATSTICS_SHIFT 0
-#define XSTORM_COMMON_CONTEXT_SECTION_UPDATE_L4_STATSTICS (0x1<<1)
-#define XSTORM_COMMON_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT 1
-#define XSTORM_COMMON_CONTEXT_SECTION_STATISTICS_COUNTER_ID (0x1F<<2)
-#define XSTORM_COMMON_CONTEXT_SECTION_STATISTICS_COUNTER_ID_SHIFT 2
-#define XSTORM_COMMON_CONTEXT_SECTION_DCB_EXISTS (0x1<<7)
-#define XSTORM_COMMON_CONTEXT_SECTION_DCB_EXISTS_SHIFT 7
+	u8 __dcb_val;
+	u8 flags;
+#define XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED (0x1<<0)
+#define XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED_SHIFT 0
+#define XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT (0x7<<1)
+#define XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT 1
+#define XSTORM_COMMON_CONTEXT_SECTION_VLAN_MODE (0x1<<4)
+#define XSTORM_COMMON_CONTEXT_SECTION_VLAN_MODE_SHIFT 4
+#define XSTORM_COMMON_CONTEXT_SECTION_ORIGINAL_PRIORITY (0x7<<5)
+#define XSTORM_COMMON_CONTEXT_SECTION_ORIGINAL_PRIORITY_SHIFT 5
+	u8 reserved;
 	u8 ip_version_1b;
 #elif defined(__LITTLE_ENDIAN)
 	u8 ip_version_1b;
-	u8 statistics_params;
-#define XSTORM_COMMON_CONTEXT_SECTION_UPDATE_L2_STATSTICS (0x1<<0)
-#define XSTORM_COMMON_CONTEXT_SECTION_UPDATE_L2_STATSTICS_SHIFT 0
-#define XSTORM_COMMON_CONTEXT_SECTION_UPDATE_L4_STATSTICS (0x1<<1)
-#define XSTORM_COMMON_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT 1
-#define XSTORM_COMMON_CONTEXT_SECTION_STATISTICS_COUNTER_ID (0x1F<<2)
-#define XSTORM_COMMON_CONTEXT_SECTION_STATISTICS_COUNTER_ID_SHIFT 2
-#define XSTORM_COMMON_CONTEXT_SECTION_DCB_EXISTS (0x1<<7)
-#define XSTORM_COMMON_CONTEXT_SECTION_DCB_EXISTS_SHIFT 7
-	u16 reserved;
+	u8 reserved;
+	u8 flags;
+#define XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED (0x1<<0)
+#define XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED_SHIFT 0
+#define XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT (0x7<<1)
+#define XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT 1
+#define XSTORM_COMMON_CONTEXT_SECTION_VLAN_MODE (0x1<<4)
+#define XSTORM_COMMON_CONTEXT_SECTION_VLAN_MODE_SHIFT 4
+#define XSTORM_COMMON_CONTEXT_SECTION_ORIGINAL_PRIORITY (0x7<<5)
+#define XSTORM_COMMON_CONTEXT_SECTION_ORIGINAL_PRIORITY_SHIFT 5
+	u8 __dcb_val;
 #endif
 };
 
@@ -3682,99 +4658,6 @@
 };
 
 /*
- * CQ DB CQ producer and pending completion counter
- */
-struct iscsi_cq_db_prod_pnd_cmpltn_cnt {
-#if defined(__BIG_ENDIAN)
-	u16 cntr;
-	u16 prod;
-#elif defined(__LITTLE_ENDIAN)
-	u16 prod;
-	u16 cntr;
-#endif
-};
-
-/*
- * CQ DB pending completion ITT array
- */
-struct iscsi_cq_db_prod_pnd_cmpltn_cnt_arr {
-	struct iscsi_cq_db_prod_pnd_cmpltn_cnt prod_pend_comp[8];
-};
-
-/*
- * Cstorm CQ sequence to notify array, updated by driver
- */
-struct iscsi_cq_db_sqn_2_notify_arr {
-	u16 sqn[8];
-};
-
-/*
- * Cstorm iSCSI Storm Context
- */
-struct cstorm_iscsi_st_context {
-	struct iscsi_cq_db_prod_pnd_cmpltn_cnt_arr cq_c_prod_pend_comp_ctr_arr;
-	struct iscsi_cq_db_sqn_2_notify_arr cq_c_prod_sqn_arr;
-	struct iscsi_cq_db_sqn_2_notify_arr cq_c_sqn_2_notify_arr;
-	struct regpair hq_pbl_base;
-	struct regpair hq_curr_pbe;
-	struct regpair task_pbl_base;
-	struct regpair cq_db_base;
-#if defined(__BIG_ENDIAN)
-	u16 hq_bd_itt;
-	u16 iscsi_conn_id;
-#elif defined(__LITTLE_ENDIAN)
-	u16 iscsi_conn_id;
-	u16 hq_bd_itt;
-#endif
-	u32 hq_bd_data_segment_len;
-	u32 hq_bd_buffer_offset;
-#if defined(__BIG_ENDIAN)
-	u8 timer_entry_idx;
-	u8 cq_proc_en_bit_map;
-	u8 cq_pend_comp_itt_valid_bit_map;
-	u8 hq_bd_opcode;
-#elif defined(__LITTLE_ENDIAN)
-	u8 hq_bd_opcode;
-	u8 cq_pend_comp_itt_valid_bit_map;
-	u8 cq_proc_en_bit_map;
-	u8 timer_entry_idx;
-#endif
-	u32 hq_tcp_seq;
-#if defined(__BIG_ENDIAN)
-	u16 flags;
-#define CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN (0x1<<0)
-#define CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN_SHIFT 0
-#define CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN (0x1<<1)
-#define CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN_SHIFT 1
-#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_CTXT_VALID (0x1<<2)
-#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_CTXT_VALID_SHIFT 2
-#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_LCL_CMPLN_FLG (0x1<<3)
-#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_LCL_CMPLN_FLG_SHIFT 3
-#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_WRITE_TASK (0x1<<4)
-#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_WRITE_TASK_SHIFT 4
-#define CSTORM_ISCSI_ST_CONTEXT_CTRL_FLAGS_RSRV (0x7FF<<5)
-#define CSTORM_ISCSI_ST_CONTEXT_CTRL_FLAGS_RSRV_SHIFT 5
-	u16 hq_cons;
-#elif defined(__LITTLE_ENDIAN)
-	u16 hq_cons;
-	u16 flags;
-#define CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN (0x1<<0)
-#define CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN_SHIFT 0
-#define CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN (0x1<<1)
-#define CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN_SHIFT 1
-#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_CTXT_VALID (0x1<<2)
-#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_CTXT_VALID_SHIFT 2
-#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_LCL_CMPLN_FLG (0x1<<3)
-#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_LCL_CMPLN_FLG_SHIFT 3
-#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_WRITE_TASK (0x1<<4)
-#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_WRITE_TASK_SHIFT 4
-#define CSTORM_ISCSI_ST_CONTEXT_CTRL_FLAGS_RSRV (0x7FF<<5)
-#define CSTORM_ISCSI_ST_CONTEXT_CTRL_FLAGS_RSRV_SHIFT 5
-#endif
-	struct regpair rsrv1;
-};
-
-/*
  * Iscsi connection context
  */
 struct iscsi_context {
@@ -3791,584 +4674,389 @@
 	struct cstorm_iscsi_st_context cstorm_st_context;
 };
 
+
 /*
- * FCoE KCQ CQE parameters
+ * PDU header of an iSCSI DATA-OUT
  */
-union fcoe_kcqe_params {
-	u32 reserved0[4];
+struct iscsi_data_pdu_hdr_little_endian {
+#if defined(__BIG_ENDIAN)
+	u8 opcode;
+	u8 op_attr;
+#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_RSRV1 (0x7F<<0)
+#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 0
+#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG (0x1<<7)
+#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG_SHIFT 7
+	u16 rsrv0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 rsrv0;
+	u8 op_attr;
+#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_RSRV1 (0x7F<<0)
+#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 0
+#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG (0x1<<7)
+#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG_SHIFT 7
+	u8 opcode;
+#endif
+	u32 data_fields;
+#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH (0xFFFFFF<<0)
+#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT 0
+#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH (0xFF<<24)
+#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT 24
+	struct regpair lun;
+	u32 itt;
+	u32 ttt;
+	u32 rsrv2;
+	u32 exp_stat_sn;
+	u32 rsrv3;
+	u32 data_sn;
+	u32 buffer_offset;
+	u32 rsrv4;
+};
+
+
+/*
+ * PDU header of an iSCSI login request
+ */
+struct iscsi_login_req_hdr_little_endian {
+#if defined(__BIG_ENDIAN)
+	u8 opcode;
+	u8 op_attr;
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_NSG (0x3<<0)
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_NSG_SHIFT 0
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CSG (0x3<<2)
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CSG_SHIFT 2
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_RSRV0 (0x3<<4)
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_RSRV0_SHIFT 4
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG (0x1<<6)
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG_SHIFT 6
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_TRANSIT (0x1<<7)
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_TRANSIT_SHIFT 7
+	u8 version_max;
+	u8 version_min;
+#elif defined(__LITTLE_ENDIAN)
+	u8 version_min;
+	u8 version_max;
+	u8 op_attr;
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_NSG (0x3<<0)
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_NSG_SHIFT 0
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CSG (0x3<<2)
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CSG_SHIFT 2
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_RSRV0 (0x3<<4)
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_RSRV0_SHIFT 4
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG (0x1<<6)
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG_SHIFT 6
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_TRANSIT (0x1<<7)
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_TRANSIT_SHIFT 7
+	u8 opcode;
+#endif
+	u32 data_fields;
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH (0xFFFFFF<<0)
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT 0
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH (0xFF<<24)
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT 24
+	u32 isid_lo;
+#if defined(__BIG_ENDIAN)
+	u16 isid_hi;
+	u16 tsih;
+#elif defined(__LITTLE_ENDIAN)
+	u16 tsih;
+	u16 isid_hi;
+#endif
+	u32 itt;
+#if defined(__BIG_ENDIAN)
+	u16 cid;
+	u16 rsrv1;
+#elif defined(__LITTLE_ENDIAN)
+	u16 rsrv1;
+	u16 cid;
+#endif
+	u32 cmd_sn;
+	u32 exp_stat_sn;
+	u32 rsrv2[4];
 };
 
 /*
- * FCoE KCQ CQE
+ * PDU header of an iSCSI logout request
  */
-struct fcoe_kcqe {
-	u32 fcoe_conn_id;
-	u32 completion_status;
-	u32 fcoe_conn_context_id;
-	union fcoe_kcqe_params params;
+struct iscsi_logout_req_hdr_little_endian {
 #if defined(__BIG_ENDIAN)
-	u8 flags;
-#define FCOE_KCQE_RESERVED0 (0x7<<0)
-#define FCOE_KCQE_RESERVED0_SHIFT 0
-#define FCOE_KCQE_RAMROD_COMPLETION (0x1<<3)
-#define FCOE_KCQE_RAMROD_COMPLETION_SHIFT 3
-#define FCOE_KCQE_LAYER_CODE (0x7<<4)
-#define FCOE_KCQE_LAYER_CODE_SHIFT 4
-#define FCOE_KCQE_LINKED_WITH_NEXT (0x1<<7)
-#define FCOE_KCQE_LINKED_WITH_NEXT_SHIFT 7
-	u8 op_code;
-	u16 qe_self_seq;
+	u8 opcode;
+	u8 op_attr;
+#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_REASON_CODE (0x7F<<0)
+#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_REASON_CODE_SHIFT 0
+#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_RSRV1_1 (0x1<<7)
+#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_RSRV1_1_SHIFT 7
+	u16 rsrv0;
 #elif defined(__LITTLE_ENDIAN)
-	u16 qe_self_seq;
-	u8 op_code;
-	u8 flags;
-#define FCOE_KCQE_RESERVED0 (0x7<<0)
-#define FCOE_KCQE_RESERVED0_SHIFT 0
-#define FCOE_KCQE_RAMROD_COMPLETION (0x1<<3)
-#define FCOE_KCQE_RAMROD_COMPLETION_SHIFT 3
-#define FCOE_KCQE_LAYER_CODE (0x7<<4)
-#define FCOE_KCQE_LAYER_CODE_SHIFT 4
-#define FCOE_KCQE_LINKED_WITH_NEXT (0x1<<7)
-#define FCOE_KCQE_LINKED_WITH_NEXT_SHIFT 7
+	u16 rsrv0;
+	u8 op_attr;
+#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_REASON_CODE (0x7F<<0)
+#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_REASON_CODE_SHIFT 0
+#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_RSRV1_1 (0x1<<7)
+#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_RSRV1_1_SHIFT 7
+	u8 opcode;
 #endif
+	u32 data_fields;
+#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH (0xFFFFFF<<0)
+#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT 0
+#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH (0xFF<<24)
+#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT 24
+	u32 rsrv2[2];
+	u32 itt;
+#if defined(__BIG_ENDIAN)
+	u16 cid;
+	u16 rsrv1;
+#elif defined(__LITTLE_ENDIAN)
+	u16 rsrv1;
+	u16 cid;
+#endif
+	u32 cmd_sn;
+	u32 exp_stat_sn;
+	u32 rsrv3[4];
 };
 
 /*
- * FCoE KWQE header
+ * PDU header of an iSCSI TMF request
  */
-struct fcoe_kwqe_header {
+struct iscsi_tmf_req_hdr_little_endian {
 #if defined(__BIG_ENDIAN)
-	u8 flags;
-#define FCOE_KWQE_HEADER_RESERVED0 (0xF<<0)
-#define FCOE_KWQE_HEADER_RESERVED0_SHIFT 0
-#define FCOE_KWQE_HEADER_LAYER_CODE (0x7<<4)
-#define FCOE_KWQE_HEADER_LAYER_CODE_SHIFT 4
-#define FCOE_KWQE_HEADER_RESERVED1 (0x1<<7)
-#define FCOE_KWQE_HEADER_RESERVED1_SHIFT 7
-	u8 op_code;
+	u8 opcode;
+	u8 op_attr;
+#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_FUNCTION (0x7F<<0)
+#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_FUNCTION_SHIFT 0
+#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_RSRV1_1 (0x1<<7)
+#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_RSRV1_1_SHIFT 7
+	u16 rsrv0;
 #elif defined(__LITTLE_ENDIAN)
-	u8 op_code;
-	u8 flags;
-#define FCOE_KWQE_HEADER_RESERVED0 (0xF<<0)
-#define FCOE_KWQE_HEADER_RESERVED0_SHIFT 0
-#define FCOE_KWQE_HEADER_LAYER_CODE (0x7<<4)
-#define FCOE_KWQE_HEADER_LAYER_CODE_SHIFT 4
-#define FCOE_KWQE_HEADER_RESERVED1 (0x1<<7)
-#define FCOE_KWQE_HEADER_RESERVED1_SHIFT 7
+	u16 rsrv0;
+	u8 op_attr;
+#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_FUNCTION (0x7F<<0)
+#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_FUNCTION_SHIFT 0
+#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_RSRV1_1 (0x1<<7)
+#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_RSRV1_1_SHIFT 7
+	u8 opcode;
 #endif
+	u32 data_fields;
+#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH (0xFFFFFF<<0)
+#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT 0
+#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH (0xFF<<24)
+#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT 24
+	struct regpair lun;
+	u32 itt;
+	u32 referenced_task_tag;
+	u32 cmd_sn;
+	u32 exp_stat_sn;
+	u32 ref_cmd_sn;
+	u32 exp_data_sn;
+	u32 rsrv2[2];
 };
 
 /*
- * FCoE firmware init request 1
+ * PDU header of an iSCSI Text request
  */
-struct fcoe_kwqe_init1 {
+struct iscsi_text_req_hdr_little_endian {
 #if defined(__BIG_ENDIAN)
-	struct fcoe_kwqe_header hdr;
-	u16 num_tasks;
+	u8 opcode;
+	u8 op_attr;
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_RSRV1 (0x3F<<0)
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 0
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG (0x1<<6)
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG_SHIFT 6
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_FINAL (0x1<<7)
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_FINAL_SHIFT 7
+	u16 rsrv0;
 #elif defined(__LITTLE_ENDIAN)
-	u16 num_tasks;
-	struct fcoe_kwqe_header hdr;
+	u16 rsrv0;
+	u8 op_attr;
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_RSRV1 (0x3F<<0)
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 0
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG (0x1<<6)
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG_SHIFT 6
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_FINAL (0x1<<7)
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_FINAL_SHIFT 7
+	u8 opcode;
 #endif
-	u32 task_list_pbl_addr_lo;
-	u32 task_list_pbl_addr_hi;
-	u32 dummy_buffer_addr_lo;
-	u32 dummy_buffer_addr_hi;
-#if defined(__BIG_ENDIAN)
-	u16 rq_num_wqes;
-	u16 sq_num_wqes;
-#elif defined(__LITTLE_ENDIAN)
-	u16 sq_num_wqes;
-	u16 rq_num_wqes;
-#endif
-#if defined(__BIG_ENDIAN)
-	u16 cq_num_wqes;
-	u16 rq_buffer_log_size;
-#elif defined(__LITTLE_ENDIAN)
-	u16 rq_buffer_log_size;
-	u16 cq_num_wqes;
-#endif
-#if defined(__BIG_ENDIAN)
-	u8 flags;
-#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE (0xF<<0)
-#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT 0
-#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC (0x7<<4)
-#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC_SHIFT 4
-#define FCOE_KWQE_INIT1_RESERVED1 (0x1<<7)
-#define FCOE_KWQE_INIT1_RESERVED1_SHIFT 7
-	u8 num_sessions_log;
-	u16 mtu;
-#elif defined(__LITTLE_ENDIAN)
-	u16 mtu;
-	u8 num_sessions_log;
-	u8 flags;
-#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE (0xF<<0)
-#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT 0
-#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC (0x7<<4)
-#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC_SHIFT 4
-#define FCOE_KWQE_INIT1_RESERVED1 (0x1<<7)
-#define FCOE_KWQE_INIT1_RESERVED1_SHIFT 7
-#endif
+	u32 data_fields;
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH (0xFFFFFF<<0)
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT 0
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH (0xFF<<24)
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT 24
+	struct regpair lun;
+	u32 itt;
+	u32 ttt;
+	u32 cmd_sn;
+	u32 exp_stat_sn;
+	u32 rsrv3[4];
 };
 
 /*
- * FCoE firmware init request 2
+ * PDU header of an iSCSI Nop-Out
  */
-struct fcoe_kwqe_init2 {
+struct iscsi_nop_out_hdr_little_endian {
 #if defined(__BIG_ENDIAN)
-	struct fcoe_kwqe_header hdr;
-	u16 reserved0;
+	u8 opcode;
+	u8 op_attr;
+#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV1 (0x7F<<0)
+#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 0
+#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV2_1 (0x1<<7)
+#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV2_1_SHIFT 7
+	u16 rsrv0;
 #elif defined(__LITTLE_ENDIAN)
-	u16 reserved0;
-	struct fcoe_kwqe_header hdr;
+	u16 rsrv0;
+	u8 op_attr;
+#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV1 (0x7F<<0)
+#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 0
+#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV2_1 (0x1<<7)
+#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV2_1_SHIFT 7
+	u8 opcode;
 #endif
-	u32 hash_tbl_pbl_addr_lo;
-	u32 hash_tbl_pbl_addr_hi;
-	u32 t2_hash_tbl_addr_lo;
-	u32 t2_hash_tbl_addr_hi;
-	u32 t2_ptr_hash_tbl_addr_lo;
-	u32 t2_ptr_hash_tbl_addr_hi;
-	u32 free_list_count;
+	u32 data_fields;
+#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH (0xFFFFFF<<0)
+#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT 0
+#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH (0xFF<<24)
+#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT 24
+	struct regpair lun;
+	u32 itt;
+	u32 ttt;
+	u32 cmd_sn;
+	u32 exp_stat_sn;
+	u32 rsrv3[4];
 };
 
 /*
- * FCoE firmware init request 3
+ * iSCSI PDU headers in little-endian form.
  */
-struct fcoe_kwqe_init3 {
-#if defined(__BIG_ENDIAN)
-	struct fcoe_kwqe_header hdr;
-	u16 reserved0;
-#elif defined(__LITTLE_ENDIAN)
-	u16 reserved0;
-	struct fcoe_kwqe_header hdr;
-#endif
-	u32 error_bit_map_lo;
-	u32 error_bit_map_hi;
-#if defined(__BIG_ENDIAN)
-	u8 reserved21[3];
-	u8 cached_session_enable;
-#elif defined(__LITTLE_ENDIAN)
-	u8 cached_session_enable;
-	u8 reserved21[3];
-#endif
-	u32 reserved2[4];
+union iscsi_pdu_headers_little_endian {
+	u32 fullHeaderSize[12];
+	struct iscsi_cmd_pdu_hdr_little_endian command_pdu_hdr;
+	struct iscsi_data_pdu_hdr_little_endian data_out_pdu_hdr;
+	struct iscsi_login_req_hdr_little_endian login_req_pdu_hdr;
+	struct iscsi_logout_req_hdr_little_endian logout_req_pdu_hdr;
+	struct iscsi_tmf_req_hdr_little_endian tmf_req_pdu_hdr;
+	struct iscsi_text_req_hdr_little_endian text_req_pdu_hdr;
+	struct iscsi_nop_out_hdr_little_endian nop_out_pdu_hdr;
 };
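+
+/*
+ * Illustrative sketch, not part of this patch: the union lets one 48-byte
+ * buffer (fullHeaderSize[12]) be viewed as any of the request PDU types.
+ * A hypothetical Nop-Out fill, with field values in host order and the
+ * little-endian conversion the real driver performs elided:
+ */
+static inline void iscsi_fill_nop_out(union iscsi_pdu_headers_little_endian *h,
+				      u32 itt, u32 ttt, u32 cmd_sn)
+{
+	struct iscsi_nop_out_hdr_little_endian *nop = &h->nop_out_pdu_hdr;
+
+	memset(h, 0, sizeof(*h));
+	nop->opcode = 0x00;	/* ISCSI_OP_NOOP_OUT per RFC 3720 */
+	nop->itt = itt;
+	nop->ttt = ttt;
+	nop->cmd_sn = cmd_sn;
+}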
 
-/*
- * FCoE connection offload request 1
- */
-struct fcoe_kwqe_conn_offload1 {
-#if defined(__BIG_ENDIAN)
-	struct fcoe_kwqe_header hdr;
-	u16 fcoe_conn_id;
-#elif defined(__LITTLE_ENDIAN)
-	u16 fcoe_conn_id;
-	struct fcoe_kwqe_header hdr;
-#endif
-	u32 sq_addr_lo;
-	u32 sq_addr_hi;
-	u32 rq_pbl_addr_lo;
-	u32 rq_pbl_addr_hi;
-	u32 rq_first_pbe_addr_lo;
-	u32 rq_first_pbe_addr_hi;
-#if defined(__BIG_ENDIAN)
-	u16 reserved0;
-	u16 rq_prod;
-#elif defined(__LITTLE_ENDIAN)
-	u16 rq_prod;
-	u16 reserved0;
-#endif
-};
-
-/*
- * FCoE connection offload request 2
- */
-struct fcoe_kwqe_conn_offload2 {
-#if defined(__BIG_ENDIAN)
-	struct fcoe_kwqe_header hdr;
-	u16 tx_max_fc_pay_len;
-#elif defined(__LITTLE_ENDIAN)
-	u16 tx_max_fc_pay_len;
-	struct fcoe_kwqe_header hdr;
-#endif
-	u32 cq_addr_lo;
-	u32 cq_addr_hi;
-	u32 xferq_addr_lo;
-	u32 xferq_addr_hi;
-	u32 conn_db_addr_lo;
-	u32 conn_db_addr_hi;
-	u32 reserved1;
-};
-
-/*
- * FCoE connection offload request 3
- */
-struct fcoe_kwqe_conn_offload3 {
-#if defined(__BIG_ENDIAN)
-	struct fcoe_kwqe_header hdr;
-	u16 vlan_tag;
-#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID (0xFFF<<0)
-#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT 0
-#define FCOE_KWQE_CONN_OFFLOAD3_CFI (0x1<<12)
-#define FCOE_KWQE_CONN_OFFLOAD3_CFI_SHIFT 12
-#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY (0x7<<13)
-#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT 13
-#elif defined(__LITTLE_ENDIAN)
-	u16 vlan_tag;
-#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID (0xFFF<<0)
-#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT 0
-#define FCOE_KWQE_CONN_OFFLOAD3_CFI (0x1<<12)
-#define FCOE_KWQE_CONN_OFFLOAD3_CFI_SHIFT 12
-#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY (0x7<<13)
-#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT 13
-	struct fcoe_kwqe_header hdr;
-#endif
-#if defined(__BIG_ENDIAN)
-	u8 tx_max_conc_seqs_c3;
-	u8 s_id[3];
-#elif defined(__LITTLE_ENDIAN)
-	u8 s_id[3];
-	u8 tx_max_conc_seqs_c3;
-#endif
-#if defined(__BIG_ENDIAN)
-	u8 flags;
-#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS (0x1<<0)
-#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT 0
-#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES (0x1<<1)
-#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT 1
-#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT (0x1<<2)
-#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT 2
-#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ (0x1<<3)
-#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT 3
-#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID (0x1<<4)
-#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT 4
-#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID (0x1<<5)
-#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID_SHIFT 5
-#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0 (0x1<<6)
-#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0_SHIFT 6
-#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG (0x1<<7)
-#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT 7
-	u8 d_id[3];
-#elif defined(__LITTLE_ENDIAN)
-	u8 d_id[3];
-	u8 flags;
-#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS (0x1<<0)
-#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT 0
-#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES (0x1<<1)
-#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT 1
-#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT (0x1<<2)
-#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT 2
-#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ (0x1<<3)
-#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT 3
-#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID (0x1<<4)
-#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT 4
-#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID (0x1<<5)
-#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID_SHIFT 5
-#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0 (0x1<<6)
-#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0_SHIFT 6
-#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG (0x1<<7)
-#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT 7
-#endif
-	u32 reserved;
-	u32 confq_first_pbe_addr_lo;
-	u32 confq_first_pbe_addr_hi;
-#if defined(__BIG_ENDIAN)
-	u16 rx_max_fc_pay_len;
-	u16 tx_total_conc_seqs;
-#elif defined(__LITTLE_ENDIAN)
-	u16 tx_total_conc_seqs;
-	u16 rx_max_fc_pay_len;
-#endif
-#if defined(__BIG_ENDIAN)
-	u8 rx_open_seqs_exch_c3;
-	u8 rx_max_conc_seqs_c3;
-	u16 rx_total_conc_seqs;
-#elif defined(__LITTLE_ENDIAN)
-	u16 rx_total_conc_seqs;
-	u8 rx_max_conc_seqs_c3;
-	u8 rx_open_seqs_exch_c3;
-#endif
-};
-
-/*
- * FCoE connection offload request 4
- */
-struct fcoe_kwqe_conn_offload4 {
-#if defined(__BIG_ENDIAN)
-	struct fcoe_kwqe_header hdr;
-	u8 reserved2;
-	u8 e_d_tov_timer_val;
-#elif defined(__LITTLE_ENDIAN)
-	u8 e_d_tov_timer_val;
-	u8 reserved2;
-	struct fcoe_kwqe_header hdr;
-#endif
-	u8 src_mac_addr_lo32[4];
-#if defined(__BIG_ENDIAN)
-	u8 dst_mac_addr_hi16[2];
-	u8 src_mac_addr_hi16[2];
-#elif defined(__LITTLE_ENDIAN)
-	u8 src_mac_addr_hi16[2];
-	u8 dst_mac_addr_hi16[2];
-#endif
-	u8 dst_mac_addr_lo32[4];
-	u32 lcq_addr_lo;
-	u32 lcq_addr_hi;
-	u32 confq_pbl_base_addr_lo;
-	u32 confq_pbl_base_addr_hi;
-};
-
-/*
- * FCoE connection enable request
- */
-struct fcoe_kwqe_conn_enable_disable {
-#if defined(__BIG_ENDIAN)
-	struct fcoe_kwqe_header hdr;
-	u16 reserved0;
-#elif defined(__LITTLE_ENDIAN)
-	u16 reserved0;
-	struct fcoe_kwqe_header hdr;
-#endif
-	u8 src_mac_addr_lo32[4];
-#if defined(__BIG_ENDIAN)
-	u16 vlan_tag;
-#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID (0xFFF<<0)
-#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT 0
-#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI (0x1<<12)
-#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI_SHIFT 12
-#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY (0x7<<13)
-#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT 13
-	u8 src_mac_addr_hi16[2];
-#elif defined(__LITTLE_ENDIAN)
-	u8 src_mac_addr_hi16[2];
-	u16 vlan_tag;
-#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID (0xFFF<<0)
-#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT 0
-#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI (0x1<<12)
-#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI_SHIFT 12
-#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY (0x7<<13)
-#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT 13
-#endif
-	u8 dst_mac_addr_lo32[4];
+struct iscsi_hq_bd {
+	union iscsi_pdu_headers_little_endian pdu_header;
 #if defined(__BIG_ENDIAN)
 	u16 reserved1;
-	u8 dst_mac_addr_hi16[2];
+	u16 lcl_cmp_flg;
 #elif defined(__LITTLE_ENDIAN)
-	u8 dst_mac_addr_hi16[2];
+	u16 lcl_cmp_flg;
 	u16 reserved1;
 #endif
+	u32 sgl_base_lo;
+	u32 sgl_base_hi;
 #if defined(__BIG_ENDIAN)
-	u8 vlan_flag;
-	u8 s_id[3];
+	u8 sgl_size;
+	u8 sge_index;
+	u16 sge_offset;
 #elif defined(__LITTLE_ENDIAN)
-	u8 s_id[3];
-	u8 vlan_flag;
-#endif
-#if defined(__BIG_ENDIAN)
-	u8 reserved3;
-	u8 d_id[3];
-#elif defined(__LITTLE_ENDIAN)
-	u8 d_id[3];
-	u8 reserved3;
-#endif
-	u32 context_id;
-	u32 conn_id;
-	u32 reserved4;
-};
-
-/*
- * FCoE connection destroy request
- */
-struct fcoe_kwqe_conn_destroy {
-#if defined(__BIG_ENDIAN)
-	struct fcoe_kwqe_header hdr;
-	u16 reserved0;
-#elif defined(__LITTLE_ENDIAN)
-	u16 reserved0;
-	struct fcoe_kwqe_header hdr;
-#endif
-	u32 context_id;
-	u32 conn_id;
-	u32 reserved1[5];
-};
-
-/*
- * FCoe destroy request
- */
-struct fcoe_kwqe_destroy {
-#if defined(__BIG_ENDIAN)
-	struct fcoe_kwqe_header hdr;
-	u16 reserved0;
-#elif defined(__LITTLE_ENDIAN)
-	u16 reserved0;
-	struct fcoe_kwqe_header hdr;
-#endif
-	u32 reserved1[7];
-};
-
-/*
- * FCoe statistics request
- */
-struct fcoe_kwqe_stat {
-#if defined(__BIG_ENDIAN)
-	struct fcoe_kwqe_header hdr;
-	u16 reserved0;
-#elif defined(__LITTLE_ENDIAN)
-	u16 reserved0;
-	struct fcoe_kwqe_header hdr;
-#endif
-	u32 stat_params_addr_lo;
-	u32 stat_params_addr_hi;
-	u32 reserved1[5];
-};
-
-/*
- * FCoE KWQ WQE
- */
-union fcoe_kwqe {
-	struct fcoe_kwqe_init1 init1;
-	struct fcoe_kwqe_init2 init2;
-	struct fcoe_kwqe_init3 init3;
-	struct fcoe_kwqe_conn_offload1 conn_offload1;
-	struct fcoe_kwqe_conn_offload2 conn_offload2;
-	struct fcoe_kwqe_conn_offload3 conn_offload3;
-	struct fcoe_kwqe_conn_offload4 conn_offload4;
-	struct fcoe_kwqe_conn_enable_disable conn_enable_disable;
-	struct fcoe_kwqe_conn_destroy conn_destroy;
-	struct fcoe_kwqe_destroy destroy;
-	struct fcoe_kwqe_stat statistics;
-};
-
-struct fcoe_task_ctx_entry {
-	struct fcoe_task_ctx_entry_tx_only tx_wr_only;
-	struct fcoe_task_ctx_entry_txwr_rxrd tx_wr_rx_rd;
-	struct fcoe_task_ctx_entry_tx_rx_cmn cmn;
-	struct fcoe_task_ctx_entry_rxwr_txrd rx_wr_tx_rd;
-	struct fcoe_task_ctx_entry_rx_only rx_wr_only;
-	u32 reserved[4];
-};
-
-/*
- * FCoE connection enable\disable params passed by driver to FW in FCoE enable ramrod
- */
-struct fcoe_conn_enable_disable_ramrod_params {
-	struct fcoe_kwqe_conn_enable_disable enable_disable_kwqe;
-};
-
-
-/*
- * FCoE connection offload params passed by driver to FW in FCoE offload ramrod
- */
-struct fcoe_conn_offload_ramrod_params {
-	struct fcoe_kwqe_conn_offload1 offload_kwqe1;
-	struct fcoe_kwqe_conn_offload2 offload_kwqe2;
-	struct fcoe_kwqe_conn_offload3 offload_kwqe3;
-	struct fcoe_kwqe_conn_offload4 offload_kwqe4;
-};
-
-/*
- * FCoE init params passed by driver to FW in FCoE init ramrod
- */
-struct fcoe_init_ramrod_params {
-	struct fcoe_kwqe_init1 init_kwqe1;
-	struct fcoe_kwqe_init2 init_kwqe2;
-	struct fcoe_kwqe_init3 init_kwqe3;
-	struct regpair eq_addr;
-	struct regpair eq_next_page_addr;
-#if defined(__BIG_ENDIAN)
-	u16 sb_num;
-	u16 eq_prod;
-#elif defined(__LITTLE_ENDIAN)
-	u16 eq_prod;
-	u16 sb_num;
-#endif
-#if defined(__BIG_ENDIAN)
-	u16 reserved1;
-	u8 reserved0;
-	u8 sb_id;
-#elif defined(__LITTLE_ENDIAN)
-	u8 sb_id;
-	u8 reserved0;
-	u16 reserved1;
+	u16 sge_offset;
+	u8 sge_index;
+	u8 sgl_size;
 #endif
 };
 
 
 /*
- * FCoE statistics params buffer passed by driver to FW in FCoE statistics ramrod
+ * CQE data for L2 OOO connection $$KEEP_ENDIANNESS$$
  */
-struct fcoe_stat_ramrod_params {
-	struct fcoe_kwqe_stat stat_kwqe;
+struct iscsi_l2_ooo_data {
+	__le32 iscsi_cid;
+	u8 drop_isle;
+	u8 drop_size;
+	u8 ooo_opcode;
+	u8 ooo_isle;
+	u8 reserved[8];
 };
 
 
-/*
- * FCoE 16-bits vlan structure
- */
-struct fcoe_vlan_fields {
-	u16 fields;
-#define FCOE_VLAN_FIELDS_VID (0xFFF<<0)
-#define FCOE_VLAN_FIELDS_VID_SHIFT 0
-#define FCOE_VLAN_FIELDS_CLI (0x1<<12)
-#define FCOE_VLAN_FIELDS_CLI_SHIFT 12
-#define FCOE_VLAN_FIELDS_PRI (0x7<<13)
-#define FCOE_VLAN_FIELDS_PRI_SHIFT 13
+
+struct iscsi_task_context_entry_xuc_c_write_only {
+	u32 total_data_acked;
 };
 
-
-/*
- * FCoE 16-bits vlan union
- */
-union fcoe_vlan_field_union {
-	struct fcoe_vlan_fields fields;
-	u16 val;
+struct iscsi_task_context_r2t_table_entry {
+	u32 ttt;
+	u32 desired_data_len;
 };
 
-/*
- * Parameters used for Class 2 verifications
- */
-struct ustorm_fcoe_c2_params {
+struct iscsi_task_context_entry_xuc_u_write_only {
+	u32 exp_r2t_sn;
+	struct iscsi_task_context_r2t_table_entry r2t_table[4];
 #if defined(__BIG_ENDIAN)
-	u16 e2e_credit;
-	u16 con_seq;
+	u16 data_in_count;
+	u8 cq_id;
+	u8 valid_1b;
 #elif defined(__LITTLE_ENDIAN)
-	u16 con_seq;
-	u16 e2e_credit;
+	u8 valid_1b;
+	u8 cq_id;
+	u16 data_in_count;
 #endif
-#if defined(__BIG_ENDIAN)
-	u16 ackq_prod;
-	u16 open_seq_per_exch;
-#elif defined(__LITTLE_ENDIAN)
-	u16 open_seq_per_exch;
-	u16 ackq_prod;
-#endif
-	struct regpair ackq_pbl_base;
-	struct regpair ackq_cur_seg;
 };
 
-/*
- * Parameters used for Class 2 verifications
- */
-struct xstorm_fcoe_c2_params {
-#if defined(__BIG_ENDIAN)
-	u16 reserved0;
-	u8 ackq_x_prod;
-	u8 max_conc_seqs_c2;
-#elif defined(__LITTLE_ENDIAN)
-	u8 max_conc_seqs_c2;
-	u8 ackq_x_prod;
-	u16 reserved0;
-#endif
-	struct regpair ackq_pbl_base;
-	struct regpair ackq_cur_seg;
+struct iscsi_task_context_entry_xuc {
+	struct iscsi_task_context_entry_xuc_c_write_only write_c;
+	u32 exp_data_transfer_len;
+	struct iscsi_task_context_entry_xuc_x_write_only write_x;
+	u32 lun_lo;
+	struct iscsi_task_context_entry_xuc_xu_write_both write_xu;
+	u32 lun_hi;
+	struct iscsi_task_context_entry_xuc_u_write_only write_u;
 };
 
-/*
- * Buffer per connection, used in Tstorm
- */
-struct iscsi_conn_buf {
-	struct regpair reserved[8];
+struct iscsi_task_context_entry_u {
+	u32 exp_r2t_buff_offset;
+	u32 rem_rcv_len;
+	u32 exp_data_sn;
 };
 
+struct iscsi_task_context_entry {
+	struct iscsi_task_context_entry_x tce_x;
+#if defined(__BIG_ENDIAN)
+	u16 data_out_count;
+	u16 rsrv0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 rsrv0;
+	u16 data_out_count;
+#endif
+	struct iscsi_task_context_entry_xuc tce_xuc;
+	struct iscsi_task_context_entry_u tce_u;
+	u32 rsrv1[7];
+};
+
+struct iscsi_task_context_entry_xuc_x_init_only {
+	struct regpair lun;
+	u32 exp_data_transfer_len;
+};
+
 /*
  * ipv6 structure
  */
@@ -4379,6 +5067,8 @@
 	u32 ip_addr_hi_hi;
 };
 
+
+
 /*
  * l5cm- connection identification params
  */
@@ -4460,8 +5150,7 @@
  * l5cm-tstorm connection buffer
  */
 struct l5cm_tstorm_conn_buffer {
-	u32 snd_buf;
-	u32 rcv_buf;
+	u32 rsrv1[2];
 #if defined(__BIG_ENDIAN)
 	u16 params;
 #define L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE (0x1<<0)
@@ -4493,6 +5182,72 @@
 	struct l5cm_tstorm_conn_buffer tstorm_conn_buffer;
 };
 
+
+
+/*
+ * The l5cm opaque buffer passed in the add-new-connection ramrod (passive side)
+ */
+struct l5cm_hash_input_string {
+	u32 __opaque1;
+#if defined(__BIG_ENDIAN)
+	u16 __opaque3;
+	u16 __opaque2;
+#elif defined(__LITTLE_ENDIAN)
+	u16 __opaque2;
+	u16 __opaque3;
+#endif
+	struct ip_v6_addr __opaque4;
+	struct ip_v6_addr __opaque5;
+	u32 __opaque6;
+	u32 __opaque7[5];
+};
+
+
+/*
+ * SYN cookie component
+ */
+struct l5cm_syn_cookie_comp {
+	u32 __opaque;
+};
+
+/*
+ * data related to listeners of a TCP port
+ */
+struct l5cm_port_listener_data {
+	u8 params;
+#define L5CM_PORT_LISTENER_DATA_ENABLE (0x1<<0)
+#define L5CM_PORT_LISTENER_DATA_ENABLE_SHIFT 0
+#define L5CM_PORT_LISTENER_DATA_IP_INDEX (0xF<<1)
+#define L5CM_PORT_LISTENER_DATA_IP_INDEX_SHIFT 1
+#define L5CM_PORT_LISTENER_DATA_NET_FILTER (0x1<<5)
+#define L5CM_PORT_LISTENER_DATA_NET_FILTER_SHIFT 5
+#define L5CM_PORT_LISTENER_DATA_DEFFERED_MODE (0x1<<6)
+#define L5CM_PORT_LISTENER_DATA_DEFFERED_MODE_SHIFT 6
+#define L5CM_PORT_LISTENER_DATA_MPA_MODE (0x1<<7)
+#define L5CM_PORT_LISTENER_DATA_MPA_MODE_SHIFT 7
+};
+
+/*
+ * Opaque structure passed from U to X when final ack arrives
+ */
+struct l5cm_opaque_buf {
+	u32 __opaque1;
+	u32 __opaque2;
+	u32 __opaque3;
+	u32 __opaque4;
+	struct l5cm_syn_cookie_comp __opaque5;
+#if defined(__BIG_ENDIAN)
+	u16 rsrv2;
+	u8 rsrv;
+	struct l5cm_port_listener_data __opaque6;
+#elif defined(__LITTLE_ENDIAN)
+	struct l5cm_port_listener_data __opaque6;
+	u8 rsrv;
+	u16 rsrv2;
+#endif
+};
+
+
 /*
  * l5cm slow path element
  */
@@ -4501,6 +5256,109 @@
 	u32 rsrv;
 };
 
+
+/*
+ * The final-ACK union structure in the PCS entry after the final ACK arrived
+ */
+struct l5cm_pcse_ack {
+	struct l5cm_xstorm_conn_buffer tx_socket_params;
+	struct l5cm_opaque_buf opaque_buf;
+	struct l5cm_tstorm_conn_buffer rx_socket_params;
+};
+
+
+/*
+ * The SYN union structure in the PCS entry after a SYN arrived
+ */
+struct l5cm_pcse_syn {
+	struct l5cm_opaque_buf opaque_buf;
+	u32 rsrv[12];
+};
+
+
+/*
+ * PCS entry attributes for passive connections
+ */
+struct l5cm_pcs_attributes {
+#if defined(__BIG_ENDIAN)
+	u16 pcs_id;
+	u8 status;
+	u8 flags;
+#define L5CM_PCS_ATTRIBUTES_NET_FILTER (0x1<<0)
+#define L5CM_PCS_ATTRIBUTES_NET_FILTER_SHIFT 0
+#define L5CM_PCS_ATTRIBUTES_CALCULATE_HASH (0x1<<1)
+#define L5CM_PCS_ATTRIBUTES_CALCULATE_HASH_SHIFT 1
+#define L5CM_PCS_ATTRIBUTES_COMPARE_HASH_RESULT (0x1<<2)
+#define L5CM_PCS_ATTRIBUTES_COMPARE_HASH_RESULT_SHIFT 2
+#define L5CM_PCS_ATTRIBUTES_QUERY_ULP_ACCEPT (0x1<<3)
+#define L5CM_PCS_ATTRIBUTES_QUERY_ULP_ACCEPT_SHIFT 3
+#define L5CM_PCS_ATTRIBUTES_FIND_DEST_MAC (0x1<<4)
+#define L5CM_PCS_ATTRIBUTES_FIND_DEST_MAC_SHIFT 4
+#define L5CM_PCS_ATTRIBUTES_L4_OFFLOAD (0x1<<5)
+#define L5CM_PCS_ATTRIBUTES_L4_OFFLOAD_SHIFT 5
+#define L5CM_PCS_ATTRIBUTES_FORWARD_PACKET (0x1<<6)
+#define L5CM_PCS_ATTRIBUTES_FORWARD_PACKET_SHIFT 6
+#define L5CM_PCS_ATTRIBUTES_RSRV (0x1<<7)
+#define L5CM_PCS_ATTRIBUTES_RSRV_SHIFT 7
+#elif defined(__LITTLE_ENDIAN)
+	u8 flags;
+#define L5CM_PCS_ATTRIBUTES_NET_FILTER (0x1<<0)
+#define L5CM_PCS_ATTRIBUTES_NET_FILTER_SHIFT 0
+#define L5CM_PCS_ATTRIBUTES_CALCULATE_HASH (0x1<<1)
+#define L5CM_PCS_ATTRIBUTES_CALCULATE_HASH_SHIFT 1
+#define L5CM_PCS_ATTRIBUTES_COMPARE_HASH_RESULT (0x1<<2)
+#define L5CM_PCS_ATTRIBUTES_COMPARE_HASH_RESULT_SHIFT 2
+#define L5CM_PCS_ATTRIBUTES_QUERY_ULP_ACCEPT (0x1<<3)
+#define L5CM_PCS_ATTRIBUTES_QUERY_ULP_ACCEPT_SHIFT 3
+#define L5CM_PCS_ATTRIBUTES_FIND_DEST_MAC (0x1<<4)
+#define L5CM_PCS_ATTRIBUTES_FIND_DEST_MAC_SHIFT 4
+#define L5CM_PCS_ATTRIBUTES_L4_OFFLOAD (0x1<<5)
+#define L5CM_PCS_ATTRIBUTES_L4_OFFLOAD_SHIFT 5
+#define L5CM_PCS_ATTRIBUTES_FORWARD_PACKET (0x1<<6)
+#define L5CM_PCS_ATTRIBUTES_FORWARD_PACKET_SHIFT 6
+#define L5CM_PCS_ATTRIBUTES_RSRV (0x1<<7)
+#define L5CM_PCS_ATTRIBUTES_RSRV_SHIFT 7
+	u8 status;
+	u16 pcs_id;
+#endif
+};
+
+
+union l5cm_seg_params {
+	struct l5cm_pcse_syn syn_seg_params;
+	struct l5cm_pcse_ack ack_seg_params;
+};
+
+/*
+ * PCS entry header for passive connections
+ */
+struct l5cm_pcs_hdr {
+	struct l5cm_hash_input_string hash_input_string;
+	struct l5cm_conn_addr_params conn_addr_buf;
+	u32 cid;
+	u32 hash_result;
+	union l5cm_seg_params seg_params;
+	struct l5cm_pcs_attributes att;
+#if defined(__BIG_ENDIAN)
+	u16 rsrv;
+	u16 rx_seg_size;
+#elif defined(__LITTLE_ENDIAN)
+	u16 rx_seg_size;
+	u16 rsrv;
+#endif
+};
+
+/*
+ * PCS entry for passive connections
+ */
+struct l5cm_pcs_entry {
+	struct l5cm_pcs_hdr hdr;
+	u8 rx_segment[1516];
+};
+
 /*
  * l5cm connection parameters
  */
@@ -4535,6 +5393,29 @@
 	union l5cm_specific_data data;
 };
 
+
+/*
+ * Termination variables
+ */
+struct l5cm_term_vars {
+	u8 BitMap;
+#define L5CM_TERM_VARS_TCP_STATE (0xF<<0)
+#define L5CM_TERM_VARS_TCP_STATE_SHIFT 0
+#define L5CM_TERM_VARS_FIN_RECEIVED_SBIT (0x1<<4)
+#define L5CM_TERM_VARS_FIN_RECEIVED_SBIT_SHIFT 4
+#define L5CM_TERM_VARS_ACK_ON_FIN_RECEIVED_SBIT (0x1<<5)
+#define L5CM_TERM_VARS_ACK_ON_FIN_RECEIVED_SBIT_SHIFT 5
+#define L5CM_TERM_VARS_TERM_ON_CHIP (0x1<<6)
+#define L5CM_TERM_VARS_TERM_ON_CHIP_SHIFT 6
+#define L5CM_TERM_VARS_RSRV (0x1<<7)
+#define L5CM_TERM_VARS_RSRV_SHIFT 7
+};
+
 /*
  * Tstorm Tcp flags
  */
@@ -4550,6 +5431,7 @@
 #define TSTORM_L5CM_TCP_FLAGS_RSRV1_SHIFT 14
 };
 
+
 /*
  * Xstorm Tcp flags
  */
@@ -4565,4 +5447,38 @@
 #define XSTORM_L5CM_TCP_FLAGS_RSRV_SHIFT 3
 };
 
-#endif /* CNIC_DEFS_H */
+
+
+/*
+ * Out-of-order events
+ */
+enum tcp_ooo_event {
+	TCP_EVENT_ADD_PEN = 0,
+	TCP_EVENT_ADD_NEW_ISLE = 1,
+	TCP_EVENT_ADD_ISLE_RIGHT = 2,
+	TCP_EVENT_ADD_ISLE_LEFT = 3,
+	TCP_EVENT_JOIN = 4,
+	TCP_EVENT_NOP = 5,
+	MAX_TCP_OOO_EVENT
+};
+
+
+/*
+ * OOO support modes
+ */
+enum tcp_tstorm_ooo {
+	TCP_TSTORM_OOO_DROP_AND_PROC_ACK = 0,
+	TCP_TSTORM_OOO_SEND_PURE_ACK = 1,
+	TCP_TSTORM_OOO_SUPPORTED = 2,
+	MAX_TCP_TSTORM_OOO
+};
+
+
+
+
+
+
+
+
+
+#endif /* CNIC_DEFS_H */
diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h
index fdd8e46..bc7000a 100644
--- a/drivers/net/cnic_if.h
+++ b/drivers/net/cnic_if.h
@@ -12,8 +12,8 @@
 #ifndef CNIC_IF_H
 #define CNIC_IF_H
 
-#define CNIC_MODULE_VERSION	"2.2.14"
-#define CNIC_MODULE_RELDATE	"Mar 30, 2011"
+#define CNIC_MODULE_VERSION	"2.5.3"
+#define CNIC_MODULE_RELDATE	"June 6, 2011"
 
 #define CNIC_ULP_RDMA		0
 #define CNIC_ULP_ISCSI		1
@@ -99,6 +99,8 @@
 
 struct cnic_ctl_completion {
 	u32	cid;
+	u8	opcode;
+	u8	error;
 };
 
 struct cnic_ctl_info {
@@ -169,7 +171,7 @@
 	struct pci_dev	*pdev;
 	void __iomem	*io_base;
 	void __iomem	*io_base2;
-	void		*iro_arr;
+	const void	*iro_arr;
 
 	u32		ctx_tbl_offset;
 	u32		ctx_tbl_len;
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index fec939f..086ce04 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -18,6 +18,7 @@
 
 #include <linux/module.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/moduleparam.h>
 
 #include <linux/sched.h>
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
index 7e3cfbe..c9957b7 100644
--- a/drivers/net/cxgb4/cxgb4_main.c
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -3704,7 +3704,7 @@
 	if (err) {
 		dev_warn(&pdev->dev, "only %d net devices registered\n", i);
 		err = 0;
-	};
+	}
 
 	if (cxgb4_debugfs_root) {
 		adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
diff --git a/drivers/net/cxgb4vf/adapter.h b/drivers/net/cxgb4vf/adapter.h
index 4fd821a..6e9a8d9 100644
--- a/drivers/net/cxgb4vf/adapter.h
+++ b/drivers/net/cxgb4vf/adapter.h
@@ -40,6 +40,7 @@
 #ifndef __CXGB4VF_ADAPTER_H__
 #define __CXGB4VF_ADAPTER_H__
 
+#include <linux/interrupt.h>
 #include <linux/pci.h>
 #include <linux/spinlock.h>
 #include <linux/skbuff.h>
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index dcc4a17..55c8245 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -1083,6 +1083,8 @@
 		goto fail_tx;
 	}
 
+	skb_tx_timestamp(skb);
+
 	ret_code = cpdma_chan_submit(priv->txchan, skb, skb->data, skb->len,
 				     GFP_KERNEL);
 	if (unlikely(ret_code != 0)) {
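/*
 * Illustrative sketch, not part of the patch: skb_tx_timestamp() only has
 * a visible effect for sockets that asked for transmit timestamps.  A
 * hedged userspace example of opting in (assumes a libc that exposes
 * SO_TIMESTAMPING and an already-open socket "fd"):
 */
#include <sys/socket.h>
#include <linux/net_tstamp.h>

static int enable_sw_tx_timestamps(int fd)
{
	int val = SOF_TIMESTAMPING_TX_SOFTWARE | SOF_TIMESTAMPING_SOFTWARE;

	/* timestamps are read back from the socket error queue afterwards
	 * (recvmsg() with MSG_ERRQUEUE, cmsg type SCM_TIMESTAMPING) */
	return setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &val, sizeof(val));
}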
@@ -1489,14 +1491,14 @@
  */
 static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd)
 {
-	dev_warn(&ndev->dev, "DaVinci EMAC: ioctl not supported\n");
+	struct emac_priv *priv = netdev_priv(ndev);
 
 	if (!(netif_running(ndev)))
 		return -EINVAL;
 
 	/* TODO: Add phy read and write and private statistics get feature */
 
-	return -EOPNOTSUPP;
+	return phy_mii_ioctl(priv->phydev, ifrq, cmd);
 }
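/*
 * Illustrative sketch, not part of the patch: with the ioctl now routed to
 * phy_mii_ioctl(), the standard MII ioctls work against the EMAC's PHY.  A
 * hedged userspace example reading the BMSR; "eth0" and the AF_INET socket
 * "fd" are assumptions:
 */
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/mii.h>
#include <linux/sockios.h>

static int read_bmsr(int fd)
{
	struct ifreq ifr;
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

	if (ioctl(fd, SIOCGMIIPHY, &ifr) < 0)	/* fills mii->phy_id */
		return -1;

	mii->reg_num = MII_BMSR;
	if (ioctl(fd, SIOCGMIIREG, &ifr) < 0)	/* reads into mii->val_out */
		return -1;

	return mii->val_out;
}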
 
 static int match_first_device(struct device *dev, void *data)
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index ee597e6..8ef31dc 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -24,6 +24,7 @@
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/skbuff.h>
 #include <linux/spinlock.h>
 #include <linux/crc32.h>
@@ -534,21 +535,35 @@
 	board_info_t *dm = to_dm9000_board(dev);
 	int offset = ee->offset;
 	int len = ee->len;
-	int i;
+	int done;
 
 	/* EEPROM access is aligned to two bytes */
 
-	if ((len & 1) != 0 || (offset & 1) != 0)
-		return -EINVAL;
-
 	if (dm->flags & DM9000_PLATF_NO_EEPROM)
 		return -ENOENT;
 
 	if (ee->magic != DM_EEPROM_MAGIC)
 		return -EINVAL;
 
-	for (i = 0; i < len; i += 2)
-		dm9000_write_eeprom(dm, (offset + i) / 2, data + i);
+	while (len > 0) {
+		if (len & 1 || offset & 1) {
+			int which = offset & 1;
+			u8 tmp[2];
+
+			dm9000_read_eeprom(dm, offset / 2, tmp);
+			tmp[which] = *data;
+			dm9000_write_eeprom(dm, offset / 2, tmp);
+
+			done = 1;
+		} else {
+			dm9000_write_eeprom(dm, offset / 2, data);
+			done = 2;
+		}
+
+		data += done;
+		offset += done;
+		len -= done;
+	}
 
 	return 0;
 }
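/*
 * Worked example (editorial, not part of the patch): writing 3 bytes at
 * EEPROM byte offset 1.  Pass 1 (offset odd): read word 0, patch byte 1
 * within it, write it back (done = 1).  Pass 2 (offset 2, len 2, both
 * even): write the remaining two bytes as one aligned word (done = 2).
 * This byte-wise read-modify-write path is what lifts the old requirement
 * that offset and length be 2-byte aligned.
 */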
diff --git a/drivers/net/dnet.c b/drivers/net/dnet.c
index 8318ea0..c1063d1 100644
--- a/drivers/net/dnet.c
+++ b/drivers/net/dnet.c
@@ -16,6 +16,7 @@
 #include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/dma-mapping.h>
@@ -587,6 +588,8 @@
 		dnet_writel(bp, irq_enable, INTR_ENB);
 	}
 
+	skb_tx_timestamp(skb);
+
 	/* free the buffer */
 	dev_kfree_skb(skb);
 
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index e336c79..c1352c6 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -149,6 +149,8 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/hardirq.h>
+#include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/kernel.h>
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index 7501d97..1698622 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -3080,7 +3080,6 @@
 {
 	u32 ctrl, ctrl_ext;
 	u32 led_ctrl;
-	s32 ret_val;
 
 	e_dbg("e1000_phy_hw_reset");
 
@@ -3126,11 +3125,7 @@
 	}
 
 	/* Wait for FW to finish PHY configuration. */
-	ret_val = e1000_get_phy_cfg_done(hw);
-	if (ret_val != E1000_SUCCESS)
-		return ret_val;
-
-	return ret_val;
+	return e1000_get_phy_cfg_done(hw);
 }
 
 /**
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index 9549879..c1e7f94 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -104,6 +104,7 @@
 	 (((reg) & ~MAX_PHY_REG_ADDRESS) << (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)))
 
 /* PHY Wakeup Registers and defines */
+#define BM_PORT_GEN_CFG PHY_REG(BM_PORT_CTRL_PAGE, 17)
 #define BM_RCTL         PHY_REG(BM_WUC_PAGE, 0)
 #define BM_WUC          PHY_REG(BM_WUC_PAGE, 1)
 #define BM_WUFC         PHY_REG(BM_WUC_PAGE, 2)
@@ -122,20 +123,21 @@
 #define BM_RCTL_PMCF          0x0040          /* Pass MAC Control Frames */
 #define BM_RCTL_RFCE          0x0080          /* Rx Flow Control Enable */
 
-#define HV_SCC_UPPER		PHY_REG(778, 16) /* Single Collision Count */
-#define HV_SCC_LOWER		PHY_REG(778, 17)
-#define HV_ECOL_UPPER		PHY_REG(778, 18) /* Excessive Collision Count */
-#define HV_ECOL_LOWER		PHY_REG(778, 19)
-#define HV_MCC_UPPER		PHY_REG(778, 20) /* Multiple Collision Count */
-#define HV_MCC_LOWER		PHY_REG(778, 21)
-#define HV_LATECOL_UPPER	PHY_REG(778, 23) /* Late Collision Count */
-#define HV_LATECOL_LOWER	PHY_REG(778, 24)
-#define HV_COLC_UPPER		PHY_REG(778, 25) /* Collision Count */
-#define HV_COLC_LOWER		PHY_REG(778, 26)
-#define HV_DC_UPPER		PHY_REG(778, 27) /* Defer Count */
-#define HV_DC_LOWER		PHY_REG(778, 28)
-#define HV_TNCRS_UPPER		PHY_REG(778, 29) /* Transmit with no CRS */
-#define HV_TNCRS_LOWER		PHY_REG(778, 30)
+#define HV_STATS_PAGE	778
+#define HV_SCC_UPPER	PHY_REG(HV_STATS_PAGE, 16) /* Single Collision Count */
+#define HV_SCC_LOWER	PHY_REG(HV_STATS_PAGE, 17)
+#define HV_ECOL_UPPER	PHY_REG(HV_STATS_PAGE, 18) /* Excessive Coll. Count */
+#define HV_ECOL_LOWER	PHY_REG(HV_STATS_PAGE, 19)
+#define HV_MCC_UPPER	PHY_REG(HV_STATS_PAGE, 20) /* Multiple Coll. Count */
+#define HV_MCC_LOWER	PHY_REG(HV_STATS_PAGE, 21)
+#define HV_LATECOL_UPPER PHY_REG(HV_STATS_PAGE, 23) /* Late Collision Count */
+#define HV_LATECOL_LOWER PHY_REG(HV_STATS_PAGE, 24)
+#define HV_COLC_UPPER	PHY_REG(HV_STATS_PAGE, 25) /* Collision Count */
+#define HV_COLC_LOWER	PHY_REG(HV_STATS_PAGE, 26)
+#define HV_DC_UPPER	PHY_REG(HV_STATS_PAGE, 27) /* Defer Count */
+#define HV_DC_LOWER	PHY_REG(HV_STATS_PAGE, 28)
+#define HV_TNCRS_UPPER	PHY_REG(HV_STATS_PAGE, 29) /* Transmit with no CRS */
+#define HV_TNCRS_LOWER	PHY_REG(HV_STATS_PAGE, 30)
 
 #define E1000_FCRTV_PCH     0x05F40 /* PCH Flow Control Refresh Timer Value */
 
@@ -533,7 +535,8 @@
 						 bool state);
 extern void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw);
 extern void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw);
-extern void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw);
+extern void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw);
+extern void e1000_resume_workarounds_pchlan(struct e1000_hw *hw);
 extern s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable);
 extern s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable);
 extern void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw);
@@ -584,6 +587,7 @@
 extern s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw);
 extern s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw);
 extern s32 e1000e_get_phy_info_igp(struct e1000_hw *hw);
+extern s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page);
 extern s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data);
 extern s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset,
                                           u16 *data);
@@ -604,6 +608,10 @@
 extern s32 e1000e_determine_phy_address(struct e1000_hw *hw);
 extern s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data);
 extern s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data);
+extern s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw,
+						 u16 *phy_reg);
+extern s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw,
+						  u16 *phy_reg);
 extern s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data);
 extern s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data);
 extern void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl);
@@ -624,9 +632,13 @@
 extern s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data);
 extern s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset,
                                         u16 *data);
+extern s32 e1000_read_phy_reg_page_hv(struct e1000_hw *hw, u32 offset,
+				      u16 *data);
 extern s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data);
 extern s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset,
                                          u16 data);
+extern s32 e1000_write_phy_reg_page_hv(struct e1000_hw *hw, u32 offset,
+				       u16 data);
 extern s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw);
 extern s32 e1000_copper_link_setup_82577(struct e1000_hw *hw);
 extern s32 e1000_check_polarity_82577(struct e1000_hw *hw);
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
index f4bbeb2..c0ecb2d 100644
--- a/drivers/net/e1000e/es2lan.c
+++ b/drivers/net/e1000e/es2lan.c
@@ -836,6 +836,7 @@
 	struct e1000_mac_info *mac = &hw->mac;
 	u32 reg_data;
 	s32 ret_val;
+	u16 kum_reg_data;
 	u16 i;
 
 	e1000_initialize_hw_bits_80003es2lan(hw);
@@ -861,6 +862,13 @@
 	/* Setup link and flow control */
 	ret_val = e1000e_setup_link(hw);
 
+	/* Disable IBIST slave mode (far-end loopback) */
+	e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
+					&kum_reg_data);
+	kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE;
+	e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
+					 kum_reg_data);
+
 	/* Set the transmit descriptor write-back policy */
 	reg_data = er32(TXDCTL(0));
 	reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index 859d0d3..cb1a362 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -28,6 +28,7 @@
 
 /* ethtool support for e1000 */
 
+#include <linux/interrupt.h>
 #include <linux/netdevice.h>
 #include <linux/ethtool.h>
 #include <linux/pci.h>
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index 6c2fa83..2967039 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -246,6 +246,7 @@
 #define BM_WUC_ENABLE_REG		17
 #define BM_WUC_ENABLE_BIT		(1 << 2)
 #define BM_WUC_HOST_WU_BIT		(1 << 4)
+#define BM_WUC_ME_WU_BIT		(1 << 5)
 
 #define BM_WUC	PHY_REG(BM_WUC_PAGE, 1)
 #define BM_WUFC PHY_REG(BM_WUC_PAGE, 2)
@@ -312,6 +313,7 @@
 #define E1000_KMRNCTRLSTA_DIAG_OFFSET	0x3    /* Kumeran Diagnostic */
 #define E1000_KMRNCTRLSTA_TIMEOUTS	0x4    /* Kumeran Timeouts */
 #define E1000_KMRNCTRLSTA_INBAND_PARAM	0x9    /* Kumeran InBand Parameters */
+#define E1000_KMRNCTRLSTA_IBIST_DISABLE	0x0200 /* Kumeran IBIST Disable */
 #define E1000_KMRNCTRLSTA_DIAG_NELPBK	0x1000 /* Nearend Loopback mode */
 #define E1000_KMRNCTRLSTA_K1_CONFIG	0x7
 #define E1000_KMRNCTRLSTA_K1_ENABLE	0x0002
@@ -777,7 +779,21 @@
 	s32  (*read_mac_addr)(struct e1000_hw *);
 };
 
-/* Function pointers for the PHY. */
+/*
+ * When to use various PHY register access functions:
+ *
+ *                 Func   Caller
+ *   Function      Does   Does    When to use
+ *   ~~~~~~~~~~~~  ~~~~~  ~~~~~~  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *   X_reg         L,P,A  n/a     for simple PHY reg accesses
+ *   X_reg_locked  P,A    L       for multiple accesses of different regs
+ *                                on different pages
+ *   X_reg_page    A      L,P     for multiple accesses of different regs
+ *                                on the same page
+ *
+ * Where X=[read|write], L=locking, P=sets page, A=register access
+ *
+ */
 struct e1000_phy_operations {
 	s32  (*acquire)(struct e1000_hw *);
 	s32  (*cfg_on_link_up)(struct e1000_hw *);
@@ -788,14 +804,17 @@
 	s32  (*get_cfg_done)(struct e1000_hw *hw);
 	s32  (*get_cable_length)(struct e1000_hw *);
 	s32  (*get_info)(struct e1000_hw *);
+	s32  (*set_page)(struct e1000_hw *, u16);
 	s32  (*read_reg)(struct e1000_hw *, u32, u16 *);
 	s32  (*read_reg_locked)(struct e1000_hw *, u32, u16 *);
+	s32  (*read_reg_page)(struct e1000_hw *, u32, u16 *);
 	void (*release)(struct e1000_hw *);
 	s32  (*reset)(struct e1000_hw *);
 	s32  (*set_d0_lplu_state)(struct e1000_hw *, bool);
 	s32  (*set_d3_lplu_state)(struct e1000_hw *, bool);
 	s32  (*write_reg)(struct e1000_hw *, u32, u16);
 	s32  (*write_reg_locked)(struct e1000_hw *, u32, u16);
+	s32  (*write_reg_page)(struct e1000_hw *, u32, u16);
 	void (*power_up)(struct e1000_hw *);
 	void (*power_down)(struct e1000_hw *);
 };
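/*
 * Illustrative sketch, not part of the patch: the three access tiers from
 * the table above, using ops the series installs for PCH parts (read side
 * shown; the write side mirrors it).
 */
static s32 example_phy_access(struct e1000_hw *hw)
{
	u16 v;
	s32 ret_val;

	/* simple access: the op acquires the lock and sets the page itself */
	ret_val = hw->phy.ops.read_reg(hw, HV_SCC_LOWER, &v);
	if (ret_val)
		return ret_val;

	/* several regs on different pages: caller holds the lock once */
	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;
	ret_val = hw->phy.ops.read_reg_locked(hw, HV_SCC_LOWER, &v);
	if (ret_val)
		goto release;

	/* several regs on one page: caller also sets the page once */
	ret_val = hw->phy.ops.set_page(hw, HV_STATS_PAGE << IGP_PAGE_SHIFT);
	if (ret_val)
		goto release;
	ret_val = hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &v);

release:
	hw->phy.ops.release(hw);
	return ret_val;
}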
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 3369d1f..c175212 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -275,6 +275,19 @@
 #define ew16flash(reg,val)	__ew16flash(hw, (reg), (val))
 #define ew32flash(reg,val)	__ew32flash(hw, (reg), (val))
 
+static void e1000_toggle_lanphypc_value_ich8lan(struct e1000_hw *hw)
+{
+	u32 ctrl;
+
+	ctrl = er32(CTRL);
+	ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE;
+	ctrl &= ~E1000_CTRL_LANPHYPC_VALUE;
+	ew32(CTRL, ctrl);
+	udelay(10);
+	ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
+	ew32(CTRL, ctrl);
+}
+
 /**
  *  e1000_init_phy_params_pchlan - Initialize PHY function pointers
  *  @hw: pointer to the HW structure
@@ -284,18 +297,21 @@
 static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
 {
 	struct e1000_phy_info *phy = &hw->phy;
-	u32 ctrl, fwsm;
+	u32 fwsm;
 	s32 ret_val = 0;
 
 	phy->addr                     = 1;
 	phy->reset_delay_us           = 100;
 
+	phy->ops.set_page             = e1000_set_page_igp;
 	phy->ops.read_reg             = e1000_read_phy_reg_hv;
 	phy->ops.read_reg_locked      = e1000_read_phy_reg_hv_locked;
+	phy->ops.read_reg_page        = e1000_read_phy_reg_page_hv;
 	phy->ops.set_d0_lplu_state    = e1000_set_lplu_state_pchlan;
 	phy->ops.set_d3_lplu_state    = e1000_set_lplu_state_pchlan;
 	phy->ops.write_reg            = e1000_write_phy_reg_hv;
 	phy->ops.write_reg_locked     = e1000_write_phy_reg_hv_locked;
+	phy->ops.write_reg_page       = e1000_write_phy_reg_page_hv;
 	phy->ops.power_up             = e1000_power_up_phy_copper;
 	phy->ops.power_down           = e1000_power_down_phy_copper_ich8lan;
 	phy->autoneg_mask             = AUTONEG_ADVERTISE_SPEED_DEFAULT;
@@ -308,13 +324,7 @@
 	 */
 	fwsm = er32(FWSM);
 	if (!(fwsm & E1000_ICH_FWSM_FW_VALID) && !e1000_check_reset_block(hw)) {
-		ctrl = er32(CTRL);
-		ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE;
-		ctrl &= ~E1000_CTRL_LANPHYPC_VALUE;
-		ew32(CTRL, ctrl);
-		udelay(10);
-		ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
-		ew32(CTRL, ctrl);
+		e1000_toggle_lanphypc_value_ich8lan(hw);
 		msleep(50);
 
 		/*
@@ -882,8 +892,13 @@
 	u32 extcnf_ctrl;
 
 	extcnf_ctrl = er32(EXTCNF_CTRL);
-	extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
-	ew32(EXTCNF_CTRL, extcnf_ctrl);
+
+	if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
+		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
+		ew32(EXTCNF_CTRL, extcnf_ctrl);
+	} else {
+		e_dbg("Semaphore unexpectedly released by sw/fw/hw\n");
+	}
 
 	mutex_unlock(&swflag_mutex);
 }
@@ -1376,14 +1391,11 @@
 	ret_val = hw->phy.ops.acquire(hw);
 	if (ret_val)
 		goto out;
-	ret_val = hw->phy.ops.read_reg_locked(hw,
-	                                      PHY_REG(BM_PORT_CTRL_PAGE, 17),
-	                                      &phy_data);
+	ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data);
 	if (ret_val)
 		goto release;
-	ret_val = hw->phy.ops.write_reg_locked(hw,
-	                                       PHY_REG(BM_PORT_CTRL_PAGE, 17),
-	                                       phy_data & 0x00FF);
+	ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG,
+					       phy_data & 0x00FF);
 release:
 	hw->phy.ops.release(hw);
 out:
@@ -1397,17 +1409,36 @@
 void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
 {
 	u32 mac_reg;
-	u16 i;
+	u16 i, phy_reg = 0;
+	s32 ret_val;
+
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		return;
+	ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
+	if (ret_val)
+		goto release;
 
 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H (+4) to PHY */
 	for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
 		mac_reg = er32(RAL(i));
-		e1e_wphy(hw, BM_RAR_L(i), (u16)(mac_reg & 0xFFFF));
-		e1e_wphy(hw, BM_RAR_M(i), (u16)((mac_reg >> 16) & 0xFFFF));
+		hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
+					   (u16)(mac_reg & 0xFFFF));
+		hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
+					   (u16)((mac_reg >> 16) & 0xFFFF));
+
 		mac_reg = er32(RAH(i));
-		e1e_wphy(hw, BM_RAR_H(i), (u16)(mac_reg & 0xFFFF));
-		e1e_wphy(hw, BM_RAR_CTRL(i), (u16)((mac_reg >> 16) & 0x8000));
+		hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
+					   (u16)(mac_reg & 0xFFFF));
+		hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
+					   (u16)((mac_reg & E1000_RAH_AV)
+						 >> 16));
 	}
+
+	e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
+
+release:
+	hw->phy.ops.release(hw);
 }
 
 /**
@@ -1726,9 +1757,12 @@
 		break;
 	}
 
-	/* Dummy read to clear the phy wakeup bit after lcd reset */
-	if (hw->mac.type >= e1000_pchlan)
-		e1e_rphy(hw, BM_WUC, &reg);
+	/* Clear the host wakeup bit after lcd reset */
+	if (hw->mac.type >= e1000_pchlan) {
+		e1e_rphy(hw, BM_PORT_GEN_CFG, &reg);
+		reg &= ~BM_WUC_HOST_WU_BIT;
+		e1e_wphy(hw, BM_PORT_GEN_CFG, reg);
+	}
 
 	/* Configure the LCD with the extended configuration region in NVM */
 	ret_val = e1000_sw_lcd_config_ich8lan(hw);
@@ -3059,7 +3093,7 @@
 	msleep(20);
 
 	if (!ret_val)
-		e1000_release_swflag_ich8lan(hw);
+		mutex_unlock(&swflag_mutex);
 
 	if (ctrl & E1000_CTRL_PHY_RST) {
 		ret_val = hw->phy.ops.get_cfg_done(hw);
@@ -3127,11 +3161,13 @@
 
 	/*
 	 * The 82578 Rx buffer will stall if wakeup is enabled in host and
-	 * the ME.  Reading the BM_WUC register will clear the host wakeup bit.
+	 * the ME.  Disable wakeup by clearing the host wakeup bit.
 	 * Reset the phy after disabling host wakeup to reset the Rx buffer.
 	 */
 	if (hw->phy.type == e1000_phy_82578) {
-		e1e_rphy(hw, BM_WUC, &i);
+		e1e_rphy(hw, BM_PORT_GEN_CFG, &i);
+		i &= ~BM_WUC_HOST_WU_BIT;
+		e1e_wphy(hw, BM_PORT_GEN_CFG, i);
 		ret_val = e1000_phy_hw_reset_ich8lan(hw);
 		if (ret_val)
 			return ret_val;
@@ -3586,17 +3622,16 @@
 }
 
 /**
- *  e1000e_disable_gig_wol_ich8lan - disable gig during WoL
+ *  e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
  *  @hw: pointer to the HW structure
  *
  *  During S0 to Sx transition, it is possible the link remains at gig
  *  instead of negotiating to a lower speed.  Before going to Sx, set
  *  'LPLU Enabled' and 'Gig Disable' to force link speed negotiation
- *  to a lower speed.
- *
- *  Should only be called for applicable parts.
+ *  to a lower speed.  For PCH and newer parts, the OEM bits PHY register
+ *  (LED, GbE disable and LPLU configurations) also needs to be written.
  **/
-void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw)
+void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
 {
 	u32 phy_ctrl;
 	s32 ret_val;
@@ -3616,6 +3651,60 @@
 }
 
 /**
+ *  e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
+ *  @hw: pointer to the HW structure
+ *
+ *  During Sx to S0 transitions on non-managed devices or managed devices
+ *  on which PHY resets are not blocked, if the PHY registers cannot be
+ *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
+ *  the PHY.
+ **/
+void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
+{
+	u32 fwsm;
+
+	if (hw->mac.type != e1000_pch2lan)
+		return;
+
+	fwsm = er32(FWSM);
+	if (!(fwsm & E1000_ICH_FWSM_FW_VALID) || !e1000_check_reset_block(hw)) {
+		u16 phy_id1, phy_id2;
+		s32 ret_val;
+
+		ret_val = hw->phy.ops.acquire(hw);
+		if (ret_val) {
+			e_dbg("Failed to acquire PHY semaphore in resume\n");
+			return;
+		}
+
+		/* Test access to the PHY registers by reading the ID regs */
+		ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_id1);
+		if (ret_val)
+			goto release;
+		ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_id2);
+		if (ret_val)
+			goto release;
+
+		if (hw->phy.id == ((u32)(phy_id1 << 16) |
+				   (u32)(phy_id2 & PHY_REVISION_MASK)))
+			goto release;
+
+		e1000_toggle_lanphypc_value_ich8lan(hw);
+
+		hw->phy.ops.release(hw);
+		msleep(50);
+		e1000_phy_hw_reset(hw);
+		msleep(50);
+		return;
+	}
+
+	return;
+
+release:
+	hw->phy.ops.release(hw);
+
+	return;
+}
+
+/**
  *  e1000_cleanup_led_ich8lan - Restore the default LED operation
  *  @hw: pointer to the HW structure
  *
@@ -3832,6 +3921,7 @@
 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
 {
 	u16 phy_data;
+	s32 ret_val;
 
 	e1000e_clear_hw_cntrs_base(hw);
 
@@ -3853,20 +3943,29 @@
 	if ((hw->phy.type == e1000_phy_82578) ||
 	    (hw->phy.type == e1000_phy_82579) ||
 	    (hw->phy.type == e1000_phy_82577)) {
-		e1e_rphy(hw, HV_SCC_UPPER, &phy_data);
-		e1e_rphy(hw, HV_SCC_LOWER, &phy_data);
-		e1e_rphy(hw, HV_ECOL_UPPER, &phy_data);
-		e1e_rphy(hw, HV_ECOL_LOWER, &phy_data);
-		e1e_rphy(hw, HV_MCC_UPPER, &phy_data);
-		e1e_rphy(hw, HV_MCC_LOWER, &phy_data);
-		e1e_rphy(hw, HV_LATECOL_UPPER, &phy_data);
-		e1e_rphy(hw, HV_LATECOL_LOWER, &phy_data);
-		e1e_rphy(hw, HV_COLC_UPPER, &phy_data);
-		e1e_rphy(hw, HV_COLC_LOWER, &phy_data);
-		e1e_rphy(hw, HV_DC_UPPER, &phy_data);
-		e1e_rphy(hw, HV_DC_LOWER, &phy_data);
-		e1e_rphy(hw, HV_TNCRS_UPPER, &phy_data);
-		e1e_rphy(hw, HV_TNCRS_LOWER, &phy_data);
+		ret_val = hw->phy.ops.acquire(hw);
+		if (ret_val)
+			return;
+		ret_val = hw->phy.ops.set_page(hw,
+					       HV_STATS_PAGE << IGP_PAGE_SHIFT);
+		if (ret_val)
+			goto release;
+		hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
+		hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
+		hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
+		hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
+		hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
+		hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
+		hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
+		hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
+		hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
+		hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
+		hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
+		hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
+		hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
+		hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
+release:
+		hw->phy.ops.release(hw);
 	}
 }
 
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 3310c3d..3bf5249 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -31,6 +31,7 @@
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/pci.h>
 #include <linux/vmalloc.h>
 #include <linux/pagemap.h>
@@ -53,9 +54,9 @@
 
 #include "e1000.h"
 
-#define DRV_EXTRAVERSION "-k2"
+#define DRV_EXTRAVERSION "-k"
 
-#define DRV_VERSION "1.3.10" DRV_EXTRAVERSION
+#define DRV_VERSION "1.3.16" DRV_EXTRAVERSION
 char e1000e_driver_name[] = "e1000e";
 const char e1000e_driver_version[] = DRV_VERSION;
 
@@ -3346,7 +3347,7 @@
 		e1000_configure_msix(adapter);
 	e1000_irq_enable(adapter);
 
-	netif_wake_queue(adapter->netdev);
+	netif_start_queue(adapter->netdev);
 
 	/* fire a link change interrupt to start the watchdog */
 	if (adapter->msix_entries)
@@ -3413,17 +3414,16 @@
 	e1000e_update_stats(adapter);
 	spin_unlock(&adapter->stats64_lock);
 
+	e1000e_flush_descriptors(adapter);
+	e1000_clean_tx_ring(adapter);
+	e1000_clean_rx_ring(adapter);
+
 	adapter->link_speed = 0;
 	adapter->link_duplex = 0;
 
 	if (!pci_channel_offline(adapter->pdev))
 		e1000e_reset(adapter);
 
-	e1000e_flush_descriptors(adapter);
-
-	e1000_clean_tx_ring(adapter);
-	e1000_clean_rx_ring(adapter);
-
 	/*
 	 * TODO: for power management, we could drop the link and
 	 * pci_disable_device here.
@@ -3833,6 +3833,8 @@
 /**
  * e1000e_update_phy_stats - Update the PHY statistics counters
  * @adapter: board private structure
+ *
+ * Read/clear the upper 16-bit PHY registers and read/accumulate the
+ * lower 16-bit registers.
  **/
 static void e1000e_update_phy_stats(struct e1000_adapter *adapter)
 {
@@ -3844,89 +3846,61 @@
 	if (ret_val)
 		return;
 
-	hw->phy.addr = 1;
-
-#define HV_PHY_STATS_PAGE	778
 	/*
 	 * A page set is expensive so check if already on desired page.
 	 * If not, set to the page with the PHY status registers.
 	 */
+	hw->phy.addr = 1;
 	ret_val = e1000e_read_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
 					   &phy_data);
 	if (ret_val)
 		goto release;
-	if (phy_data != (HV_PHY_STATS_PAGE << IGP_PAGE_SHIFT)) {
-		ret_val = e1000e_write_phy_reg_mdic(hw,
-						    IGP01E1000_PHY_PAGE_SELECT,
-						    (HV_PHY_STATS_PAGE <<
-						     IGP_PAGE_SHIFT));
+	if (phy_data != (HV_STATS_PAGE << IGP_PAGE_SHIFT)) {
+		ret_val = hw->phy.ops.set_page(hw,
+					       HV_STATS_PAGE << IGP_PAGE_SHIFT);
 		if (ret_val)
 			goto release;
 	}
 
-	/* Read/clear the upper 16-bit registers and read/accumulate lower */
-
 	/* Single Collision Count */
-	e1000e_read_phy_reg_mdic(hw, HV_SCC_UPPER & MAX_PHY_REG_ADDRESS,
-				 &phy_data);
-	ret_val = e1000e_read_phy_reg_mdic(hw,
-					   HV_SCC_LOWER & MAX_PHY_REG_ADDRESS,
-					   &phy_data);
+	hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
+	ret_val = hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
 	if (!ret_val)
 		adapter->stats.scc += phy_data;
 
 	/* Excessive Collision Count */
-	e1000e_read_phy_reg_mdic(hw, HV_ECOL_UPPER & MAX_PHY_REG_ADDRESS,
-				 &phy_data);
-	ret_val = e1000e_read_phy_reg_mdic(hw,
-					   HV_ECOL_LOWER & MAX_PHY_REG_ADDRESS,
-					   &phy_data);
+	hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
+	ret_val = hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
 	if (!ret_val)
 		adapter->stats.ecol += phy_data;
 
 	/* Multiple Collision Count */
-	e1000e_read_phy_reg_mdic(hw, HV_MCC_UPPER & MAX_PHY_REG_ADDRESS,
-				 &phy_data);
-	ret_val = e1000e_read_phy_reg_mdic(hw,
-					   HV_MCC_LOWER & MAX_PHY_REG_ADDRESS,
-					   &phy_data);
+	hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
+	ret_val = hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
 	if (!ret_val)
 		adapter->stats.mcc += phy_data;
 
 	/* Late Collision Count */
-	e1000e_read_phy_reg_mdic(hw, HV_LATECOL_UPPER & MAX_PHY_REG_ADDRESS,
-				 &phy_data);
-	ret_val = e1000e_read_phy_reg_mdic(hw,
-					   HV_LATECOL_LOWER &
-					   MAX_PHY_REG_ADDRESS,
-					   &phy_data);
+	hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
+	ret_val = hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
 	if (!ret_val)
 		adapter->stats.latecol += phy_data;
 
 	/* Collision Count - also used for adaptive IFS */
-	e1000e_read_phy_reg_mdic(hw, HV_COLC_UPPER & MAX_PHY_REG_ADDRESS,
-				 &phy_data);
-	ret_val = e1000e_read_phy_reg_mdic(hw,
-					   HV_COLC_LOWER & MAX_PHY_REG_ADDRESS,
-					   &phy_data);
+	hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
+	ret_val = hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
 	if (!ret_val)
 		hw->mac.collision_delta = phy_data;
 
 	/* Defer Count */
-	e1000e_read_phy_reg_mdic(hw, HV_DC_UPPER & MAX_PHY_REG_ADDRESS,
-				 &phy_data);
-	ret_val = e1000e_read_phy_reg_mdic(hw,
-					   HV_DC_LOWER & MAX_PHY_REG_ADDRESS,
-					   &phy_data);
+	hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
+	ret_val = hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
 	if (!ret_val)
 		adapter->stats.dc += phy_data;
 
 	/* Transmit with no CRS */
-	e1000e_read_phy_reg_mdic(hw, HV_TNCRS_UPPER & MAX_PHY_REG_ADDRESS,
-				 &phy_data);
-	ret_val = e1000e_read_phy_reg_mdic(hw,
-					   HV_TNCRS_LOWER & MAX_PHY_REG_ADDRESS,
-					   &phy_data);
+	hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
+	ret_val = hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
 	if (!ret_val)
 		adapter->stats.tncrs += phy_data;
 
@@ -5154,21 +5128,34 @@
 {
 	struct e1000_hw *hw = &adapter->hw;
 	u32 i, mac_reg;
-	u16 phy_reg;
+	u16 phy_reg, wuc_enable;
 	int retval = 0;
 
 	/* copy MAC RARs to PHY RARs */
 	e1000_copy_rx_addrs_to_phy_ich8lan(hw);
 
-	/* copy MAC MTA to PHY MTA */
+	retval = hw->phy.ops.acquire(hw);
+	if (retval) {
+		e_err("Could not acquire PHY\n");
+		return retval;
+	}
+
+	/* Enable access to wakeup registers and set page to BM_WUC_PAGE */
+	retval = e1000_enable_phy_wakeup_reg_access_bm(hw, &wuc_enable);
+	if (retval)
+		goto out;
+
+	/* copy MAC MTA to PHY MTA - only needed for pchlan */
 	for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
 		mac_reg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
-		e1e_wphy(hw, BM_MTA(i), (u16)(mac_reg & 0xFFFF));
-		e1e_wphy(hw, BM_MTA(i) + 1, (u16)((mac_reg >> 16) & 0xFFFF));
+		hw->phy.ops.write_reg_page(hw, BM_MTA(i),
+					   (u16)(mac_reg & 0xFFFF));
+		hw->phy.ops.write_reg_page(hw, BM_MTA(i) + 1,
+					   (u16)((mac_reg >> 16) & 0xFFFF));
 	}
 
 	/* configure PHY Rx Control register */
-	e1e_rphy(&adapter->hw, BM_RCTL, &phy_reg);
+	hw->phy.ops.read_reg_page(&adapter->hw, BM_RCTL, &phy_reg);
 	mac_reg = er32(RCTL);
 	if (mac_reg & E1000_RCTL_UPE)
 		phy_reg |= BM_RCTL_UPE;
@@ -5185,31 +5172,19 @@
 	mac_reg = er32(CTRL);
 	if (mac_reg & E1000_CTRL_RFCE)
 		phy_reg |= BM_RCTL_RFCE;
-	e1e_wphy(&adapter->hw, BM_RCTL, phy_reg);
+	hw->phy.ops.write_reg_page(&adapter->hw, BM_RCTL, phy_reg);
 
 	/* enable PHY wakeup in MAC register */
 	ew32(WUFC, wufc);
 	ew32(WUC, E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);
 
 	/* configure and enable PHY wakeup in PHY registers */
-	e1e_wphy(&adapter->hw, BM_WUFC, wufc);
-	e1e_wphy(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);
+	hw->phy.ops.write_reg_page(&adapter->hw, BM_WUFC, wufc);
+	hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);
 
 	/* activate PHY wakeup */
-	retval = hw->phy.ops.acquire(hw);
-	if (retval) {
-		e_err("Could not acquire PHY\n");
-		return retval;
-	}
-	e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
-	                         (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT));
-	retval = e1000e_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &phy_reg);
-	if (retval) {
-		e_err("Could not read PHY page 769\n");
-		goto out;
-	}
-	phy_reg |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
-	retval = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg);
+	wuc_enable |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
+	retval = e1000_disable_phy_wakeup_reg_access_bm(hw, &wuc_enable);
 	if (retval)
 		e_err("Could not set PHY Host Wakeup bit\n");
 out:
@@ -5277,7 +5252,7 @@
 		}
 
 		if (adapter->flags & FLAG_IS_ICH)
-			e1000e_disable_gig_wol_ich8lan(&adapter->hw);
+			e1000_suspend_workarounds_ich8lan(&adapter->hw);
 
 		/* Allow time for pending master requests to run */
 		e1000e_disable_pcie_master(&adapter->hw);
@@ -5428,6 +5403,9 @@
 			return err;
 	}
 
+	if (hw->mac.type == e1000_pch2lan)
+		e1000_resume_workarounds_pchlan(&adapter->hw);
+
 	e1000e_power_up_phy(adapter);
 
 	/* report the system wakeup cause from S3/S4 */
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index 484774c..2a6ee13 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -36,7 +36,7 @@
 static s32 e1000_wait_autoneg(struct e1000_hw *hw);
 static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg);
 static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
-					  u16 *data, bool read);
+					  u16 *data, bool read, bool page_set);
 static u32 e1000_get_phy_addr_for_hv_page(u32 page);
 static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
                                           u16 *data, bool read);
@@ -348,6 +348,24 @@
 }
 
 /**
+ *  e1000_set_page_igp - Set page as on IGP-like PHY(s)
+ *  @hw: pointer to the HW structure
+ *  @page: page to set (shifted left when necessary)
+ *
+ *  Sets PHY page required for PHY register access.  Assumes semaphore is
+ *  already acquired.  Note, this function sets phy.addr to 1 so the caller
+ *  must set it appropriately (if necessary) after this function returns.
+ **/
+s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page)
+{
+	e_dbg("Setting page 0x%x\n", page);
+
+	hw->phy.addr = 1;
+
+	return e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, page);
+}
+
+/**
  *  __e1000e_read_phy_reg_igp - Read igp PHY register
  *  @hw: pointer to the HW structure
  *  @offset: register offset to be read
@@ -2418,7 +2436,7 @@
 	/* Page 800 works differently than the rest so it has its own func */
 	if (page == BM_WUC_PAGE) {
 		ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
-							 false);
+							 false, false);
 		goto out;
 	}
 
@@ -2477,7 +2495,7 @@
 	/* Page 800 works differently than the rest so it has its own func */
 	if (page == BM_WUC_PAGE) {
 		ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data,
-							 true);
+							 true, false);
 		goto out;
 	}
 
@@ -2535,7 +2553,7 @@
 	/* Page 800 works differently than the rest so it has its own func */
 	if (page == BM_WUC_PAGE) {
 		ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data,
-							 true);
+							 true, false);
 		goto out;
 	}
 
@@ -2579,7 +2597,7 @@
 	/* Page 800 works differently than the rest so it has its own func */
 	if (page == BM_WUC_PAGE) {
 		ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
-							 false);
+							 false, false);
 		goto out;
 	}
 
@@ -2603,104 +2621,163 @@
 }
 
 /**
- *  e1000_access_phy_wakeup_reg_bm - Read BM PHY wakeup register
+ *  e1000_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
+ *  @hw: pointer to the HW structure
+ *  @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
+ *
+ *  Assumes semaphore already acquired and phy_reg points to a valid memory
+ *  address to store contents of the BM_WUC_ENABLE_REG register.
+ **/
+s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg)
+{
+	s32 ret_val;
+	u16 temp;
+
+	/* All page select, port ctrl and wakeup registers use phy address 1 */
+	hw->phy.addr = 1;
+
+	/* Select Port Control Registers page */
+	ret_val = e1000_set_page_igp(hw, (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT));
+	if (ret_val) {
+		e_dbg("Could not set Port Control page\n");
+		goto out;
+	}
+
+	ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg);
+	if (ret_val) {
+		e_dbg("Could not read PHY register %d.%d\n",
+		      BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG);
+		goto out;
+	}
+
+	/*
+	 * Enable both PHY wakeup mode and Wakeup register page writes.
+	 * Prevent a power state change by disabling ME and Host PHY wakeup.
+	 */
+	temp = *phy_reg;
+	temp |= BM_WUC_ENABLE_BIT;
+	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
+
+	ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, temp);
+	if (ret_val) {
+		e_dbg("Could not write PHY register %d.%d\n",
+		      BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG);
+		goto out;
+	}
+
+	/* Select Host Wakeup Registers page */
+	ret_val = e1000_set_page_igp(hw, (BM_WUC_PAGE << IGP_PAGE_SHIFT));
+
+	/* caller now able to write registers on the Wakeup registers page */
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
+ *  @hw: pointer to the HW structure
+ *  @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
+ *
+ *  Restore BM_WUC_ENABLE_REG to its original value.
+ *
+ *  Assumes semaphore already acquired and *phy_reg is the contents of the
+ *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
+ *  caller.
+ **/
+s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg)
+{
+	s32 ret_val = 0;
+
+	/* Select Port Control Registers page */
+	ret_val = e1000_set_page_igp(hw, (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT));
+	if (ret_val) {
+		e_dbg("Could not set Port Control page\n");
+		goto out;
+	}
+
+	/* Restore 769.17 to its original value */
+	ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, *phy_reg);
+	if (ret_val)
+		e_dbg("Could not restore PHY register %d.%d\n",
+		      BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG);
+out:
+	return ret_val;
+}
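/*
 * Illustrative sketch, not part of the patch: the intended bracketing of
 * the two helpers above when touching several BM_WUC_PAGE registers
 * (mirrors e1000_copy_rx_addrs_to_phy_ich8lan() elsewhere in the series).
 */
static s32 example_wuc_access(struct e1000_hw *hw)
{
	u16 wuc_enable;
	s32 ret_val;

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	/* saves 769.17, sets the enable bit, selects the wakeup page */
	ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &wuc_enable);
	if (ret_val)
		goto release;

	/* any number of accesses on the wakeup page, no re-paging needed */
	ret_val = hw->phy.ops.write_reg_page(hw, BM_WUC, E1000_WUC_PME_EN);

	/* restores 769.17 to the value saved above */
	e1000_disable_phy_wakeup_reg_access_bm(hw, &wuc_enable);
release:
	hw->phy.ops.release(hw);
	return ret_val;
}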
+
+/**
+ *  e1000_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
  *  @hw: pointer to the HW structure
  *  @offset: register offset to be read or written
  *  @data: pointer to the data to read or write
  *  @read: determines if operation is read or write
+ *  @page_set: BM_WUC_PAGE already set and access enabled
  *
- *  Acquires semaphore, if necessary, then reads the PHY register at offset
- *  and storing the retrieved information in data.  Release any acquired
- *  semaphores before exiting. Note that procedure to read the wakeup
- *  registers are different. It works as such:
- *  1) Set page 769, register 17, bit 2 = 1
+ *  Read the PHY register at offset and store the retrieved information in
+ *  data, or write data to PHY register at offset.  Note the procedure to
+ *  access the PHY wakeup registers is different than reading the other PHY
+ *  registers. It works as such:
+ *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
  *  2) Set page to 800 for host (801 if we were manageability)
  *  3) Write the address using the address opcode (0x11)
  *  4) Read or write the data using the data opcode (0x12)
- *  5) Restore 769_17.2 to its original value
+ *  5) Restore 769.17.2 to its original value
  *
- *  Assumes semaphore already acquired.
+ *  Steps 1 and 2 are done by e1000_enable_phy_wakeup_reg_access_bm() and
+ *  step 5 is done by e1000_disable_phy_wakeup_reg_access_bm().
+ *
+ *  Assumes semaphore is already acquired.  When page_set==true, assumes
+ *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
+ *  is responsible for calls to
+ *  e1000_[enable|disable]_phy_wakeup_reg_access_bm()).
  **/
 static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
-					  u16 *data, bool read)
+					  u16 *data, bool read, bool page_set)
 {
 	s32 ret_val;
 	u16 reg = BM_PHY_REG_NUM(offset);
+	u16 page = BM_PHY_REG_PAGE(offset);
 	u16 phy_reg = 0;
 
-	/* Gig must be disabled for MDIO accesses to page 800 */
+	/* Gig must be disabled for MDIO accesses to Host Wakeup reg page */
 	if ((hw->mac.type == e1000_pchlan) &&
-	   (!(er32(PHY_CTRL) & E1000_PHY_CTRL_GBE_DISABLE)))
-		e_dbg("Attempting to access page 800 while gig enabled.\n");
+	    (!(er32(PHY_CTRL) & E1000_PHY_CTRL_GBE_DISABLE)))
+		e_dbg("Attempting to access page %d while gig enabled.\n",
+		      page);
 
-	/* All operations in this function are phy address 1 */
-	hw->phy.addr = 1;
-
-	/* Set page 769 */
-	e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
-	                          (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT));
-
-	ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &phy_reg);
-	if (ret_val) {
-		e_dbg("Could not read PHY page 769\n");
-		goto out;
+	if (!page_set) {
+		/* Enable access to PHY wakeup registers */
+		ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
+		if (ret_val) {
+			e_dbg("Could not enable PHY wakeup reg access\n");
+			goto out;
+		}
 	}
 
-	/* First clear bit 4 to avoid a power state change */
-	phy_reg &= ~(BM_WUC_HOST_WU_BIT);
-	ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg);
-	if (ret_val) {
-		e_dbg("Could not clear PHY page 769 bit 4\n");
-		goto out;
-	}
+	e_dbg("Accessing PHY page %d reg 0x%x\n", page, reg);
 
-	/* Write bit 2 = 1, and clear bit 4 to 769_17 */
-	ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG,
-	                                    phy_reg | BM_WUC_ENABLE_BIT);
-	if (ret_val) {
-		e_dbg("Could not write PHY page 769 bit 2\n");
-		goto out;
-	}
-
-	/* Select page 800 */
-	ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
-	                                    (BM_WUC_PAGE << IGP_PAGE_SHIFT));
-
-	/* Write the page 800 offset value using opcode 0x11 */
+	/* Write the Wakeup register page offset value using opcode 0x11 */
 	ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ADDRESS_OPCODE, reg);
 	if (ret_val) {
-		e_dbg("Could not write address opcode to page 800\n");
+		e_dbg("Could not write address opcode to page %d\n", page);
 		goto out;
 	}
 
 	if (read) {
-	        /* Read the page 800 value using opcode 0x12 */
+		/* Read the Wakeup register page value using opcode 0x12 */
 		ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE,
 		                                   data);
 	} else {
-	        /* Write the page 800 value using opcode 0x12 */
+		/* Write the Wakeup register page value using opcode 0x12 */
 		ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE,
 						    *data);
 	}
 
 	if (ret_val) {
-		e_dbg("Could not access data value from page 800\n");
+		e_dbg("Could not access PHY reg %d.%d\n", page, reg);
 		goto out;
 	}
 
-	/*
-	 * Restore 769_17.2 to its original value
-	 * Set page 769
-	 */
-	e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
-	                          (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT));
-
-	/* Clear 769_17.2 */
-	ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg);
-	if (ret_val) {
-		e_dbg("Could not clear PHY page 769 bit 2\n");
-		goto out;
-	}
+	if (!page_set)
+		ret_val = e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
 
 out:
 	return ret_val;
@@ -2792,11 +2869,12 @@
  *  semaphore before exiting.
  **/
 static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
-                                   bool locked)
+				   bool locked, bool page_set)
 {
 	s32 ret_val;
 	u16 page = BM_PHY_REG_PAGE(offset);
 	u16 reg = BM_PHY_REG_NUM(offset);
+	u32 phy_addr = hw->phy.addr = e1000_get_phy_addr_for_hv_page(page);
 
 	if (!locked) {
 		ret_val = hw->phy.ops.acquire(hw);
@@ -2806,8 +2884,8 @@
 
 	/* Page 800 works differently than the rest so it has its own func */
 	if (page == BM_WUC_PAGE) {
-		ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset,
-		                                         data, true);
+		ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data,
+							 true, page_set);
 		goto out;
 	}
 
@@ -2817,26 +2895,25 @@
 		goto out;
 	}
 
-	hw->phy.addr = e1000_get_phy_addr_for_hv_page(page);
+	if (!page_set) {
+		if (page == HV_INTC_FC_PAGE_START)
+			page = 0;
 
-	if (page == HV_INTC_FC_PAGE_START)
-		page = 0;
+		if (reg > MAX_PHY_MULTI_PAGE_REG) {
+			/* Page is shifted left, PHY expects (page x 32) */
+			ret_val = e1000_set_page_igp(hw,
+						     (page << IGP_PAGE_SHIFT));
 
-	if (reg > MAX_PHY_MULTI_PAGE_REG) {
-		u32 phy_addr = hw->phy.addr;
+			hw->phy.addr = phy_addr;
 
-		hw->phy.addr = 1;
-
-		/* Page is shifted left, PHY expects (page x 32) */
-		ret_val = e1000e_write_phy_reg_mdic(hw,
-					     IGP01E1000_PHY_PAGE_SELECT,
-					     (page << IGP_PAGE_SHIFT));
-		hw->phy.addr = phy_addr;
-
-		if (ret_val)
-			goto out;
+			if (ret_val)
+				goto out;
+		}
 	}
 
+	e_dbg("reading PHY page %d (or 0x%x shifted) reg 0x%x\n", page,
+	      page << IGP_PAGE_SHIFT, reg);
+
 	ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
 	                                  data);
 out:
@@ -2858,7 +2935,7 @@
  **/
 s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data)
 {
-	return __e1000_read_phy_reg_hv(hw, offset, data, false);
+	return __e1000_read_phy_reg_hv(hw, offset, data, false, false);
 }
 
 /**
@@ -2872,7 +2949,21 @@
  **/
 s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 *data)
 {
-	return __e1000_read_phy_reg_hv(hw, offset, data, true);
+	return __e1000_read_phy_reg_hv(hw, offset, data, true, false);
+}
+
+/**
+ *  e1000_read_phy_reg_page_hv - Read HV PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the data to be read
+ *
+ *  Reads the PHY register at offset and stores the retrieved information
+ *  in data.  Assumes semaphore already acquired and page already set.
+ **/
+s32 e1000_read_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	return __e1000_read_phy_reg_hv(hw, offset, data, true, true);
 }
 
 /**
@@ -2886,11 +2977,12 @@
  *  at the offset.  Release any acquired semaphores before exiting.
  **/
 static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
-                                    bool locked)
+				    bool locked, bool page_set)
 {
 	s32 ret_val;
 	u16 page = BM_PHY_REG_PAGE(offset);
 	u16 reg = BM_PHY_REG_NUM(offset);
+	u32 phy_addr = hw->phy.addr = e1000_get_phy_addr_for_hv_page(page);
 
 	if (!locked) {
 		ret_val = hw->phy.ops.acquire(hw);
@@ -2900,8 +2992,8 @@
 
 	/* Page 800 works differently than the rest so it has its own func */
 	if (page == BM_WUC_PAGE) {
-		ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset,
-		                                         &data, false);
+		ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
+							 false, page_set);
 		goto out;
 	}
 
@@ -2911,41 +3003,40 @@
 		goto out;
 	}
 
-	hw->phy.addr = e1000_get_phy_addr_for_hv_page(page);
+	if (!page_set) {
+		if (page == HV_INTC_FC_PAGE_START)
+			page = 0;
 
-	if (page == HV_INTC_FC_PAGE_START)
-		page = 0;
+		/*
+		 * Workaround MDIO accesses being disabled after entering IEEE
+		 * Power Down (when bit 11 of the PHY Control register is set)
+		 */
+		if ((hw->phy.type == e1000_phy_82578) &&
+		    (hw->phy.revision >= 1) &&
+		    (hw->phy.addr == 2) &&
+		    ((MAX_PHY_REG_ADDRESS & reg) == 0) && (data & (1 << 11))) {
+			u16 data2 = 0x7EFF;
+			ret_val = e1000_access_phy_debug_regs_hv(hw,
+								 (1 << 6) | 0x3,
+								 &data2, false);
+			if (ret_val)
+				goto out;
+		}
 
-	/*
-	 * Workaround MDIO accesses being disabled after entering IEEE Power
-	 * Down (whenever bit 11 of the PHY Control register is set)
-	 */
-	if ((hw->phy.type == e1000_phy_82578) &&
-	    (hw->phy.revision >= 1) &&
-	    (hw->phy.addr == 2) &&
-	    ((MAX_PHY_REG_ADDRESS & reg) == 0) &&
-	    (data & (1 << 11))) {
-		u16 data2 = 0x7EFF;
-		ret_val = e1000_access_phy_debug_regs_hv(hw, (1 << 6) | 0x3,
-		                                         &data2, false);
-		if (ret_val)
-			goto out;
+		if (reg > MAX_PHY_MULTI_PAGE_REG) {
+			/* Page is shifted left, PHY expects (page x 32) */
+			ret_val = e1000_set_page_igp(hw,
+						     (page << IGP_PAGE_SHIFT));
+
+			hw->phy.addr = phy_addr;
+
+			if (ret_val)
+				goto out;
+		}
 	}
 
-	if (reg > MAX_PHY_MULTI_PAGE_REG) {
-		u32 phy_addr = hw->phy.addr;
-
-		hw->phy.addr = 1;
-
-		/* Page is shifted left, PHY expects (page x 32) */
-		ret_val = e1000e_write_phy_reg_mdic(hw,
-					     IGP01E1000_PHY_PAGE_SELECT,
-					     (page << IGP_PAGE_SHIFT));
-		hw->phy.addr = phy_addr;
-
-		if (ret_val)
-			goto out;
-	}
+	e_dbg("writing PHY page %d (or 0x%x shifted) reg 0x%x\n", page,
+	      page << IGP_PAGE_SHIFT, reg);
 
 	ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
 	                                  data);
@@ -2968,7 +3059,7 @@
  **/
 s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data)
 {
-	return __e1000_write_phy_reg_hv(hw, offset, data, false);
+	return __e1000_write_phy_reg_hv(hw, offset, data, false, false);
 }
 
 /**
@@ -2982,7 +3073,21 @@
  **/
 s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 data)
 {
-	return __e1000_write_phy_reg_hv(hw, offset, data, true);
+	return __e1000_write_phy_reg_hv(hw, offset, data, true, false);
+}
+
+/**
+ *  e1000_write_phy_reg_page_hv - Write HV PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Writes the data to PHY register at the offset.  Assumes semaphore
+ *  already acquired and page already set.
+ **/
+s32 e1000_write_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	return __e1000_write_phy_reg_hv(hw, offset, data, true, true);
 }
 
 /**
@@ -3004,11 +3109,12 @@
  *  @hw: pointer to the HW structure
  *  @offset: register offset to be read or written
  *  @data: pointer to the data to be read or written
- *  @read: determines if operation is read or written
+ *  @read: determines if operation is read or write
  *
 *  Reads the PHY register at offset and stores the retrieved information
  *  in data.  Assumes semaphore already acquired.  Note that the procedure
- *  to read these regs uses the address port and data port to read/write.
+ *  to access these regs uses the address port and data port to read/write.
+ *  These accesses are done with PHY address 2 and without using pages.
  **/
 static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
                                           u16 *data, bool read)
@@ -3028,7 +3134,7 @@
 	/* masking with 0x3F to remove the page from offset */
 	ret_val = e1000e_write_phy_reg_mdic(hw, addr_reg, (u16)offset & 0x3F);
 	if (ret_val) {
-		e_dbg("Could not write PHY the HV address register\n");
+		e_dbg("Could not write the Address Offset port register\n");
 		goto out;
 	}
 
@@ -3039,7 +3145,7 @@
 		ret_val = e1000e_write_phy_reg_mdic(hw, data_reg, *data);
 
 	if (ret_val) {
-		e_dbg("Could not read data value from HV data register\n");
+		e_dbg("Could not access the Data port register\n");
 		goto out;
 	}
 
diff --git a/drivers/net/e2100.c b/drivers/net/e2100.c
index 94ec973..d50a999 100644
--- a/drivers/net/e2100.c
+++ b/drivers/net/e2100.c
@@ -44,6 +44,7 @@
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/delay.h>
 
 #include <asm/io.h>
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index 38b351c..f747bce 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -32,7 +32,7 @@
 
 #define DRV_NAME		"enic"
 #define DRV_DESCRIPTION		"Cisco VIC Ethernet NIC Driver"
-#define DRV_VERSION		"2.1.1.13"
+#define DRV_VERSION		"2.1.1.20"
 #define DRV_COPYRIGHT		"Copyright 2008-2011 Cisco Systems, Inc"
 
 #define ENIC_BARS_MAX		6
@@ -74,6 +74,7 @@
 	struct vnic_dev *vdev;
 	struct timer_list notify_timer;
 	struct work_struct reset;
+	struct work_struct change_mtu_work;
 	struct msix_entry msix_entry[ENIC_INTR_MAX];
 	struct enic_msix_entry msix[ENIC_INTR_MAX];
 	u32 msg_enable;
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index 2f433fb..4b3a93a 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -23,6 +23,7 @@
 #include <linux/errno.h>
 #include <linux/types.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/workqueue.h>
 #include <linux/pci.h>
 #include <linux/netdevice.h>
@@ -152,12 +153,12 @@
 
 static inline unsigned int enic_msix_rq_intr(struct enic *enic, unsigned int rq)
 {
-	return rq;
+	return enic->cq[enic_cq_rq(enic, rq)].interrupt_offset;
 }
 
 static inline unsigned int enic_msix_wq_intr(struct enic *enic, unsigned int wq)
 {
-	return enic->rq_count + wq;
+	return enic->cq[enic_cq_wq(enic, wq)].interrupt_offset;
 }
 
 static inline unsigned int enic_msix_err_intr(struct enic *enic)
@@ -423,11 +424,18 @@
 
 	if (mtu && mtu != enic->port_mtu) {
 		enic->port_mtu = mtu;
-		if (mtu < netdev->mtu)
-			netdev_warn(netdev,
-				"interface MTU (%d) set higher "
-				"than switch port MTU (%d)\n",
-				netdev->mtu, mtu);
+		if (enic_is_dynamic(enic)) {
+			mtu = max_t(int, ENIC_MIN_MTU,
+				min_t(int, ENIC_MAX_MTU, mtu));
+			if (mtu != netdev->mtu)
+				schedule_work(&enic->change_mtu_work);
+		} else {
+			if (mtu < netdev->mtu)
+				netdev_warn(netdev,
+					"interface MTU (%d) set higher "
+					"than switch port MTU (%d)\n",
+					netdev->mtu, mtu);
+		}
 	}
 }
 
@@ -793,10 +801,10 @@
 }
 
 /* dev_base_lock rwlock held, nominally process context */
-static struct net_device_stats *enic_get_stats(struct net_device *netdev)
+static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev,
+						struct rtnl_link_stats64 *net_stats)
 {
 	struct enic *enic = netdev_priv(netdev);
-	struct net_device_stats *net_stats = &netdev->stats;
 	struct vnic_stats *stats;
 
 	enic_dev_stats_dump(enic, &stats);
@@ -1258,8 +1266,7 @@
 
 		skb->dev = netdev;
 
-		if (enic->vlan_group && vlan_stripped &&
-			(vlan_tci & CQ_ENET_RQ_DESC_VLAN_TCI_VLAN_MASK)) {
+		if (vlan_stripped) {
 
 			if (netdev->features & NETIF_F_GRO)
 				vlan_gro_receive(&enic->napi[q_number],
@@ -1560,7 +1567,7 @@
 	default:
 		/* Using intr for notification for INTx/MSI-X */
 		break;
-	};
+	}
 }
 
 /* rtnl lock is held, process context */
@@ -1688,6 +1695,9 @@
 	if (new_mtu < ENIC_MIN_MTU || new_mtu > ENIC_MAX_MTU)
 		return -EINVAL;
 
+	if (enic_is_dynamic(enic))
+		return -EOPNOTSUPP;
+
 	if (running)
 		enic_stop(netdev);
 
@@ -1704,6 +1714,55 @@
 	return 0;
 }
 
+static void enic_change_mtu_work(struct work_struct *work)
+{
+	struct enic *enic = container_of(work, struct enic, change_mtu_work);
+	struct net_device *netdev = enic->netdev;
+	int new_mtu = vnic_dev_mtu(enic->vdev);
+	int err;
+	unsigned int i;
+
+	new_mtu = max_t(int, ENIC_MIN_MTU, min_t(int, ENIC_MAX_MTU, new_mtu));
+
+	rtnl_lock();
+
+	/* Stop RQ */
+	del_timer_sync(&enic->notify_timer);
+
+	for (i = 0; i < enic->rq_count; i++)
+		napi_disable(&enic->napi[i]);
+
+	vnic_intr_mask(&enic->intr[0]);
+	enic_synchronize_irqs(enic);
+	err = vnic_rq_disable(&enic->rq[0]);
+	if (err) {
+		netdev_err(netdev, "Unable to disable RQ.\n");
+		rtnl_unlock();
+		return;
+	}
+	vnic_rq_clean(&enic->rq[0], enic_free_rq_buf);
+	vnic_cq_clean(&enic->cq[0]);
+	vnic_intr_clean(&enic->intr[0]);
+
+	/* Fill RQ with new_mtu-sized buffers */
+	netdev->mtu = new_mtu;
+	vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
+	/* Need at least one buffer on ring to get going */
+	if (vnic_rq_desc_used(&enic->rq[0]) == 0) {
+		netdev_err(netdev, "Unable to alloc receive buffers.\n");
+		rtnl_unlock();
+		return;
+	}
+
+	/* Start RQ */
+	vnic_rq_enable(&enic->rq[0]);
+	napi_enable(&enic->napi[0]);
+	vnic_intr_unmask(&enic->intr[0]);
+	enic_notify_timer_start(enic);
+
+	rtnl_unlock();
+
+	netdev_info(netdev, "interface MTU set to %d\n", netdev->mtu);
+}
+
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void enic_poll_controller(struct net_device *netdev)
 {
@@ -1718,8 +1777,12 @@
 			enic_isr_msix_rq(enic->msix_entry[intr].vector,
 				&enic->napi[i]);
 		}
-		intr = enic_msix_wq_intr(enic, i);
-		enic_isr_msix_wq(enic->msix_entry[intr].vector, enic);
+
+		for (i = 0; i < enic->wq_count; i++) {
+			intr = enic_msix_wq_intr(enic, i);
+			enic_isr_msix_wq(enic->msix_entry[intr].vector, enic);
+		}
+
 		break;
 	case VNIC_DEV_INTR_MODE_MSI:
 		enic_isr_msi(enic->pdev->irq, enic);
@@ -2057,7 +2120,7 @@
 	.ndo_open		= enic_open,
 	.ndo_stop		= enic_stop,
 	.ndo_start_xmit		= enic_hard_start_xmit,
-	.ndo_get_stats		= enic_get_stats,
+	.ndo_get_stats64	= enic_get_stats,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_set_rx_mode	= enic_set_rx_mode,
 	.ndo_set_multicast_list	= enic_set_rx_mode,
@@ -2079,7 +2142,7 @@
 	.ndo_open		= enic_open,
 	.ndo_stop		= enic_stop,
 	.ndo_start_xmit		= enic_hard_start_xmit,
-	.ndo_get_stats		= enic_get_stats,
+	.ndo_get_stats64	= enic_get_stats,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_set_mac_address	= enic_set_mac_address,
 	.ndo_set_rx_mode	= enic_set_rx_mode,
@@ -2345,6 +2408,7 @@
 	enic->notify_timer.data = (unsigned long)enic;
 
 	INIT_WORK(&enic->reset, enic_reset);
+	INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work);
 
 	for (i = 0; i < enic->wq_count; i++)
 		spin_lock_init(&enic->wq_lock[i]);
@@ -2427,6 +2491,7 @@
 		struct enic *enic = netdev_priv(netdev);
 
 		cancel_work_sync(&enic->reset);
+		cancel_work_sync(&enic->change_mtu_work);
 		unregister_netdev(netdev);
 		enic_dev_deinit(enic);
 		vnic_dev_close(enic->vdev);
diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c
index 6e5c635..34f4207 100644
--- a/drivers/net/enic/enic_res.c
+++ b/drivers/net/enic/enic_res.c
@@ -97,11 +97,24 @@
 	dev_info(enic_get_dev(enic),
 		"vNIC MAC addr %pM wq/rq %d/%d mtu %d\n",
 		enic->mac_addr, c->wq_desc_count, c->rq_desc_count, c->mtu);
-	dev_info(enic_get_dev(enic), "vNIC csum tx/rx %d/%d "
-		"tso %d intr timer %d usec rss %d\n",
-		ENIC_SETTING(enic, TXCSUM), ENIC_SETTING(enic, RXCSUM),
-		ENIC_SETTING(enic, TSO),
-		c->intr_timer_usec, ENIC_SETTING(enic, RSS));
+
+	dev_info(enic_get_dev(enic), "vNIC csum tx/rx %s/%s "
+		"tso/lro %s/%s rss %s intr mode %s type %s timer %d usec "
+		"loopback tag 0x%04x\n",
+		ENIC_SETTING(enic, TXCSUM) ? "yes" : "no",
+		ENIC_SETTING(enic, RXCSUM) ? "yes" : "no",
+		ENIC_SETTING(enic, TSO) ? "yes" : "no",
+		ENIC_SETTING(enic, LRO) ? "yes" : "no",
+		ENIC_SETTING(enic, RSS) ? "yes" : "no",
+		c->intr_mode == VENET_INTR_MODE_INTX ? "INTx" :
+		c->intr_mode == VENET_INTR_MODE_MSI ? "MSI" :
+		c->intr_mode == VENET_INTR_MODE_ANY ? "any" :
+		"unknown",
+		c->intr_timer_type == VENET_INTR_TYPE_MIN ? "min" :
+		c->intr_timer_type == VENET_INTR_TYPE_IDLE ? "idle" :
+		"unknown",
+		c->intr_timer_usec,
+		c->loop_tag);
 
 	return 0;
 }
diff --git a/drivers/net/enic/vnic_cq.c b/drivers/net/enic/vnic_cq.c
index b86d6ef..0daa1c7 100644
--- a/drivers/net/enic/vnic_cq.c
+++ b/drivers/net/enic/vnic_cq.c
@@ -74,6 +74,8 @@
 	iowrite32(cq_message_enable, &cq->ctrl->cq_message_enable);
 	iowrite32(interrupt_offset, &cq->ctrl->interrupt_offset);
 	writeq(cq_message_addr, &cq->ctrl->cq_message_addr);
+
+	cq->interrupt_offset = interrupt_offset;
 }
 
 void vnic_cq_clean(struct vnic_cq *cq)
diff --git a/drivers/net/enic/vnic_cq.h b/drivers/net/enic/vnic_cq.h
index 552d3da..579315c 100644
--- a/drivers/net/enic/vnic_cq.h
+++ b/drivers/net/enic/vnic_cq.h
@@ -57,6 +57,7 @@
 	struct vnic_dev_ring ring;
 	unsigned int to_clean;
 	unsigned int last_color;
+	unsigned int interrupt_offset;
 };
 
 static inline unsigned int vnic_cq_service(struct vnic_cq *cq,
diff --git a/drivers/net/enic/vnic_enet.h b/drivers/net/enic/vnic_enet.h
index e8740e3..061ad87 100644
--- a/drivers/net/enic/vnic_enet.h
+++ b/drivers/net/enic/vnic_enet.h
@@ -51,4 +51,11 @@
 #define VENETF_RSSHASH_TCPIPV6_EX 0x400	/* Hash on TCP + IPv6 ext. fields */
 #define VENETF_LOOP		0x800	/* Loopback enabled */
 
+#define VENET_INTR_TYPE_MIN	0	/* Timer specs min interrupt spacing */
+#define VENET_INTR_TYPE_IDLE	1	/* Timer specs idle time before irq */
+
+#define VENET_INTR_MODE_ANY	0	/* Try MSI-X, then MSI, then INTx */
+#define VENET_INTR_MODE_MSI	1	/* Try MSI then INTx */
+#define VENET_INTR_MODE_INTX	2	/* Try INTx only */
+
 #endif /* _VNIC_ENIC_H_ */
diff --git a/drivers/net/es3210.c b/drivers/net/es3210.c
index 0ba5e7b..7a09575 100644
--- a/drivers/net/es3210.c
+++ b/drivers/net/es3210.c
@@ -54,6 +54,7 @@
 #include <linux/errno.h>
 #include <linux/string.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index a83dd31..0e8cc75 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -13,6 +13,7 @@
 
 #include <linux/etherdevice.h>
 #include <linux/crc32.h>
+#include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/mii.h>
 #include <linux/phy.h>
@@ -874,6 +875,7 @@
 	}
 
 	spin_unlock_irq(&priv->lock);
+	skb_tx_timestamp(skb);
 out:
 	dev_kfree_skb(skb);
 	return NETDEV_TX_OK;
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 885d8ba..7ae3f28 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -324,6 +324,8 @@
 
 	fep->cur_tx = bdp;
 
+	skb_tx_timestamp(skb);
+
 	spin_unlock_irqrestore(&fep->hw_lock, flags);
 
 	return NETDEV_TX_OK;
@@ -650,7 +652,8 @@
 			skb_put(skb, pkt_len - 4);	/* Make room */
 			skb_copy_to_linear_data(skb, data, pkt_len - 4);
 			skb->protocol = eth_type_trans(skb, ndev);
-			netif_rx(skb);
+			if (!skb_defer_rx_timestamp(skb))
+				netif_rx(skb);
 		}
 
 		bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data,
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index 9f81b1a..cecc3b1 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -22,6 +22,7 @@
 #include <linux/slab.h>
 #include <linux/errno.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/crc32.h>
 #include <linux/hardirq.h>
 #include <linux/delay.h>
@@ -335,6 +336,7 @@
 	bd->skb_pa = dma_map_single(dev->dev.parent, skb->data, skb->len,
 				    DMA_TO_DEVICE);
 
+	skb_tx_timestamp(skb);
 	bcom_submit_next_buffer(priv->tx_dmatsk, skb);
 	spin_unlock_irqrestore(&priv->lock, flags);
 
@@ -434,7 +436,8 @@
 		length = status & BCOM_FEC_RX_BD_LEN_MASK;
 		skb_put(rskb, length - 4);	/* length without CRC32 */
 		rskb->protocol = eth_type_trans(rskb, dev);
-		netif_rx(rskb);
+		if (!skb_defer_rx_timestamp(rskb))
+			netif_rx(rskb);
 
 		spin_lock(&priv->lock);
 	}
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index 21abb5c..329ef23 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -697,6 +697,8 @@
 		sc |= BD_ENET_TX_PAD;
 	CBDS_SC(bdp, sc);
 
+	skb_tx_timestamp(skb);
+
 	(*fep->ops->tx_kickstart)(dev);
 
 	spin_unlock_irqrestore(&fep->tx_lock, flags);
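The skb_tx_timestamp() hunks above all follow the same rule: the driver takes the software TX timestamp after the descriptor is set up, as close as possible to the point where the hardware can start transmitting. A minimal sketch with a hypothetical driver:

	static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		/* ... map the buffer and fill the TX descriptor ... */

		skb_tx_timestamp(skb);	/* software TX timestamping hook */

		/* ... hand the descriptor to hardware (doorbell/kick) ... */
		return NETDEV_TX_OK;
	}
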
diff --git a/drivers/net/ftgmac100.c b/drivers/net/ftgmac100.c
new file mode 100644
index 0000000..54709af
--- /dev/null
+++ b/drivers/net/ftgmac100.c
@@ -0,0 +1,1365 @@
+/*
+ * Faraday FTGMAC100 Gigabit Ethernet
+ *
+ * (C) Copyright 2009-2011 Faraday Technology
+ * Po-Yu Chuang <ratbert@faraday-tech.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
+
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <net/ip.h>
+
+#include "ftgmac100.h"
+
+#define DRV_NAME	"ftgmac100"
+#define DRV_VERSION	"0.7"
+
+#define RX_QUEUE_ENTRIES	256	/* must be power of 2 */
+#define TX_QUEUE_ENTRIES	512	/* must be power of 2 */
+
+#define MAX_PKT_SIZE		1518
+#define RX_BUF_SIZE		PAGE_SIZE	/* must be smaller than 0x3fff */
+
+/******************************************************************************
+ * private data
+ *****************************************************************************/
+struct ftgmac100_descs {
+	struct ftgmac100_rxdes rxdes[RX_QUEUE_ENTRIES];
+	struct ftgmac100_txdes txdes[TX_QUEUE_ENTRIES];
+};
+
+struct ftgmac100 {
+	struct resource *res;
+	void __iomem *base;
+	int irq;
+
+	struct ftgmac100_descs *descs;
+	dma_addr_t descs_dma_addr;
+
+	unsigned int rx_pointer;
+	unsigned int tx_clean_pointer;
+	unsigned int tx_pointer;
+	unsigned int tx_pending;
+
+	spinlock_t tx_lock;
+
+	struct net_device *netdev;
+	struct device *dev;
+	struct napi_struct napi;
+
+	struct mii_bus *mii_bus;
+	int phy_irq[PHY_MAX_ADDR];
+	struct phy_device *phydev;
+	int old_speed;
+};
+
+static int ftgmac100_alloc_rx_page(struct ftgmac100 *priv,
+				   struct ftgmac100_rxdes *rxdes, gfp_t gfp);
+
+/******************************************************************************
+ * internal functions (hardware register access)
+ *****************************************************************************/
+#define INT_MASK_ALL_ENABLED	(FTGMAC100_INT_RPKT_LOST	| \
+				 FTGMAC100_INT_XPKT_ETH		| \
+				 FTGMAC100_INT_XPKT_LOST	| \
+				 FTGMAC100_INT_AHB_ERR		| \
+				 FTGMAC100_INT_PHYSTS_CHG	| \
+				 FTGMAC100_INT_RPKT_BUF		| \
+				 FTGMAC100_INT_NO_RXBUF)
+
+static void ftgmac100_set_rx_ring_base(struct ftgmac100 *priv, dma_addr_t addr)
+{
+	iowrite32(addr, priv->base + FTGMAC100_OFFSET_RXR_BADR);
+}
+
+static void ftgmac100_set_rx_buffer_size(struct ftgmac100 *priv,
+		unsigned int size)
+{
+	size = FTGMAC100_RBSR_SIZE(size);
+	iowrite32(size, priv->base + FTGMAC100_OFFSET_RBSR);
+}
+
+static void ftgmac100_set_normal_prio_tx_ring_base(struct ftgmac100 *priv,
+						   dma_addr_t addr)
+{
+	iowrite32(addr, priv->base + FTGMAC100_OFFSET_NPTXR_BADR);
+}
+
+static void ftgmac100_txdma_normal_prio_start_polling(struct ftgmac100 *priv)
+{
+	iowrite32(1, priv->base + FTGMAC100_OFFSET_NPTXPD);
+}
+
+static int ftgmac100_reset_hw(struct ftgmac100 *priv)
+{
+	struct net_device *netdev = priv->netdev;
+	int i;
+
+	/* NOTE: reset clears all registers */
+	iowrite32(FTGMAC100_MACCR_SW_RST, priv->base + FTGMAC100_OFFSET_MACCR);
+	for (i = 0; i < 5; i++) {
+		unsigned int maccr;
+
+		maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR);
+		if (!(maccr & FTGMAC100_MACCR_SW_RST))
+			return 0;
+
+		udelay(1000);
+	}
+
+	netdev_err(netdev, "software reset failed\n");
+	return -EIO;
+}
+
+static void ftgmac100_set_mac(struct ftgmac100 *priv, const unsigned char *mac)
+{
+	unsigned int maddr = mac[0] << 8 | mac[1];
+	unsigned int laddr = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];
+
+	iowrite32(maddr, priv->base + FTGMAC100_OFFSET_MAC_MADR);
+	iowrite32(laddr, priv->base + FTGMAC100_OFFSET_MAC_LADR);
+}
+
+static void ftgmac100_init_hw(struct ftgmac100 *priv)
+{
+	/* setup ring buffer base registers */
+	ftgmac100_set_rx_ring_base(priv,
+				   priv->descs_dma_addr +
+				   offsetof(struct ftgmac100_descs, rxdes));
+	ftgmac100_set_normal_prio_tx_ring_base(priv,
+					       priv->descs_dma_addr +
+					       offsetof(struct ftgmac100_descs, txdes));
+
+	ftgmac100_set_rx_buffer_size(priv, RX_BUF_SIZE);
+
+	iowrite32(FTGMAC100_APTC_RXPOLL_CNT(1), priv->base + FTGMAC100_OFFSET_APTC);
+
+	ftgmac100_set_mac(priv, priv->netdev->dev_addr);
+}
+
+#define MACCR_ENABLE_ALL	(FTGMAC100_MACCR_TXDMA_EN	| \
+				 FTGMAC100_MACCR_RXDMA_EN	| \
+				 FTGMAC100_MACCR_TXMAC_EN	| \
+				 FTGMAC100_MACCR_RXMAC_EN	| \
+				 FTGMAC100_MACCR_FULLDUP	| \
+				 FTGMAC100_MACCR_CRC_APD	| \
+				 FTGMAC100_MACCR_RX_RUNT	| \
+				 FTGMAC100_MACCR_RX_BROADPKT)
+
+static void ftgmac100_start_hw(struct ftgmac100 *priv, int speed)
+{
+	int maccr = MACCR_ENABLE_ALL;
+
+	switch (speed) {
+	default:
+	case 10:
+		break;
+
+	case 100:
+		maccr |= FTGMAC100_MACCR_FAST_MODE;
+		break;
+
+	case 1000:
+		maccr |= FTGMAC100_MACCR_GIGA_MODE;
+		break;
+	}
+
+	iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR);
+}
+
+static void ftgmac100_stop_hw(struct ftgmac100 *priv)
+{
+	iowrite32(0, priv->base + FTGMAC100_OFFSET_MACCR);
+}
+
+/******************************************************************************
+ * internal functions (receive descriptor)
+ *****************************************************************************/
+static bool ftgmac100_rxdes_first_segment(struct ftgmac100_rxdes *rxdes)
+{
+	return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_FRS);
+}
+
+static bool ftgmac100_rxdes_last_segment(struct ftgmac100_rxdes *rxdes)
+{
+	return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_LRS);
+}
+
+static bool ftgmac100_rxdes_packet_ready(struct ftgmac100_rxdes *rxdes)
+{
+	return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RXPKT_RDY);
+}
+
+static void ftgmac100_rxdes_set_dma_own(struct ftgmac100_rxdes *rxdes)
+{
+	/* clear status bits */
+	rxdes->rxdes0 &= cpu_to_le32(FTGMAC100_RXDES0_EDORR);
+}
+
+static bool ftgmac100_rxdes_rx_error(struct ftgmac100_rxdes *rxdes)
+{
+	return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RX_ERR);
+}
+
+static bool ftgmac100_rxdes_crc_error(struct ftgmac100_rxdes *rxdes)
+{
+	return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_CRC_ERR);
+}
+
+static bool ftgmac100_rxdes_frame_too_long(struct ftgmac100_rxdes *rxdes)
+{
+	return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_FTL);
+}
+
+static bool ftgmac100_rxdes_runt(struct ftgmac100_rxdes *rxdes)
+{
+	return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RUNT);
+}
+
+static bool ftgmac100_rxdes_odd_nibble(struct ftgmac100_rxdes *rxdes)
+{
+	return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RX_ODD_NB);
+}
+
+static unsigned int ftgmac100_rxdes_data_length(struct ftgmac100_rxdes *rxdes)
+{
+	return le32_to_cpu(rxdes->rxdes0) & FTGMAC100_RXDES0_VDBC;
+}
+
+static bool ftgmac100_rxdes_multicast(struct ftgmac100_rxdes *rxdes)
+{
+	return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_MULTICAST);
+}
+
+static void ftgmac100_rxdes_set_end_of_ring(struct ftgmac100_rxdes *rxdes)
+{
+	rxdes->rxdes0 |= cpu_to_le32(FTGMAC100_RXDES0_EDORR);
+}
+
+static void ftgmac100_rxdes_set_dma_addr(struct ftgmac100_rxdes *rxdes,
+					 dma_addr_t addr)
+{
+	rxdes->rxdes3 = cpu_to_le32(addr);
+}
+
+static dma_addr_t ftgmac100_rxdes_get_dma_addr(struct ftgmac100_rxdes *rxdes)
+{
+	return le32_to_cpu(rxdes->rxdes3);
+}
+
+static bool ftgmac100_rxdes_is_tcp(struct ftgmac100_rxdes *rxdes)
+{
+	return (rxdes->rxdes1 & cpu_to_le32(FTGMAC100_RXDES1_PROT_MASK)) ==
+	       cpu_to_le32(FTGMAC100_RXDES1_PROT_TCPIP);
+}
+
+static bool ftgmac100_rxdes_is_udp(struct ftgmac100_rxdes *rxdes)
+{
+	return (rxdes->rxdes1 & cpu_to_le32(FTGMAC100_RXDES1_PROT_MASK)) ==
+	       cpu_to_le32(FTGMAC100_RXDES1_PROT_UDPIP);
+}
+
+static bool ftgmac100_rxdes_tcpcs_err(struct ftgmac100_rxdes *rxdes)
+{
+	return rxdes->rxdes1 & cpu_to_le32(FTGMAC100_RXDES1_TCP_CHKSUM_ERR);
+}
+
+static bool ftgmac100_rxdes_udpcs_err(struct ftgmac100_rxdes *rxdes)
+{
+	return rxdes->rxdes1 & cpu_to_le32(FTGMAC100_RXDES1_UDP_CHKSUM_ERR);
+}
+
+static bool ftgmac100_rxdes_ipcs_err(struct ftgmac100_rxdes *rxdes)
+{
+	return rxdes->rxdes1 & cpu_to_le32(FTGMAC100_RXDES1_IP_CHKSUM_ERR);
+}
+
+/*
+ * rxdes2 is not used by hardware. We use it to keep track of page.
+ * Since hardware does not touch it, we can skip cpu_to_le32()/le32_to_cpu().
+ */
+static void ftgmac100_rxdes_set_page(struct ftgmac100_rxdes *rxdes, struct page *page)
+{
+	rxdes->rxdes2 = (unsigned int)page;
+}
+
+static struct page *ftgmac100_rxdes_get_page(struct ftgmac100_rxdes *rxdes)
+{
+	return (struct page *)rxdes->rxdes2;
+}
+
+/******************************************************************************
+ * internal functions (receive)
+ *****************************************************************************/
+static int ftgmac100_next_rx_pointer(int pointer)
+{
+	return (pointer + 1) & (RX_QUEUE_ENTRIES - 1);
+}
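The masked increment above is why RX_QUEUE_ENTRIES and TX_QUEUE_ENTRIES must be powers of two: for a power-of-two ring size N, (p + 1) & (N - 1) equals (p + 1) % N but compiles to a single AND. An illustrative (hypothetical) validity check:

	static int ftgmac100_ring_size_valid(unsigned int n)
	{
		/* true only for powers of two, for which the mask trick holds */
		return n && !(n & (n - 1));
	}
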
+
+static void ftgmac100_rx_pointer_advance(struct ftgmac100 *priv)
+{
+	priv->rx_pointer = ftgmac100_next_rx_pointer(priv->rx_pointer);
+}
+
+static struct ftgmac100_rxdes *ftgmac100_current_rxdes(struct ftgmac100 *priv)
+{
+	return &priv->descs->rxdes[priv->rx_pointer];
+}
+
+static struct ftgmac100_rxdes *
+ftgmac100_rx_locate_first_segment(struct ftgmac100 *priv)
+{
+	struct ftgmac100_rxdes *rxdes = ftgmac100_current_rxdes(priv);
+
+	while (ftgmac100_rxdes_packet_ready(rxdes)) {
+		if (ftgmac100_rxdes_first_segment(rxdes))
+			return rxdes;
+
+		ftgmac100_rxdes_set_dma_own(rxdes);
+		ftgmac100_rx_pointer_advance(priv);
+		rxdes = ftgmac100_current_rxdes(priv);
+	}
+
+	return NULL;
+}
+
+static bool ftgmac100_rx_packet_error(struct ftgmac100 *priv,
+				      struct ftgmac100_rxdes *rxdes)
+{
+	struct net_device *netdev = priv->netdev;
+	bool error = false;
+
+	if (unlikely(ftgmac100_rxdes_rx_error(rxdes))) {
+		if (net_ratelimit())
+			netdev_info(netdev, "rx err\n");
+
+		netdev->stats.rx_errors++;
+		error = true;
+	}
+
+	if (unlikely(ftgmac100_rxdes_crc_error(rxdes))) {
+		if (net_ratelimit())
+			netdev_info(netdev, "rx crc err\n");
+
+		netdev->stats.rx_crc_errors++;
+		error = true;
+	} else if (unlikely(ftgmac100_rxdes_ipcs_err(rxdes))) {
+		if (net_ratelimit())
+			netdev_info(netdev, "rx IP checksum err\n");
+
+		error = true;
+	}
+
+	if (unlikely(ftgmac100_rxdes_frame_too_long(rxdes))) {
+		if (net_ratelimit())
+			netdev_info(netdev, "rx frame too long\n");
+
+		netdev->stats.rx_length_errors++;
+		error = true;
+	} else if (unlikely(ftgmac100_rxdes_runt(rxdes))) {
+		if (net_ratelimit())
+			netdev_info(netdev, "rx runt\n");
+
+		netdev->stats.rx_length_errors++;
+		error = true;
+	} else if (unlikely(ftgmac100_rxdes_odd_nibble(rxdes))) {
+		if (net_ratelimit())
+			netdev_info(netdev, "rx odd nibble\n");
+
+		netdev->stats.rx_length_errors++;
+		error = true;
+	}
+
+	return error;
+}
+
+static void ftgmac100_rx_drop_packet(struct ftgmac100 *priv)
+{
+	struct net_device *netdev = priv->netdev;
+	struct ftgmac100_rxdes *rxdes = ftgmac100_current_rxdes(priv);
+	bool done = false;
+
+	if (net_ratelimit())
+		netdev_dbg(netdev, "drop packet %p\n", rxdes);
+
+	do {
+		if (ftgmac100_rxdes_last_segment(rxdes))
+			done = true;
+
+		ftgmac100_rxdes_set_dma_own(rxdes);
+		ftgmac100_rx_pointer_advance(priv);
+		rxdes = ftgmac100_current_rxdes(priv);
+	} while (!done && ftgmac100_rxdes_packet_ready(rxdes));
+
+	netdev->stats.rx_dropped++;
+}
+
+static bool ftgmac100_rx_packet(struct ftgmac100 *priv, int *processed)
+{
+	struct net_device *netdev = priv->netdev;
+	struct ftgmac100_rxdes *rxdes;
+	struct sk_buff *skb;
+	bool done = false;
+
+	rxdes = ftgmac100_rx_locate_first_segment(priv);
+	if (!rxdes)
+		return false;
+
+	if (unlikely(ftgmac100_rx_packet_error(priv, rxdes))) {
+		ftgmac100_rx_drop_packet(priv);
+		return true;
+	}
+
+	/* start processing */
+	skb = netdev_alloc_skb_ip_align(netdev, 128);
+	if (unlikely(!skb)) {
+		if (net_ratelimit())
+			netdev_err(netdev, "rx skb alloc failed\n");
+
+		ftgmac100_rx_drop_packet(priv);
+		return true;
+	}
+
+	if (unlikely(ftgmac100_rxdes_multicast(rxdes)))
+		netdev->stats.multicast++;
+
+	/*
+	 * The hardware seems to compute checksums incorrectly for fragmented
+	 * packets, so be conservative here - if the hardware reports a
+	 * checksum error, let software verify the checksum instead.
+	 */
+	if ((ftgmac100_rxdes_is_tcp(rxdes) && !ftgmac100_rxdes_tcpcs_err(rxdes)) ||
+	    (ftgmac100_rxdes_is_udp(rxdes) && !ftgmac100_rxdes_udpcs_err(rxdes)))
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+	do {
+		dma_addr_t map = ftgmac100_rxdes_get_dma_addr(rxdes);
+		struct page *page = ftgmac100_rxdes_get_page(rxdes);
+		unsigned int size;
+
+		dma_unmap_page(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
+
+		size = ftgmac100_rxdes_data_length(rxdes);
+		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, 0, size);
+
+		skb->len += size;
+		skb->data_len += size;
+		skb->truesize += size;
+
+		if (ftgmac100_rxdes_last_segment(rxdes))
+			done = true;
+
+		ftgmac100_alloc_rx_page(priv, rxdes, GFP_ATOMIC);
+
+		ftgmac100_rx_pointer_advance(priv);
+		rxdes = ftgmac100_current_rxdes(priv);
+	} while (!done);
+
+	__pskb_pull_tail(skb, min(skb->len, 64U));
+	skb->protocol = eth_type_trans(skb, netdev);
+
+	netdev->stats.rx_packets++;
+	netdev->stats.rx_bytes += skb->len;
+
+	/* push packet to protocol stack */
+	napi_gro_receive(&priv->napi, skb);
+
+	(*processed)++;
+	return true;
+}
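One subtlety in the receive path above: all packet data lands in page fragments, so the linear area of the skb starts out empty. The __pskb_pull_tail() call copies the first bytes into the linear area so that eth_type_trans() and the protocol stack can parse the Ethernet/IP/TCP headers directly. Sketched in isolation (illustrative helper):

	static void foo_linearize_headers(struct sk_buff *skb)
	{
		/* skb_headlen(skb) is 0 on entry; afterwards it is
		 * min(skb->len, 64), enough for typical L2/L3/L4 headers. */
		__pskb_pull_tail(skb, min(skb->len, 64U));
	}
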
+
+/******************************************************************************
+ * internal functions (transmit descriptor)
+ *****************************************************************************/
+static void ftgmac100_txdes_reset(struct ftgmac100_txdes *txdes)
+{
+	/* clear all except end of ring bit */
+	txdes->txdes0 &= cpu_to_le32(FTGMAC100_TXDES0_EDOTR);
+	txdes->txdes1 = 0;
+	txdes->txdes2 = 0;
+	txdes->txdes3 = 0;
+}
+
+static bool ftgmac100_txdes_owned_by_dma(struct ftgmac100_txdes *txdes)
+{
+	return txdes->txdes0 & cpu_to_le32(FTGMAC100_TXDES0_TXDMA_OWN);
+}
+
+static void ftgmac100_txdes_set_dma_own(struct ftgmac100_txdes *txdes)
+{
+	/*
+	 * Make sure dma own bit will not be set before any other
+	 * descriptor fields.
+	 */
+	wmb();
+	txdes->txdes0 |= cpu_to_le32(FTGMAC100_TXDES0_TXDMA_OWN);
+}
+
+static void ftgmac100_txdes_set_end_of_ring(struct ftgmac100_txdes *txdes)
+{
+	txdes->txdes0 |= cpu_to_le32(FTGMAC100_TXDES0_EDOTR);
+}
+
+static void ftgmac100_txdes_set_first_segment(struct ftgmac100_txdes *txdes)
+{
+	txdes->txdes0 |= cpu_to_le32(FTGMAC100_TXDES0_FTS);
+}
+
+static void ftgmac100_txdes_set_last_segment(struct ftgmac100_txdes *txdes)
+{
+	txdes->txdes0 |= cpu_to_le32(FTGMAC100_TXDES0_LTS);
+}
+
+static void ftgmac100_txdes_set_buffer_size(struct ftgmac100_txdes *txdes,
+					    unsigned int len)
+{
+	txdes->txdes0 |= cpu_to_le32(FTGMAC100_TXDES0_TXBUF_SIZE(len));
+}
+
+static void ftgmac100_txdes_set_txint(struct ftgmac100_txdes *txdes)
+{
+	txdes->txdes1 |= cpu_to_le32(FTGMAC100_TXDES1_TXIC);
+}
+
+static void ftgmac100_txdes_set_tcpcs(struct ftgmac100_txdes *txdes)
+{
+	txdes->txdes1 |= cpu_to_le32(FTGMAC100_TXDES1_TCP_CHKSUM);
+}
+
+static void ftgmac100_txdes_set_udpcs(struct ftgmac100_txdes *txdes)
+{
+	txdes->txdes1 |= cpu_to_le32(FTGMAC100_TXDES1_UDP_CHKSUM);
+}
+
+static void ftgmac100_txdes_set_ipcs(struct ftgmac100_txdes *txdes)
+{
+	txdes->txdes1 |= cpu_to_le32(FTGMAC100_TXDES1_IP_CHKSUM);
+}
+
+static void ftgmac100_txdes_set_dma_addr(struct ftgmac100_txdes *txdes,
+					 dma_addr_t addr)
+{
+	txdes->txdes3 = cpu_to_le32(addr);
+}
+
+static dma_addr_t ftgmac100_txdes_get_dma_addr(struct ftgmac100_txdes *txdes)
+{
+	return le32_to_cpu(txdes->txdes3);
+}
+
+/*
+ * txdes2 is not used by hardware. We use it to keep track of socket buffer.
+ * Since hardware does not touch it, we can skip cpu_to_le32()/le32_to_cpu().
+ */
+static void ftgmac100_txdes_set_skb(struct ftgmac100_txdes *txdes,
+				    struct sk_buff *skb)
+{
+	txdes->txdes2 = (unsigned int)skb;
+}
+
+static struct sk_buff *ftgmac100_txdes_get_skb(struct ftgmac100_txdes *txdes)
+{
+	return (struct sk_buff *)txdes->txdes2;
+}
+
+/******************************************************************************
+ * internal functions (transmit)
+ *****************************************************************************/
+static int ftgmac100_next_tx_pointer(int pointer)
+{
+	return (pointer + 1) & (TX_QUEUE_ENTRIES - 1);
+}
+
+static void ftgmac100_tx_pointer_advance(struct ftgmac100 *priv)
+{
+	priv->tx_pointer = ftgmac100_next_tx_pointer(priv->tx_pointer);
+}
+
+static void ftgmac100_tx_clean_pointer_advance(struct ftgmac100 *priv)
+{
+	priv->tx_clean_pointer = ftgmac100_next_tx_pointer(priv->tx_clean_pointer);
+}
+
+static struct ftgmac100_txdes *ftgmac100_current_txdes(struct ftgmac100 *priv)
+{
+	return &priv->descs->txdes[priv->tx_pointer];
+}
+
+static struct ftgmac100_txdes *
+ftgmac100_current_clean_txdes(struct ftgmac100 *priv)
+{
+	return &priv->descs->txdes[priv->tx_clean_pointer];
+}
+
+static bool ftgmac100_tx_complete_packet(struct ftgmac100 *priv)
+{
+	struct net_device *netdev = priv->netdev;
+	struct ftgmac100_txdes *txdes;
+	struct sk_buff *skb;
+	dma_addr_t map;
+
+	if (priv->tx_pending == 0)
+		return false;
+
+	txdes = ftgmac100_current_clean_txdes(priv);
+
+	if (ftgmac100_txdes_owned_by_dma(txdes))
+		return false;
+
+	skb = ftgmac100_txdes_get_skb(txdes);
+	map = ftgmac100_txdes_get_dma_addr(txdes);
+
+	netdev->stats.tx_packets++;
+	netdev->stats.tx_bytes += skb->len;
+
+	dma_unmap_single(priv->dev, map, skb_headlen(skb), DMA_TO_DEVICE);
+
+	dev_kfree_skb(skb);
+
+	ftgmac100_txdes_reset(txdes);
+
+	ftgmac100_tx_clean_pointer_advance(priv);
+
+	spin_lock(&priv->tx_lock);
+	priv->tx_pending--;
+	spin_unlock(&priv->tx_lock);
+	netif_wake_queue(netdev);
+
+	return true;
+}
+
+static void ftgmac100_tx_complete(struct ftgmac100 *priv)
+{
+	while (ftgmac100_tx_complete_packet(priv))
+		;
+}
+
+static int ftgmac100_xmit(struct ftgmac100 *priv, struct sk_buff *skb,
+			  dma_addr_t map)
+{
+	struct net_device *netdev = priv->netdev;
+	struct ftgmac100_txdes *txdes;
+	unsigned int len = (skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;
+
+	txdes = ftgmac100_current_txdes(priv);
+	ftgmac100_tx_pointer_advance(priv);
+
+	/* setup TX descriptor */
+	ftgmac100_txdes_set_skb(txdes, skb);
+	ftgmac100_txdes_set_dma_addr(txdes, map);
+	ftgmac100_txdes_set_buffer_size(txdes, len);
+
+	ftgmac100_txdes_set_first_segment(txdes);
+	ftgmac100_txdes_set_last_segment(txdes);
+	ftgmac100_txdes_set_txint(txdes);
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		__be16 protocol = skb->protocol;
+
+		if (protocol == cpu_to_be16(ETH_P_IP)) {
+			u8 ip_proto = ip_hdr(skb)->protocol;
+
+			ftgmac100_txdes_set_ipcs(txdes);
+			if (ip_proto == IPPROTO_TCP)
+				ftgmac100_txdes_set_tcpcs(txdes);
+			else if (ip_proto == IPPROTO_UDP)
+				ftgmac100_txdes_set_udpcs(txdes);
+		}
+	}
+
+	spin_lock(&priv->tx_lock);
+	priv->tx_pending++;
+	if (priv->tx_pending == TX_QUEUE_ENTRIES)
+		netif_stop_queue(netdev);
+
+	/* start transmit */
+	ftgmac100_txdes_set_dma_own(txdes);
+	spin_unlock(&priv->tx_lock);
+
+	ftgmac100_txdma_normal_prio_start_polling(priv);
+
+	return NETDEV_TX_OK;
+}
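The checksum branch above handles only IPv4 TCP/UDP, which is safe because the driver advertises only NETIF_F_IP_CSUM (see the probe function below), so the stack never requests offload for anything else. A driver with a mismatch would need a software fallback along these lines (hypothetical sketch):

	static int foo_tx_csum_fallback(struct sk_buff *skb)
	{
		/* compute the checksum in software when offload can't apply */
		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    skb->protocol != cpu_to_be16(ETH_P_IP))
			return skb_checksum_help(skb);
		return 0;
	}
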
+
+/******************************************************************************
+ * internal functions (buffer)
+ *****************************************************************************/
+static int ftgmac100_alloc_rx_page(struct ftgmac100 *priv,
+				   struct ftgmac100_rxdes *rxdes, gfp_t gfp)
+{
+	struct net_device *netdev = priv->netdev;
+	struct page *page;
+	dma_addr_t map;
+
+	page = alloc_page(gfp);
+	if (!page) {
+		if (net_ratelimit())
+			netdev_err(netdev, "failed to allocate rx page\n");
+		return -ENOMEM;
+	}
+
+	map = dma_map_page(priv->dev, page, 0, RX_BUF_SIZE, DMA_FROM_DEVICE);
+	if (unlikely(dma_mapping_error(priv->dev, map))) {
+		if (net_ratelimit())
+			netdev_err(netdev, "failed to map rx page\n");
+		__free_page(page);
+		return -ENOMEM;
+	}
+
+	ftgmac100_rxdes_set_page(rxdes, page);
+	ftgmac100_rxdes_set_dma_addr(rxdes, map);
+	ftgmac100_rxdes_set_dma_own(rxdes);
+	return 0;
+}
+
+static void ftgmac100_free_buffers(struct ftgmac100 *priv)
+{
+	int i;
+
+	for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
+		struct ftgmac100_rxdes *rxdes = &priv->descs->rxdes[i];
+		struct page *page = ftgmac100_rxdes_get_page(rxdes);
+		dma_addr_t map = ftgmac100_rxdes_get_dma_addr(rxdes);
+
+		if (!page)
+			continue;
+
+		dma_unmap_page(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
+		__free_page(page);
+	}
+
+	for (i = 0; i < TX_QUEUE_ENTRIES; i++) {
+		struct ftgmac100_txdes *txdes = &priv->descs->txdes[i];
+		struct sk_buff *skb = ftgmac100_txdes_get_skb(txdes);
+		dma_addr_t map = ftgmac100_txdes_get_dma_addr(txdes);
+
+		if (!skb)
+			continue;
+
+		dma_unmap_single(priv->dev, map, skb_headlen(skb), DMA_TO_DEVICE);
+		dev_kfree_skb(skb);
+	}
+
+	dma_free_coherent(priv->dev, sizeof(struct ftgmac100_descs),
+			  priv->descs, priv->descs_dma_addr);
+}
+
+static int ftgmac100_alloc_buffers(struct ftgmac100 *priv)
+{
+	int i;
+
+	priv->descs = dma_alloc_coherent(priv->dev,
+					 sizeof(struct ftgmac100_descs),
+					 &priv->descs_dma_addr, GFP_KERNEL);
+	if (!priv->descs)
+		return -ENOMEM;
+
+	memset(priv->descs, 0, sizeof(struct ftgmac100_descs));
+
+	/* initialize RX ring */
+	ftgmac100_rxdes_set_end_of_ring(&priv->descs->rxdes[RX_QUEUE_ENTRIES - 1]);
+
+	for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
+		struct ftgmac100_rxdes *rxdes = &priv->descs->rxdes[i];
+
+		if (ftgmac100_alloc_rx_page(priv, rxdes, GFP_KERNEL))
+			goto err;
+	}
+
+	/* initialize TX ring */
+	ftgmac100_txdes_set_end_of_ring(&priv->descs->txdes[TX_QUEUE_ENTRIES - 1]);
+	return 0;
+
+err:
+	ftgmac100_free_buffers(priv);
+	return -ENOMEM;
+}
+
+/******************************************************************************
+ * internal functions (mdio)
+ *****************************************************************************/
+static void ftgmac100_adjust_link(struct net_device *netdev)
+{
+	struct ftgmac100 *priv = netdev_priv(netdev);
+	struct phy_device *phydev = priv->phydev;
+	int ier;
+
+	if (phydev->speed == priv->old_speed)
+		return;
+
+	priv->old_speed = phydev->speed;
+
+	ier = ioread32(priv->base + FTGMAC100_OFFSET_IER);
+
+	/* disable all interrupts */
+	iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
+
+	netif_stop_queue(netdev);
+	ftgmac100_stop_hw(priv);
+
+	netif_start_queue(netdev);
+	ftgmac100_init_hw(priv);
+	ftgmac100_start_hw(priv, phydev->speed);
+
+	/* re-enable interrupts */
+	iowrite32(ier, priv->base + FTGMAC100_OFFSET_IER);
+}
+
+static int ftgmac100_mii_probe(struct ftgmac100 *priv)
+{
+	struct net_device *netdev = priv->netdev;
+	struct phy_device *phydev = NULL;
+	int i;
+
+	/* search for a connected PHY device */
+	for (i = 0; i < PHY_MAX_ADDR; i++) {
+		struct phy_device *tmp = priv->mii_bus->phy_map[i];
+
+		if (tmp) {
+			phydev = tmp;
+			break;
+		}
+	}
+
+	/* now we should have a proper phydev to attach to */
+	if (!phydev) {
+		netdev_info(netdev, "%s: no PHY found\n", netdev->name);
+		return -ENODEV;
+	}
+
+	phydev = phy_connect(netdev, dev_name(&phydev->dev),
+			     &ftgmac100_adjust_link, 0,
+			     PHY_INTERFACE_MODE_GMII);
+
+	if (IS_ERR(phydev)) {
+		netdev_err(netdev, "%s: Could not attach to PHY\n", netdev->name);
+		return PTR_ERR(phydev);
+	}
+
+	priv->phydev = phydev;
+	return 0;
+}
+
+/******************************************************************************
+ * struct mii_bus functions
+ *****************************************************************************/
+static int ftgmac100_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
+{
+	struct net_device *netdev = bus->priv;
+	struct ftgmac100 *priv = netdev_priv(netdev);
+	unsigned int phycr;
+	int i;
+
+	phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR);
+
+	/* preserve MDC cycle threshold */
+	phycr &= FTGMAC100_PHYCR_MDC_CYCTHR_MASK;
+
+	phycr |= FTGMAC100_PHYCR_PHYAD(phy_addr) |
+		 FTGMAC100_PHYCR_REGAD(regnum) |
+		 FTGMAC100_PHYCR_MIIRD;
+
+	iowrite32(phycr, priv->base + FTGMAC100_OFFSET_PHYCR);
+
+	for (i = 0; i < 10; i++) {
+		phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR);
+
+		if ((phycr & FTGMAC100_PHYCR_MIIRD) == 0) {
+			int data;
+
+			data = ioread32(priv->base + FTGMAC100_OFFSET_PHYDATA);
+			return FTGMAC100_PHYDATA_MIIRDATA(data);
+		}
+
+		udelay(100);
+	}
+
+	netdev_err(netdev, "mdio read timed out\n");
+	return -EIO;
+}
+
+static int ftgmac100_mdiobus_write(struct mii_bus *bus, int phy_addr,
+				   int regnum, u16 value)
+{
+	struct net_device *netdev = bus->priv;
+	struct ftgmac100 *priv = netdev_priv(netdev);
+	unsigned int phycr;
+	int data;
+	int i;
+
+	phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR);
+
+	/* preserve MDC cycle threshold */
+	phycr &= FTGMAC100_PHYCR_MDC_CYCTHR_MASK;
+
+	phycr |= FTGMAC100_PHYCR_PHYAD(phy_addr) |
+		 FTGMAC100_PHYCR_REGAD(regnum) |
+		 FTGMAC100_PHYCR_MIIWR;
+
+	data = FTGMAC100_PHYDATA_MIIWDATA(value);
+
+	iowrite32(data, priv->base + FTGMAC100_OFFSET_PHYDATA);
+	iowrite32(phycr, priv->base + FTGMAC100_OFFSET_PHYCR);
+
+	for (i = 0; i < 10; i++) {
+		phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR);
+
+		if ((phycr & FTGMAC100_PHYCR_MIIWR) == 0)
+			return 0;
+
+		udelay(100);
+	}
+
+	netdev_err(netdev, "mdio write timed out\n");
+	return -EIO;
+}
+
+static int ftgmac100_mdiobus_reset(struct mii_bus *bus)
+{
+	return 0;
+}
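Once the bus is registered, the PHY core drives these callbacks through phy_read()/phy_write(). A direct access to the standard status register would look like this (hypothetical helper; MII_BMSR and BMSR_LSTATUS come from linux/mii.h):

	static int foo_phy_link_up(struct ftgmac100 *priv, int phy_addr)
	{
		int bmsr = mdiobus_read(priv->mii_bus, phy_addr, MII_BMSR);

		return bmsr >= 0 && (bmsr & BMSR_LSTATUS);
	}
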
+
+/******************************************************************************
+ * struct ethtool_ops functions
+ *****************************************************************************/
+static void ftgmac100_get_drvinfo(struct net_device *netdev,
+				  struct ethtool_drvinfo *info)
+{
+	strcpy(info->driver, DRV_NAME);
+	strcpy(info->version, DRV_VERSION);
+	strcpy(info->bus_info, dev_name(&netdev->dev));
+}
+
+static int ftgmac100_get_settings(struct net_device *netdev,
+				  struct ethtool_cmd *cmd)
+{
+	struct ftgmac100 *priv = netdev_priv(netdev);
+
+	return phy_ethtool_gset(priv->phydev, cmd);
+}
+
+static int ftgmac100_set_settings(struct net_device *netdev,
+				  struct ethtool_cmd *cmd)
+{
+	struct ftgmac100 *priv = netdev_priv(netdev);
+
+	return phy_ethtool_sset(priv->phydev, cmd);
+}
+
+static const struct ethtool_ops ftgmac100_ethtool_ops = {
+	.set_settings		= ftgmac100_set_settings,
+	.get_settings		= ftgmac100_get_settings,
+	.get_drvinfo		= ftgmac100_get_drvinfo,
+	.get_link		= ethtool_op_get_link,
+};
+
+/******************************************************************************
+ * interrupt handler
+ *****************************************************************************/
+static irqreturn_t ftgmac100_interrupt(int irq, void *dev_id)
+{
+	struct net_device *netdev = dev_id;
+	struct ftgmac100 *priv = netdev_priv(netdev);
+
+	if (likely(netif_running(netdev))) {
+		/* Disable interrupts for polling */
+		iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
+		napi_schedule(&priv->napi);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/******************************************************************************
+ * struct napi_struct functions
+ *****************************************************************************/
+static int ftgmac100_poll(struct napi_struct *napi, int budget)
+{
+	struct ftgmac100 *priv = container_of(napi, struct ftgmac100, napi);
+	struct net_device *netdev = priv->netdev;
+	unsigned int status;
+	bool completed = true;
+	int rx = 0;
+
+	status = ioread32(priv->base + FTGMAC100_OFFSET_ISR);
+	iowrite32(status, priv->base + FTGMAC100_OFFSET_ISR);
+
+	if (status & (FTGMAC100_INT_RPKT_BUF | FTGMAC100_INT_NO_RXBUF)) {
+		/*
+		 * FTGMAC100_INT_RPKT_BUF:
+		 *	RX DMA has received packets into RX buffer successfully
+		 *
+		 * FTGMAC100_INT_NO_RXBUF:
+		 *	RX buffer unavailable
+		 */
+		bool retry;
+
+		do {
+			retry = ftgmac100_rx_packet(priv, &rx);
+		} while (retry && rx < budget);
+
+		if (retry && rx == budget)
+			completed = false;
+	}
+
+	if (status & (FTGMAC100_INT_XPKT_ETH | FTGMAC100_INT_XPKT_LOST)) {
+		/*
+		 * FTGMAC100_INT_XPKT_ETH:
+		 *	packet transmitted to ethernet successfully
+		 *
+		 * FTGMAC100_INT_XPKT_LOST:
+		 *	packet transmitted to ethernet lost due to late
+		 *	collision or excessive collision
+		 */
+		ftgmac100_tx_complete(priv);
+	}
+
+	if (status & (FTGMAC100_INT_NO_RXBUF | FTGMAC100_INT_RPKT_LOST |
+		      FTGMAC100_INT_AHB_ERR | FTGMAC100_INT_PHYSTS_CHG)) {
+		if (net_ratelimit())
+			netdev_info(netdev, "[ISR] = 0x%x: %s%s%s%s\n", status,
+				    status & FTGMAC100_INT_NO_RXBUF ? "NO_RXBUF " : "",
+				    status & FTGMAC100_INT_RPKT_LOST ? "RPKT_LOST " : "",
+				    status & FTGMAC100_INT_AHB_ERR ? "AHB_ERR " : "",
+				    status & FTGMAC100_INT_PHYSTS_CHG ? "PHYSTS_CHG" : "");
+
+		if (status & FTGMAC100_INT_NO_RXBUF) {
+			/* RX buffer unavailable */
+			netdev->stats.rx_over_errors++;
+		}
+
+		if (status & FTGMAC100_INT_RPKT_LOST) {
+			/* received packet lost due to RX FIFO full */
+			netdev->stats.rx_fifo_errors++;
+		}
+	}
+
+	if (completed) {
+		napi_complete(napi);
+
+		/* enable all interrupts */
+		iowrite32(INT_MASK_ALL_ENABLED, priv->base + FTGMAC100_OFFSET_IER);
+	}
+
+	return rx;
+}
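The interrupt handler and poll function above implement the standard NAPI hand-off: the hard IRQ masks device interrupts and schedules polling, and the poll routine re-enables them only when it completes under budget. In outline (hypothetical device and helpers):

	static irqreturn_t foo_interrupt(int irq, void *dev_id)
	{
		struct foo *priv = netdev_priv(dev_id);

		foo_mask_irqs(priv);		/* hypothetical helper */
		napi_schedule(&priv->napi);
		return IRQ_HANDLED;
	}

	static int foo_poll(struct napi_struct *napi, int budget)
	{
		struct foo *priv = container_of(napi, struct foo, napi);
		int done = foo_process_rx(priv, budget);	/* hypothetical */

		if (done < budget) {
			napi_complete(napi);
			foo_unmask_irqs(priv);	/* hypothetical helper */
		}
		return done;
	}
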
+
+/******************************************************************************
+ * struct net_device_ops functions
+ *****************************************************************************/
+static int ftgmac100_open(struct net_device *netdev)
+{
+	struct ftgmac100 *priv = netdev_priv(netdev);
+	int err;
+
+	err = ftgmac100_alloc_buffers(priv);
+	if (err) {
+		netdev_err(netdev, "failed to allocate buffers\n");
+		goto err_alloc;
+	}
+
+	err = request_irq(priv->irq, ftgmac100_interrupt, 0, netdev->name, netdev);
+	if (err) {
+		netdev_err(netdev, "failed to request irq %d\n", priv->irq);
+		goto err_irq;
+	}
+
+	priv->rx_pointer = 0;
+	priv->tx_clean_pointer = 0;
+	priv->tx_pointer = 0;
+	priv->tx_pending = 0;
+
+	err = ftgmac100_reset_hw(priv);
+	if (err)
+		goto err_hw;
+
+	ftgmac100_init_hw(priv);
+	ftgmac100_start_hw(priv, 10);
+
+	phy_start(priv->phydev);
+
+	napi_enable(&priv->napi);
+	netif_start_queue(netdev);
+
+	/* enable all interrupts */
+	iowrite32(INT_MASK_ALL_ENABLED, priv->base + FTGMAC100_OFFSET_IER);
+	return 0;
+
+err_hw:
+	free_irq(priv->irq, netdev);
+err_irq:
+	ftgmac100_free_buffers(priv);
+err_alloc:
+	return err;
+}
+
+static int ftgmac100_stop(struct net_device *netdev)
+{
+	struct ftgmac100 *priv = netdev_priv(netdev);
+
+	/* disable all interrupts */
+	iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
+
+	netif_stop_queue(netdev);
+	napi_disable(&priv->napi);
+	phy_stop(priv->phydev);
+
+	ftgmac100_stop_hw(priv);
+	free_irq(priv->irq, netdev);
+	ftgmac100_free_buffers(priv);
+
+	return 0;
+}
+
+static int ftgmac100_hard_start_xmit(struct sk_buff *skb,
+				     struct net_device *netdev)
+{
+	struct ftgmac100 *priv = netdev_priv(netdev);
+	dma_addr_t map;
+
+	if (unlikely(skb->len > MAX_PKT_SIZE)) {
+		if (net_ratelimit())
+			netdev_dbg(netdev, "tx packet too big\n");
+
+		netdev->stats.tx_dropped++;
+		dev_kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+
+	map = dma_map_single(priv->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(priv->dev, map))) {
+		/* drop packet */
+		if (net_ratelimit())
+			netdev_err(netdev, "map socket buffer failed\n");
+
+		netdev->stats.tx_dropped++;
+		dev_kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+
+	return ftgmac100_xmit(priv, skb, map);
+}
+
+/* optional */
+static int ftgmac100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+	struct ftgmac100 *priv = netdev_priv(netdev);
+
+	return phy_mii_ioctl(priv->phydev, ifr, cmd);
+}
+
+static const struct net_device_ops ftgmac100_netdev_ops = {
+	.ndo_open		= ftgmac100_open,
+	.ndo_stop		= ftgmac100_stop,
+	.ndo_start_xmit		= ftgmac100_hard_start_xmit,
+	.ndo_set_mac_address	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_do_ioctl		= ftgmac100_do_ioctl,
+};
+
+/******************************************************************************
+ * struct platform_driver functions
+ *****************************************************************************/
+static int ftgmac100_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	int irq;
+	struct net_device *netdev;
+	struct ftgmac100 *priv;
+	int err;
+	int i;
+
+	if (!pdev)
+		return -ENODEV;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -ENXIO;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0)
+		return irq;
+
+	/* setup net_device */
+	netdev = alloc_etherdev(sizeof(*priv));
+	if (!netdev) {
+		err = -ENOMEM;
+		goto err_alloc_etherdev;
+	}
+
+	SET_NETDEV_DEV(netdev, &pdev->dev);
+
+	SET_ETHTOOL_OPS(netdev, &ftgmac100_ethtool_ops);
+	netdev->netdev_ops = &ftgmac100_netdev_ops;
+	netdev->features = NETIF_F_IP_CSUM | NETIF_F_GRO;
+
+	platform_set_drvdata(pdev, netdev);
+
+	/* setup private data */
+	priv = netdev_priv(netdev);
+	priv->netdev = netdev;
+	priv->dev = &pdev->dev;
+
+	spin_lock_init(&priv->tx_lock);
+
+	/* initialize NAPI */
+	netif_napi_add(netdev, &priv->napi, ftgmac100_poll, 64);
+
+	/* map io memory */
+	priv->res = request_mem_region(res->start, resource_size(res),
+				       dev_name(&pdev->dev));
+	if (!priv->res) {
+		dev_err(&pdev->dev, "Could not reserve memory region\n");
+		err = -ENOMEM;
+		goto err_req_mem;
+	}
+
+	priv->base = ioremap(res->start, resource_size(res));
+	if (!priv->base) {
+		dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n");
+		err = -EIO;
+		goto err_ioremap;
+	}
+
+	priv->irq = irq;
+
+	/* initialize mdio bus */
+	priv->mii_bus = mdiobus_alloc();
+	if (!priv->mii_bus) {
+		err = -EIO;
+		goto err_alloc_mdiobus;
+	}
+
+	priv->mii_bus->name = "ftgmac100_mdio";
+	snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "ftgmac100_mii");
+
+	priv->mii_bus->priv = netdev;
+	priv->mii_bus->read = ftgmac100_mdiobus_read;
+	priv->mii_bus->write = ftgmac100_mdiobus_write;
+	priv->mii_bus->reset = ftgmac100_mdiobus_reset;
+	priv->mii_bus->irq = priv->phy_irq;
+
+	for (i = 0; i < PHY_MAX_ADDR; i++)
+		priv->mii_bus->irq[i] = PHY_POLL;
+
+	err = mdiobus_register(priv->mii_bus);
+	if (err) {
+		dev_err(&pdev->dev, "Cannot register MDIO bus!\n");
+		goto err_register_mdiobus;
+	}
+
+	err = ftgmac100_mii_probe(priv);
+	if (err) {
+		dev_err(&pdev->dev, "MII Probe failed!\n");
+		goto err_mii_probe;
+	}
+
+	/* register network device */
+	err = register_netdev(netdev);
+	if (err) {
+		dev_err(&pdev->dev, "Failed to register netdev\n");
+		goto err_register_netdev;
+	}
+
+	netdev_info(netdev, "irq %d, mapped at %p\n", priv->irq, priv->base);
+
+	if (!is_valid_ether_addr(netdev->dev_addr)) {
+		random_ether_addr(netdev->dev_addr);
+		netdev_info(netdev, "generated random MAC address %pM\n",
+			    netdev->dev_addr);
+	}
+
+	return 0;
+
+err_register_netdev:
+	phy_disconnect(priv->phydev);
+err_mii_probe:
+	mdiobus_unregister(priv->mii_bus);
+err_register_mdiobus:
+	mdiobus_free(priv->mii_bus);
+err_alloc_mdiobus:
+	iounmap(priv->base);
+err_ioremap:
+	release_resource(priv->res);
+err_req_mem:
+	netif_napi_del(&priv->napi);
+	platform_set_drvdata(pdev, NULL);
+	free_netdev(netdev);
+err_alloc_etherdev:
+	return err;
+}
+
+static int __exit ftgmac100_remove(struct platform_device *pdev)
+{
+	struct net_device *netdev;
+	struct ftgmac100 *priv;
+
+	netdev = platform_get_drvdata(pdev);
+	priv = netdev_priv(netdev);
+
+	unregister_netdev(netdev);
+
+	phy_disconnect(priv->phydev);
+	mdiobus_unregister(priv->mii_bus);
+	mdiobus_free(priv->mii_bus);
+
+	iounmap(priv->base);
+	release_resource(priv->res);
+
+	netif_napi_del(&priv->napi);
+	platform_set_drvdata(pdev, NULL);
+	free_netdev(netdev);
+	return 0;
+}
+
+static struct platform_driver ftgmac100_driver = {
+	.probe		= ftgmac100_probe,
+	.remove		= __exit_p(ftgmac100_remove),
+	.driver		= {
+		.name	= DRV_NAME,
+		.owner	= THIS_MODULE,
+	},
+};
+
+/******************************************************************************
+ * initialization / finalization
+ *****************************************************************************/
+static int __init ftgmac100_init(void)
+{
+	pr_info("Loading version " DRV_VERSION " ...\n");
+	return platform_driver_register(&ftgmac100_driver);
+}
+
+static void __exit ftgmac100_exit(void)
+{
+	platform_driver_unregister(&ftgmac100_driver);
+}
+
+module_init(ftgmac100_init);
+module_exit(ftgmac100_exit);
+
+MODULE_AUTHOR("Po-Yu Chuang <ratbert@faraday-tech.com>");
+MODULE_DESCRIPTION("FTGMAC100 driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ftgmac100.h b/drivers/net/ftgmac100.h
new file mode 100644
index 0000000..13408d4
--- /dev/null
+++ b/drivers/net/ftgmac100.h
@@ -0,0 +1,246 @@
+/*
+ * Faraday FTGMAC100 Gigabit Ethernet
+ *
+ * (C) Copyright 2009-2011 Faraday Technology
+ * Po-Yu Chuang <ratbert@faraday-tech.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __FTGMAC100_H
+#define __FTGMAC100_H
+
+#define FTGMAC100_OFFSET_ISR		0x00
+#define FTGMAC100_OFFSET_IER		0x04
+#define FTGMAC100_OFFSET_MAC_MADR	0x08
+#define FTGMAC100_OFFSET_MAC_LADR	0x0c
+#define FTGMAC100_OFFSET_MAHT0		0x10
+#define FTGMAC100_OFFSET_MAHT1		0x14
+#define FTGMAC100_OFFSET_NPTXPD		0x18
+#define FTGMAC100_OFFSET_RXPD		0x1c
+#define FTGMAC100_OFFSET_NPTXR_BADR	0x20
+#define FTGMAC100_OFFSET_RXR_BADR	0x24
+#define FTGMAC100_OFFSET_HPTXPD		0x28
+#define FTGMAC100_OFFSET_HPTXR_BADR	0x2c
+#define FTGMAC100_OFFSET_ITC		0x30
+#define FTGMAC100_OFFSET_APTC		0x34
+#define FTGMAC100_OFFSET_DBLAC		0x38
+#define FTGMAC100_OFFSET_DMAFIFOS	0x3c
+#define FTGMAC100_OFFSET_REVR		0x40
+#define FTGMAC100_OFFSET_FEAR		0x44
+#define FTGMAC100_OFFSET_TPAFCR		0x48
+#define FTGMAC100_OFFSET_RBSR		0x4c
+#define FTGMAC100_OFFSET_MACCR		0x50
+#define FTGMAC100_OFFSET_MACSR		0x54
+#define FTGMAC100_OFFSET_TM		0x58
+#define FTGMAC100_OFFSET_PHYCR		0x60
+#define FTGMAC100_OFFSET_PHYDATA	0x64
+#define FTGMAC100_OFFSET_FCR		0x68
+#define FTGMAC100_OFFSET_BPR		0x6c
+#define FTGMAC100_OFFSET_WOLCR		0x70
+#define FTGMAC100_OFFSET_WOLSR		0x74
+#define FTGMAC100_OFFSET_WFCRC		0x78
+#define FTGMAC100_OFFSET_WFBM1		0x80
+#define FTGMAC100_OFFSET_WFBM2		0x84
+#define FTGMAC100_OFFSET_WFBM3		0x88
+#define FTGMAC100_OFFSET_WFBM4		0x8c
+#define FTGMAC100_OFFSET_NPTXR_PTR	0x90
+#define FTGMAC100_OFFSET_HPTXR_PTR	0x94
+#define FTGMAC100_OFFSET_RXR_PTR	0x98
+#define FTGMAC100_OFFSET_TX		0xa0
+#define FTGMAC100_OFFSET_TX_MCOL_SCOL	0xa4
+#define FTGMAC100_OFFSET_TX_ECOL_FAIL	0xa8
+#define FTGMAC100_OFFSET_TX_LCOL_UND	0xac
+#define FTGMAC100_OFFSET_RX		0xb0
+#define FTGMAC100_OFFSET_RX_BC		0xb4
+#define FTGMAC100_OFFSET_RX_MC		0xb8
+#define FTGMAC100_OFFSET_RX_PF_AEP	0xbc
+#define FTGMAC100_OFFSET_RX_RUNT	0xc0
+#define FTGMAC100_OFFSET_RX_CRCER_FTL	0xc4
+#define FTGMAC100_OFFSET_RX_COL_LOST	0xc8
+
+/*
+ * Interrupt status register & interrupt enable register
+ */
+#define FTGMAC100_INT_RPKT_BUF		(1 << 0)
+#define FTGMAC100_INT_RPKT_FIFO		(1 << 1)
+#define FTGMAC100_INT_NO_RXBUF		(1 << 2)
+#define FTGMAC100_INT_RPKT_LOST		(1 << 3)
+#define FTGMAC100_INT_XPKT_ETH		(1 << 4)
+#define FTGMAC100_INT_XPKT_FIFO		(1 << 5)
+#define FTGMAC100_INT_NO_NPTXBUF	(1 << 6)
+#define FTGMAC100_INT_XPKT_LOST		(1 << 7)
+#define FTGMAC100_INT_AHB_ERR		(1 << 8)
+#define FTGMAC100_INT_PHYSTS_CHG	(1 << 9)
+#define FTGMAC100_INT_NO_HPTXBUF	(1 << 10)
+
+/*
+ * Interrupt timer control register
+ */
+#define FTGMAC100_ITC_RXINT_CNT(x)	(((x) & 0xf) << 0)
+#define FTGMAC100_ITC_RXINT_THR(x)	(((x) & 0x7) << 4)
+#define FTGMAC100_ITC_RXINT_TIME_SEL	(1 << 7)
+#define FTGMAC100_ITC_TXINT_CNT(x)	(((x) & 0xf) << 8)
+#define FTGMAC100_ITC_TXINT_THR(x)	(((x) & 0x7) << 12)
+#define FTGMAC100_ITC_TXINT_TIME_SEL	(1 << 15)
+
+/*
+ * Automatic polling timer control register
+ */
+#define FTGMAC100_APTC_RXPOLL_CNT(x)	(((x) & 0xf) << 0)
+#define FTGMAC100_APTC_RXPOLL_TIME_SEL	(1 << 4)
+#define FTGMAC100_APTC_TXPOLL_CNT(x)	(((x) & 0xf) << 8)
+#define FTGMAC100_APTC_TXPOLL_TIME_SEL	(1 << 12)
+
+/*
+ * DMA burst length and arbitration control register
+ */
+#define FTGMAC100_DBLAC_RXFIFO_LTHR(x)	(((x) & 0x7) << 0)
+#define FTGMAC100_DBLAC_RXFIFO_HTHR(x)	(((x) & 0x7) << 3)
+#define FTGMAC100_DBLAC_RX_THR_EN	(1 << 6)
+#define FTGMAC100_DBLAC_RXBURST_SIZE(x)	(((x) & 0x3) << 8)
+#define FTGMAC100_DBLAC_TXBURST_SIZE(x)	(((x) & 0x3) << 10)
+#define FTGMAC100_DBLAC_RXDES_SIZE(x)	(((x) & 0xf) << 12)
+#define FTGMAC100_DBLAC_TXDES_SIZE(x)	(((x) & 0xf) << 16)
+#define FTGMAC100_DBLAC_IFG_CNT(x)	(((x) & 0x7) << 20)
+#define FTGMAC100_DBLAC_IFG_INC		(1 << 23)
+
+/*
+ * DMA FIFO status register
+ */
+#define FTGMAC100_DMAFIFOS_RXDMA1_SM(dmafifos)	((dmafifos) & 0xf)
+#define FTGMAC100_DMAFIFOS_RXDMA2_SM(dmafifos)	(((dmafifos) >> 4) & 0xf)
+#define FTGMAC100_DMAFIFOS_RXDMA3_SM(dmafifos)	(((dmafifos) >> 8) & 0x7)
+#define FTGMAC100_DMAFIFOS_TXDMA1_SM(dmafifos)	(((dmafifos) >> 12) & 0xf)
+#define FTGMAC100_DMAFIFOS_TXDMA2_SM(dmafifos)	(((dmafifos) >> 16) & 0x3)
+#define FTGMAC100_DMAFIFOS_TXDMA3_SM(dmafifos)	(((dmafifos) >> 18) & 0xf)
+#define FTGMAC100_DMAFIFOS_RXFIFO_EMPTY		(1 << 26)
+#define FTGMAC100_DMAFIFOS_TXFIFO_EMPTY		(1 << 27)
+#define FTGMAC100_DMAFIFOS_RXDMA_GRANT		(1 << 28)
+#define FTGMAC100_DMAFIFOS_TXDMA_GRANT		(1 << 29)
+#define FTGMAC100_DMAFIFOS_RXDMA_REQ		(1 << 30)
+#define FTGMAC100_DMAFIFOS_TXDMA_REQ		(1 << 31)
+
+/*
+ * Receive buffer size register
+ */
+#define FTGMAC100_RBSR_SIZE(x)		((x) & 0x3fff)
+
+/*
+ * MAC control register
+ */
+#define FTGMAC100_MACCR_TXDMA_EN	(1 << 0)
+#define FTGMAC100_MACCR_RXDMA_EN	(1 << 1)
+#define FTGMAC100_MACCR_TXMAC_EN	(1 << 2)
+#define FTGMAC100_MACCR_RXMAC_EN	(1 << 3)
+#define FTGMAC100_MACCR_RM_VLAN		(1 << 4)
+#define FTGMAC100_MACCR_HPTXR_EN	(1 << 5)
+#define FTGMAC100_MACCR_LOOP_EN		(1 << 6)
+#define FTGMAC100_MACCR_ENRX_IN_HALFTX	(1 << 7)
+#define FTGMAC100_MACCR_FULLDUP		(1 << 8)
+#define FTGMAC100_MACCR_GIGA_MODE	(1 << 9)
+#define FTGMAC100_MACCR_CRC_APD		(1 << 10)
+#define FTGMAC100_MACCR_RX_RUNT		(1 << 12)
+#define FTGMAC100_MACCR_JUMBO_LF	(1 << 13)
+#define FTGMAC100_MACCR_RX_ALL		(1 << 14)
+#define FTGMAC100_MACCR_HT_MULTI_EN	(1 << 15)
+#define FTGMAC100_MACCR_RX_MULTIPKT	(1 << 16)
+#define FTGMAC100_MACCR_RX_BROADPKT	(1 << 17)
+#define FTGMAC100_MACCR_DISCARD_CRCERR	(1 << 18)
+#define FTGMAC100_MACCR_FAST_MODE	(1 << 19)
+#define FTGMAC100_MACCR_SW_RST		(1 << 31)
+
+/*
+ * PHY control register
+ */
+#define FTGMAC100_PHYCR_MDC_CYCTHR_MASK	0x3f
+#define FTGMAC100_PHYCR_MDC_CYCTHR(x)	((x) & 0x3f)
+#define FTGMAC100_PHYCR_PHYAD(x)	(((x) & 0x1f) << 16)
+#define FTGMAC100_PHYCR_REGAD(x)	(((x) & 0x1f) << 21)
+#define FTGMAC100_PHYCR_MIIRD		(1 << 26)
+#define FTGMAC100_PHYCR_MIIWR		(1 << 27)
+
+/*
+ * PHY data register
+ */
+#define FTGMAC100_PHYDATA_MIIWDATA(x)		((x) & 0xffff)
+#define FTGMAC100_PHYDATA_MIIRDATA(phydata)	(((phydata) >> 16) & 0xffff)
+
+/*
+ * Transmit descriptor, aligned to 16 bytes
+ */
+struct ftgmac100_txdes {
+	unsigned int	txdes0;
+	unsigned int	txdes1;
+	unsigned int	txdes2;	/* not used by HW */
+	unsigned int	txdes3;	/* TXBUF_BADR */
+} __attribute__ ((aligned(16)));
+
+#define FTGMAC100_TXDES0_TXBUF_SIZE(x)	((x) & 0x3fff)
+#define FTGMAC100_TXDES0_EDOTR		(1 << 15)
+#define FTGMAC100_TXDES0_CRC_ERR	(1 << 19)
+#define FTGMAC100_TXDES0_LTS		(1 << 28)
+#define FTGMAC100_TXDES0_FTS		(1 << 29)
+#define FTGMAC100_TXDES0_TXDMA_OWN	(1 << 31)
+
+#define FTGMAC100_TXDES1_VLANTAG_CI(x)	((x) & 0xffff)
+#define FTGMAC100_TXDES1_INS_VLANTAG	(1 << 16)
+#define FTGMAC100_TXDES1_TCP_CHKSUM	(1 << 17)
+#define FTGMAC100_TXDES1_UDP_CHKSUM	(1 << 18)
+#define FTGMAC100_TXDES1_IP_CHKSUM	(1 << 19)
+#define FTGMAC100_TXDES1_LLC		(1 << 22)
+#define FTGMAC100_TXDES1_TX2FIC		(1 << 30)
+#define FTGMAC100_TXDES1_TXIC		(1 << 31)
+
+/*
+ * Receive descriptor, aligned to 16 bytes
+ */
+struct ftgmac100_rxdes {
+	unsigned int	rxdes0;
+	unsigned int	rxdes1;
+	unsigned int	rxdes2;	/* not used by HW */
+	unsigned int	rxdes3;	/* RXBUF_BADR */
+} __attribute__ ((aligned(16)));
+
+#define FTGMAC100_RXDES0_VDBC		0x3fff
+#define FTGMAC100_RXDES0_EDORR		(1 << 15)
+#define FTGMAC100_RXDES0_MULTICAST	(1 << 16)
+#define FTGMAC100_RXDES0_BROADCAST	(1 << 17)
+#define FTGMAC100_RXDES0_RX_ERR		(1 << 18)
+#define FTGMAC100_RXDES0_CRC_ERR	(1 << 19)
+#define FTGMAC100_RXDES0_FTL		(1 << 20)
+#define FTGMAC100_RXDES0_RUNT		(1 << 21)
+#define FTGMAC100_RXDES0_RX_ODD_NB	(1 << 22)
+#define FTGMAC100_RXDES0_FIFO_FULL	(1 << 23)
+#define FTGMAC100_RXDES0_PAUSE_OPCODE	(1 << 24)
+#define FTGMAC100_RXDES0_PAUSE_FRAME	(1 << 25)
+#define FTGMAC100_RXDES0_LRS		(1 << 28)
+#define FTGMAC100_RXDES0_FRS		(1 << 29)
+#define FTGMAC100_RXDES0_RXPKT_RDY	(1 << 31)
+
+#define FTGMAC100_RXDES1_VLANTAG_CI	0xffff
+#define FTGMAC100_RXDES1_PROT_MASK	(0x3 << 20)
+#define FTGMAC100_RXDES1_PROT_NONIP	(0x0 << 20)
+#define FTGMAC100_RXDES1_PROT_IP	(0x1 << 20)
+#define FTGMAC100_RXDES1_PROT_TCPIP	(0x2 << 20)
+#define FTGMAC100_RXDES1_PROT_UDPIP	(0x3 << 20)
+#define FTGMAC100_RXDES1_LLC		(1 << 22)
+#define FTGMAC100_RXDES1_DF		(1 << 23)
+#define FTGMAC100_RXDES1_VLANTAG_AVAIL	(1 << 24)
+#define FTGMAC100_RXDES1_TCP_CHKSUM_ERR	(1 << 25)
+#define FTGMAC100_RXDES1_UDP_CHKSUM_ERR	(1 << 26)
+#define FTGMAC100_RXDES1_IP_CHKSUM_ERR	(1 << 27)
+
+#endif /* __FTGMAC100_H */
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 2dfcc80..dc0a7aa 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -62,6 +62,9 @@
  *  The driver then cleans up the buffer.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define DEBUG
+
 #include <linux/kernel.h>
 #include <linux/string.h>
 #include <linux/errno.h>
@@ -213,8 +216,7 @@
 			} else {
 				skb = gfar_new_skb(ndev);
 				if (!skb) {
-					pr_err("%s: Can't allocate RX buffers\n",
-							ndev->name);
+					netdev_err(ndev, "Can't allocate RX buffers\n");
 					goto err_rxalloc_fail;
 				}
 				rx_queue->rx_skbuff[j] = skb;
@@ -258,9 +260,8 @@
 			sizeof(struct rxbd8) * priv->total_rx_ring_size,
 			&addr, GFP_KERNEL);
 	if (!vaddr) {
-		if (netif_msg_ifup(priv))
-			pr_err("%s: Could not allocate buffer descriptors!\n",
-			       ndev->name);
+		netif_err(priv, ifup, ndev,
+			  "Could not allocate buffer descriptors!\n");
 		return -ENOMEM;
 	}
 
@@ -290,9 +291,8 @@
 		tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
 				  tx_queue->tx_ring_size, GFP_KERNEL);
 		if (!tx_queue->tx_skbuff) {
-			if (netif_msg_ifup(priv))
-				pr_err("%s: Could not allocate tx_skbuff\n",
-						ndev->name);
+			netif_err(priv, ifup, ndev,
+				  "Could not allocate tx_skbuff\n");
 			goto cleanup;
 		}
 
@@ -306,9 +306,8 @@
 				  rx_queue->rx_ring_size, GFP_KERNEL);
 
 		if (!rx_queue->rx_skbuff) {
-			if (netif_msg_ifup(priv))
-				pr_err("%s: Could not allocate rx_skbuff\n",
-				       ndev->name);
+			netif_err(priv, ifup, ndev,
+				  "Could not allocate rx_skbuff\n");
 			goto cleanup;
 		}
 
@@ -625,9 +624,9 @@
 	num_tx_qs = tx_queues ? *tx_queues : 1;
 
 	if (num_tx_qs > MAX_TX_QS) {
-		printk(KERN_ERR "num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
-				num_tx_qs, MAX_TX_QS);
-		printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n");
+		pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
+		       num_tx_qs, MAX_TX_QS);
+		pr_err("Cannot do alloc_etherdev, aborting\n");
 		return -EINVAL;
 	}
 
@@ -635,9 +634,9 @@
 	num_rx_qs = rx_queues ? *rx_queues : 1;
 
 	if (num_rx_qs > MAX_RX_QS) {
-		printk(KERN_ERR "num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
-				num_tx_qs, MAX_TX_QS);
-		printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n");
+		pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
+		       num_rx_qs, MAX_RX_QS);
+		pr_err("Cannot do alloc_etherdev, aborting\n");
 		return -EINVAL;
 	}
 
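The logging conversions in this file replace open-coded printk() calls with the structured helpers; the three forms used here differ in prefixing and gating (sketch with hypothetical messages):

	static void foo_log_forms(struct gfar_private *priv, struct net_device *ndev)
	{
		pr_err("bad parameter\n");		/* prefixed via pr_fmt() above */
		netdev_err(ndev, "ring alloc failed\n");	/* prefixed with device name */
		netif_err(priv, ifup, ndev, "no buffers\n");	/* also gated on msg_enable */
	}
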
@@ -655,6 +654,11 @@
 	priv->num_rx_queues = num_rx_qs;
 	priv->num_grps = 0x0;
 
+	/* Init Rx queue filer rule set linked list */
+	INIT_LIST_HEAD(&priv->rx_list.list);
+	priv->rx_list.count = 0;
+	mutex_init(&priv->rx_queue_access);
+
 	model = of_get_property(np, "model", NULL);
 
 	for (i = 0; i < MAXGROUPS; i++)
@@ -1148,9 +1152,8 @@
 		priv->rx_queue[i]->rxic = DEFAULT_RXIC;
 	}
 
-	/* enable filer if using multiple RX queues*/
-	if(priv->num_rx_queues > 1)
-		priv->rx_filer_enable = 1;
+	/* always enable rx filer */
+	priv->rx_filer_enable = 1;
 	/* Enable most messages by default */
 	priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
 
@@ -1160,8 +1163,7 @@
 	err = register_netdev(dev);
 
 	if (err) {
-		printk(KERN_ERR "%s: Cannot register net device, aborting.\n",
-				dev->name);
+		pr_err("%s: Cannot register net device, aborting\n", dev->name);
 		goto register_fail;
 	}
 
@@ -1212,17 +1214,17 @@
 	gfar_init_sysfs(dev);
 
 	/* Print out the device info */
-	printk(KERN_INFO DEVICE_NAME "%pM\n", dev->name, dev->dev_addr);
+	netdev_info(dev, "mac: %pM\n", dev->dev_addr);
 
 	/* Even more device info helps when determining which kernel */
 	/* provided which set of benchmarks. */
-	printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
+	netdev_info(dev, "Running with NAPI enabled\n");
 	for (i = 0; i < priv->num_rx_queues; i++)
-		printk(KERN_INFO "%s: RX BD ring size for Q[%d]: %d\n",
-			dev->name, i, priv->rx_queue[i]->rx_ring_size);
+		netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
+			    i, priv->rx_queue[i]->rx_ring_size);
 	for(i = 0; i < priv->num_tx_queues; i++)
-		 printk(KERN_INFO "%s: TX BD ring size for Q[%d]: %d\n",
-			dev->name, i, priv->tx_queue[i]->tx_ring_size);
+		netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
+			    i, priv->tx_queue[i]->tx_ring_size);
 
 	return 0;
 
@@ -1855,34 +1857,30 @@
 		 * Transmit, and Receive */
 		if ((err = request_irq(grp->interruptError, gfar_error, 0,
 				grp->int_name_er,grp)) < 0) {
-			if (netif_msg_intr(priv))
-				printk(KERN_ERR "%s: Can't get IRQ %d\n",
-					dev->name, grp->interruptError);
+			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
+				  grp->interruptError);
 
 			goto err_irq_fail;
 		}
 
 		if ((err = request_irq(grp->interruptTransmit, gfar_transmit,
 				0, grp->int_name_tx, grp)) < 0) {
-			if (netif_msg_intr(priv))
-				printk(KERN_ERR "%s: Can't get IRQ %d\n",
-					dev->name, grp->interruptTransmit);
+			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
+				  grp->interruptTransmit);
 			goto tx_irq_fail;
 		}
 
 		if ((err = request_irq(grp->interruptReceive, gfar_receive, 0,
 				grp->int_name_rx, grp)) < 0) {
-			if (netif_msg_intr(priv))
-				printk(KERN_ERR "%s: Can't get IRQ %d\n",
-					dev->name, grp->interruptReceive);
+			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
+				  grp->interruptReceive);
 			goto rx_irq_fail;
 		}
 	} else {
 		if ((err = request_irq(grp->interruptTransmit, gfar_interrupt, 0,
 				grp->int_name_tx, grp)) < 0) {
-			if (netif_msg_intr(priv))
-				printk(KERN_ERR "%s: Can't get IRQ %d\n",
-					dev->name, grp->interruptTransmit);
+			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
+				  grp->interruptTransmit);
 			goto err_irq_fail;
 		}
 	}
@@ -2351,9 +2349,7 @@
 		frame_size += VLAN_HLEN;
 
 	if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
-		if (netif_msg_drv(priv))
-			printk(KERN_ERR "%s: Invalid MTU setting\n",
-					dev->name);
+		netif_err(priv, drv, dev, "Invalid MTU setting\n");
 		return -EINVAL;
 	}
 
@@ -2773,9 +2769,7 @@
 				gfar_process_frame(dev, skb, amount_pull);
 
 			} else {
-				if (netif_msg_rx_err(priv))
-					printk(KERN_WARNING
-					       "%s: Missing skb!\n", dev->name);
+				netif_warn(priv, rx_err, dev, "Missing skb!\n");
 				rx_queue->stats.rx_dropped++;
 				priv->extra_stats.rx_skbmissing++;
 			}
@@ -2978,10 +2972,9 @@
 					ecntrl &= ~(ECNTRL_R100);
 				break;
 			default:
-				if (netif_msg_link(priv))
-					printk(KERN_WARNING
-						"%s: Ack!  Speed (%d) is not 10/100/1000!\n",
-						dev->name, phydev->speed);
+				netif_warn(priv, link, dev,
+					   "Ack!  Speed (%d) is not 10/100/1000!\n",
+					   phydev->speed);
 				break;
 			}
 
@@ -3186,8 +3179,8 @@
 
 	/* Hmm... */
 	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
-		printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n",
-		       dev->name, events, gfar_read(&regs->imask));
+		netdev_dbg(dev, "error interrupt (ievent=0x%08x imask=0x%08x)\n",
+			   events, gfar_read(&regs->imask));
 
 	/* Update the error counters */
 	if (events & IEVENT_TXE) {
@@ -3200,9 +3193,8 @@
 		if (events & IEVENT_XFUN) {
 			unsigned long flags;
 
-			if (netif_msg_tx_err(priv))
-				printk(KERN_DEBUG "%s: TX FIFO underrun, "
-				       "packet dropped.\n", dev->name);
+			netif_dbg(priv, tx_err, dev,
+				  "TX FIFO underrun, packet dropped\n");
 			dev->stats.tx_dropped++;
 			priv->extra_stats.tx_underrun++;
 
@@ -3215,8 +3207,7 @@
 			unlock_tx_qs(priv);
 			local_irq_restore(flags);
 		}
-		if (netif_msg_tx_err(priv))
-			printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
+		netif_dbg(priv, tx_err, dev, "Transmit Error\n");
 	}
 	if (events & IEVENT_BSY) {
 		dev->stats.rx_errors++;
@@ -3224,29 +3215,25 @@
 
 		gfar_receive(irq, grp_id);
 
-		if (netif_msg_rx_err(priv))
-			printk(KERN_DEBUG "%s: busy error (rstat: %x)\n",
-			       dev->name, gfar_read(&regs->rstat));
+		netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
+			  gfar_read(&regs->rstat));
 	}
 	if (events & IEVENT_BABR) {
 		dev->stats.rx_errors++;
 		priv->extra_stats.rx_babr++;
 
-		if (netif_msg_rx_err(priv))
-			printk(KERN_DEBUG "%s: babbling RX error\n", dev->name);
+		netif_dbg(priv, rx_err, dev, "babbling RX error\n");
 	}
 	if (events & IEVENT_EBERR) {
 		priv->extra_stats.eberr++;
-		if (netif_msg_rx_err(priv))
-			printk(KERN_DEBUG "%s: bus error\n", dev->name);
+		netif_dbg(priv, rx_err, dev, "bus error\n");
 	}
-	if ((events & IEVENT_RXC) && netif_msg_rx_status(priv))
-		printk(KERN_DEBUG "%s: control frame\n", dev->name);
+	if (events & IEVENT_RXC)
+		netif_dbg(priv, rx_status, dev, "control frame\n");
 
 	if (events & IEVENT_BABT) {
 		priv->extra_stats.tx_babt++;
-		if (netif_msg_tx_err(priv))
-			printk(KERN_DEBUG "%s: babbling TX error\n", dev->name);
+		netif_dbg(priv, tx_err, dev, "babbling TX error\n");
 	}
 	return IRQ_HANDLED;
 }
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index ba36dc7..76f14d0 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -47,6 +47,16 @@
 #include <linux/workqueue.h>
 #include <linux/ethtool.h>
 
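+/* A single ethtool RX flow classification rule, as kept in the
+ * driver's rx_list */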
+struct ethtool_flow_spec_container {
+	struct ethtool_rx_flow_spec fs;
+	struct list_head list;
+};
+
+struct ethtool_rx_list {
+	struct list_head list;
+	unsigned int count;
+};
+
 /* The maximum number of packets to be handled in one call of gfar_poll */
 #define GFAR_DEV_WEIGHT 64
 
@@ -168,6 +178,7 @@
 #define MACCFG2_LENGTHCHECK	0x00000010
 #define MACCFG2_MPEN		0x00000008
 
+#define ECNTRL_FIFM		0x00008000
 #define ECNTRL_INIT_SETTINGS	0x00001000
 #define ECNTRL_TBI_MODE         0x00000020
 #define ECNTRL_REDUCED_MODE	0x00000010
@@ -271,6 +282,7 @@
 #define RCTRL_TUCSEN		0x00000100
 #define RCTRL_PRSDEP_MASK	0x000000c0
 #define RCTRL_PRSDEP_INIT	0x000000c0
+#define RCTRL_PRSFM		0x00000020
 #define RCTRL_PROM		0x00000008
 #define RCTRL_EMEN		0x00000002
 #define RCTRL_REQ_PARSER	(RCTRL_VLEX | RCTRL_IPCSEN | \
@@ -1066,6 +1078,9 @@
 
 	struct vlan_group *vlgrp;
 
+	/* RX queue filer rule set*/
+	struct ethtool_rx_list rx_list;
+	struct mutex rx_queue_access;
 
 	/* Hash registers and their width */
 	u32 __iomem *hash_regs[16];
@@ -1142,6 +1157,16 @@
 	gfar_write(&regs->rqfpr, fpr);
 }
 
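+/* Read back one filer entry (control and property words) at index far */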
+static inline void gfar_read_filer(struct gfar_private *priv,
+		unsigned int far, unsigned int *fcr, unsigned int *fpr)
+{
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
+
+	gfar_write(&regs->rqfar, far);
+	*fcr = gfar_read(&regs->rqfcr);
+	*fpr = gfar_read(&regs->rqfpr);
+}
+
 extern void lock_rx_qs(struct gfar_private *priv);
 extern void lock_tx_qs(struct gfar_private *priv);
 extern void unlock_rx_qs(struct gfar_private *priv);
@@ -1159,4 +1184,32 @@
 
 extern const struct ethtool_ops gfar_ethtool_ops;
 
+#define MAX_FILER_CACHE_IDX (2*(MAX_FILER_IDX))
+
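+/* One-padding masks used by gfar_set_attribute() to widen the narrow
+ * property fields (priority, L4 protocol, VID, port, MAC halves) */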
+#define RQFCR_PID_PRI_MASK 0xFFFFFFF8
+#define RQFCR_PID_L4P_MASK 0xFFFFFF00
+#define RQFCR_PID_VID_MASK 0xFFFFF000
+#define RQFCR_PID_PORT_MASK 0xFFFF0000
+#define RQFCR_PID_MAC_MASK 0xFF000000
+
+struct gfar_mask_entry {
+	unsigned int mask; /* The mask value which is valid from start to end */
+	unsigned int start;
+	unsigned int end;
+	unsigned int block; /* Same block values indicate dependent entries */
+};
+
+/* Represents a receive filer table entry */
+struct gfar_filer_entry {
+	u32 ctrl;
+	u32 prop;
+};
+
+
+/* The 20 additional entries provide a shadow for one extra element */
+struct filer_table {
+	u32 index;
+	struct gfar_filer_entry fe[MAX_FILER_CACHE_IDX + 20];
+};
+
 #endif /* __GIANFAR_H */
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c
index 239e333..2ecdc9a 100644
--- a/drivers/net/gianfar_ethtool.c
+++ b/drivers/net/gianfar_ethtool.c
@@ -16,6 +16,8 @@
  *  by reference.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/string.h>
 #include <linux/errno.h>
@@ -37,6 +39,7 @@
 #include <linux/ethtool.h>
 #include <linux/mii.h>
 #include <linux/phy.h>
+#include <linux/sort.h>
 
 #include "gianfar.h"
 
@@ -375,13 +378,13 @@
 	/* Check the bounds of the values */
 	if (cvals->rx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
 		pr_info("Coalescing is limited to %d microseconds\n",
-				GFAR_MAX_COAL_USECS);
+			GFAR_MAX_COAL_USECS);
 		return -EINVAL;
 	}
 
 	if (cvals->rx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
 		pr_info("Coalescing is limited to %d frames\n",
-				GFAR_MAX_COAL_FRAMES);
+			GFAR_MAX_COAL_FRAMES);
 		return -EINVAL;
 	}
 
@@ -404,13 +407,13 @@
 	/* Check the bounds of the values */
 	if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
 		pr_info("Coalescing is limited to %d microseconds\n",
-				GFAR_MAX_COAL_USECS);
+			GFAR_MAX_COAL_USECS);
 		return -EINVAL;
 	}
 
 	if (cvals->tx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
 		pr_info("Coalescing is limited to %d frames\n",
-				GFAR_MAX_COAL_FRAMES);
+			GFAR_MAX_COAL_FRAMES);
 		return -EINVAL;
 	}
 
@@ -464,8 +467,7 @@
 		return -EINVAL;
 
 	if (!is_power_of_2(rvals->rx_pending)) {
-		printk("%s: Ring sizes must be a power of 2\n",
-				dev->name);
+		netdev_err(dev, "Ring sizes must be a power of 2\n");
 		return -EINVAL;
 	}
 
@@ -473,8 +475,7 @@
 		return -EINVAL;
 
 	if (!is_power_of_2(rvals->tx_pending)) {
-		printk("%s: Ring sizes must be a power of 2\n",
-				dev->name);
+		netdev_err(dev, "Ring sizes must be a power of 2\n");
 		return -EINVAL;
 	}
 
@@ -700,7 +701,7 @@
 		cmp_rqfpr = RQFPR_IPV6 |RQFPR_UDP;
 		break;
 	default:
-		printk(KERN_ERR "Right now this class is not supported\n");
+		pr_err("Right now this class is not supported\n");
 		return 0;
 	}
 
@@ -715,8 +716,7 @@
 	}
 
 	if (i == MAX_FILER_IDX + 1) {
-		printk(KERN_ERR "No parse rule found, ");
-		printk(KERN_ERR "can't create hash rules\n");
+		pr_err("No parse rule found, can't create hash rules\n");
 		return 0;
 	}
 
@@ -773,19 +773,945 @@
 	return 0;
 }
 
+static int gfar_check_filer_hardware(struct gfar_private *priv)
+{
+	struct gfar __iomem *regs = NULL;
+	u32 i;
+
+	regs = priv->gfargrp[0].regs;
+
+	/* Check if we are in FIFO mode */
+	i = gfar_read(&regs->ecntrl);
+	i &= ECNTRL_FIFM;
+	if (i == ECNTRL_FIFM) {
+		netdev_notice(priv->ndev, "Interface in FIFO mode\n");
+		i = gfar_read(&regs->rctrl);
+		i &= RCTRL_PRSDEP_MASK | RCTRL_PRSFM;
+		if (i == (RCTRL_PRSDEP_MASK | RCTRL_PRSFM)) {
+			netdev_info(priv->ndev,
+					"Receive Queue Filtering enabled\n");
+		} else {
+			netdev_warn(priv->ndev,
+					"Receive Queue Filtering disabled\n");
+			return -EOPNOTSUPP;
+		}
+	} else {
+		/* Standard mode */
+		i = gfar_read(&regs->rctrl);
+		i &= RCTRL_PRSDEP_MASK;
+		if (i == RCTRL_PRSDEP_MASK) {
+			netdev_info(priv->ndev,
+					"Receive Queue Filtering enabled\n");
+		} else {
+			netdev_warn(priv->ndev,
+					"Receive Queue Filtering disabled\n");
+			return -EOPNOTSUPP;
+		}
+	}
+
+	/* Sets the properties of the arbitrary filer rule
+	 * to the first 4 Layer 4 bytes */
+	gfar_write(&regs->rbifx, 0xC0C1C2C3);
+	return 0;
+}
+
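+/* sort() comparators: order two entries by the raw bytes of their
+ * 32bit mask value (the first word of struct gfar_mask_entry) */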
+static int gfar_comp_asc(const void *a, const void *b)
+{
+	return memcmp(a, b, 4);
+}
+
+static int gfar_comp_desc(const void *a, const void *b)
+{
+	return -memcmp(a, b, 4);
+}
+
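+/* sort() swap callback; one element is four u32 words, matching
+ * sizeof(struct gfar_mask_entry) */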
+static void gfar_swap(void *a, void *b, int size)
+{
+	u32 *_a = a;
+	u32 *_b = b;
+
+	swap(_a[0], _b[0]);
+	swap(_a[1], _b[1]);
+	swap(_a[2], _b[2]);
+	swap(_a[3], _b[3]);
+}
+
+/* Write a mask to filer cache */
+static void gfar_set_mask(u32 mask, struct filer_table *tab)
+{
+	tab->fe[tab->index].ctrl = RQFCR_AND | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
+	tab->fe[tab->index].prop = mask;
+	tab->index++;
+}
+
+/* Sets parse bits (e.g. IP or TCP) */
+static void gfar_set_parse_bits(u32 value, u32 mask, struct filer_table *tab)
+{
+	gfar_set_mask(mask, tab);
+	tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_PID_PARSE
+			| RQFCR_AND;
+	tab->fe[tab->index].prop = value;
+	tab->index++;
+}
+
+static void gfar_set_general_attribute(u32 value, u32 mask, u32 flag,
+		struct filer_table *tab)
+{
+	gfar_set_mask(mask, tab);
+	tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_AND | flag;
+	tab->fe[tab->index].prop = value;
+	tab->index++;
+}
+
+/*
+ * For setting a tuple of value and mask of type flag
+ * Example:
+ * IP-Src = 10.0.0.0/255.0.0.0
+ * value: 0x0A000000 mask: 0xFF000000 flag: RQFPR_IPV4
+ *
+ * Ethtool gives us value=0 and mask=~0 for a don't-care tuple,
+ * and a mask of 0 for a don't-care mask.
+ *
+ * The don't-care check and the mask=0 adjustment are done for the VLAN
+ * and MAC fields on an upper level (the information needed for that is
+ * missing on this level); such tuples can be discarded here when both
+ * value and mask are 0.
+ *
+ * Further, all masks are one-padded for better hardware efficiency.
+ */
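+/*
+ * Illustrative example for the 8bit case (RQFCR_PID_TOS): a mask of
+ * 0x0F is one-padded to 0xFFFFFF0F, a mask of 0 is widened to ~0, and
+ * a mask whose low byte is all ones marks a don't-care tuple that is
+ * skipped entirely.
+ */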
+static void gfar_set_attribute(u32 value, u32 mask, u32 flag,
+		struct filer_table *tab)
+{
+	switch (flag) {
+	/* 3bit */
+	case RQFCR_PID_PRI:
+		if (!(value | mask))
+			return;
+		mask |= RQFCR_PID_PRI_MASK;
+		break;
+		/* 8bit */
+	case RQFCR_PID_L4P:
+	case RQFCR_PID_TOS:
+		if (!~(mask | RQFCR_PID_L4P_MASK))
+			return;
+		if (!mask)
+			mask = ~0;
+		else
+			mask |= RQFCR_PID_L4P_MASK;
+		break;
+		/* 12bit */
+	case RQFCR_PID_VID:
+		if (!(value | mask))
+			return;
+		mask |= RQFCR_PID_VID_MASK;
+		break;
+		/* 16bit */
+	case RQFCR_PID_DPT:
+	case RQFCR_PID_SPT:
+	case RQFCR_PID_ETY:
+		if (!~(mask | RQFCR_PID_PORT_MASK))
+			return;
+		if (!mask)
+			mask = ~0;
+		else
+			mask |= RQFCR_PID_PORT_MASK;
+		break;
+		/* 24bit */
+	case RQFCR_PID_DAH:
+	case RQFCR_PID_DAL:
+	case RQFCR_PID_SAH:
+	case RQFCR_PID_SAL:
+		if (!(value | mask))
+			return;
+		mask |= RQFCR_PID_MAC_MASK;
+		break;
+		/* for all real 32bit masks */
+	default:
+		if (!~mask)
+			return;
+		if (!mask)
+			mask = ~0;
+		break;
+	}
+	gfar_set_general_attribute(value, mask, flag, tab);
+}
+
+/* Translates value and mask for UDP, TCP or SCTP */
+static void gfar_set_basic_ip(struct ethtool_tcpip4_spec *value,
+		struct ethtool_tcpip4_spec *mask, struct filer_table *tab)
+{
+	gfar_set_attribute(value->ip4src, mask->ip4src, RQFCR_PID_SIA, tab);
+	gfar_set_attribute(value->ip4dst, mask->ip4dst, RQFCR_PID_DIA, tab);
+	gfar_set_attribute(value->pdst, mask->pdst, RQFCR_PID_DPT, tab);
+	gfar_set_attribute(value->psrc, mask->psrc, RQFCR_PID_SPT, tab);
+	gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
+}
+
+/* Translates value and mask for RAW-IP4 */
+static void gfar_set_user_ip(struct ethtool_usrip4_spec *value,
+		struct ethtool_usrip4_spec *mask, struct filer_table *tab)
+{
+	gfar_set_attribute(value->ip4src, mask->ip4src, RQFCR_PID_SIA, tab);
+	gfar_set_attribute(value->ip4dst, mask->ip4dst, RQFCR_PID_DIA, tab);
+	gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
+	gfar_set_attribute(value->proto, mask->proto, RQFCR_PID_L4P, tab);
+	gfar_set_attribute(value->l4_4_bytes, mask->l4_4_bytes, RQFCR_PID_ARB,
+			tab);
+}
+
+/* Translates value and mask for ETHER spec */
+static void gfar_set_ether(struct ethhdr *value, struct ethhdr *mask,
+		struct filer_table *tab)
+{
+	u32 upper_temp_mask = 0;
+	u32 lower_temp_mask = 0;
+	/* Source address */
+	if (!is_broadcast_ether_addr(mask->h_source)) {
+		if (is_zero_ether_addr(mask->h_source)) {
+			upper_temp_mask = 0xFFFFFFFF;
+			lower_temp_mask = 0xFFFFFFFF;
+		} else {
+			upper_temp_mask = mask->h_source[0] << 16
+					| mask->h_source[1] << 8
+					| mask->h_source[2];
+			lower_temp_mask = mask->h_source[3] << 16
+					| mask->h_source[4] << 8
+					| mask->h_source[5];
+		}
+		/* Upper 24bit */
+		gfar_set_attribute(value->h_source[0] << 16 |
+				   value->h_source[1] << 8 |
+				   value->h_source[2],
+				   upper_temp_mask, RQFCR_PID_SAH, tab);
+		/* And the same for the lower part */
+		gfar_set_attribute(value->h_source[3] << 16 |
+				   value->h_source[4] << 8 |
+				   value->h_source[5],
+				   lower_temp_mask, RQFCR_PID_SAL, tab);
+	}
+	/* Destination address */
+	if (!is_broadcast_ether_addr(mask->h_dest)) {
+		/* Special case: destination is the limited broadcast address */
+		if (is_broadcast_ether_addr(value->h_dest) &&
+		    is_zero_ether_addr(mask->h_dest)) {
+			gfar_set_parse_bits(RQFPR_EBC, RQFPR_EBC, tab);
+		} else {
+			if (is_zero_ether_addr(mask->h_dest)) {
+				upper_temp_mask = 0xFFFFFFFF;
+				lower_temp_mask = 0xFFFFFFFF;
+			} else {
+				upper_temp_mask = mask->h_dest[0] << 16
+						| mask->h_dest[1] << 8
+						| mask->h_dest[2];
+				lower_temp_mask = mask->h_dest[3] << 16
+						| mask->h_dest[4] << 8
+						| mask->h_dest[5];
+			}
+
+			/* Upper 24bit */
+			gfar_set_attribute(value->h_dest[0] << 16 |
+					   value->h_dest[1] << 8 |
+					   value->h_dest[2],
+					   upper_temp_mask, RQFCR_PID_DAH, tab);
+			/* And the same for the lower part */
+			gfar_set_attribute(value->h_dest[3] << 16 |
+					   value->h_dest[4] << 8 |
+					   value->h_dest[5],
+					   lower_temp_mask, RQFCR_PID_DAL, tab);
+		}
+	}
+
+	gfar_set_attribute(value->h_proto, mask->h_proto, RQFCR_PID_ETY, tab);
+}
+
+/* Convert a rule to binary filter format of gianfar */
+static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
+		struct filer_table *tab)
+{
+	u32 vlan = 0, vlan_mask = 0;
+	u32 id = 0, id_mask = 0;
+	u32 cfi = 0, cfi_mask = 0;
+	u32 prio = 0, prio_mask = 0;
+
+	u32 old_index = tab->index;
+
+	/* Check if vlan is wanted */
+	if ((rule->flow_type & FLOW_EXT) && (rule->m_ext.vlan_tci != 0xFFFF)) {
+		if (!rule->m_ext.vlan_tci)
+			rule->m_ext.vlan_tci = 0xFFFF;
+
+		vlan = RQFPR_VLN;
+		vlan_mask = RQFPR_VLN;
+
+		/* Separate the TCI fields: VID [11:0], CFI bit 12, priority [15:13] */
+		id = rule->h_ext.vlan_tci & 0xFFF;
+		id_mask = rule->m_ext.vlan_tci & 0xFFF;
+		cfi = (rule->h_ext.vlan_tci >> 12) & 1;
+		cfi_mask = (rule->m_ext.vlan_tci >> 12) & 1;
+		prio = (rule->h_ext.vlan_tci >> 13) & 0x7;
+		prio_mask = (rule->m_ext.vlan_tci >> 13) & 0x7;
+
+		if (cfi == 1 && cfi_mask == 1) {
+			vlan |= RQFPR_CFI;
+			vlan_mask |= RQFPR_CFI;
+		} else if (cfi == 0 && cfi_mask == 1) {
+			vlan_mask |= RQFPR_CFI;
+		}
+	}
+
+	switch (rule->flow_type & ~FLOW_EXT) {
+	case TCP_V4_FLOW:
+		gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_TCP | vlan,
+				RQFPR_IPV4 | RQFPR_TCP | vlan_mask, tab);
+		gfar_set_basic_ip(&rule->h_u.tcp_ip4_spec,
+				&rule->m_u.tcp_ip4_spec, tab);
+		break;
+	case UDP_V4_FLOW:
+		gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_UDP | vlan,
+				RQFPR_IPV4 | RQFPR_UDP | vlan_mask, tab);
+		gfar_set_basic_ip(&rule->h_u.udp_ip4_spec,
+				&rule->m_u.udp_ip4_spec, tab);
+		break;
+	case SCTP_V4_FLOW:
+		gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
+				tab);
+		gfar_set_attribute(132, 0, RQFCR_PID_L4P, tab);
+		gfar_set_basic_ip((struct ethtool_tcpip4_spec *) &rule->h_u,
+				(struct ethtool_tcpip4_spec *) &rule->m_u, tab);
+		break;
+	case IP_USER_FLOW:
+		gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
+				tab);
+		gfar_set_user_ip((struct ethtool_usrip4_spec *) &rule->h_u,
+				(struct ethtool_usrip4_spec *) &rule->m_u, tab);
+		break;
+	case ETHER_FLOW:
+		if (vlan)
+			gfar_set_parse_bits(vlan, vlan_mask, tab);
+		gfar_set_ether((struct ethhdr *) &rule->h_u,
+				(struct ethhdr *) &rule->m_u, tab);
+		break;
+	default:
+		return -1;
+	}
+
+	/* Set the vlan attributes in the end */
+	if (vlan) {
+		gfar_set_attribute(id, id_mask, RQFCR_PID_VID, tab);
+		gfar_set_attribute(prio, prio_mask, RQFCR_PID_PRI, tab);
+	}
+
+	/* If nothing was written till now, this must be a default rule */
+	if (tab->index == old_index) {
+		gfar_set_mask(0xFFFFFFFF, tab);
+		tab->fe[tab->index].ctrl = 0x20;
+		tab->fe[tab->index].prop = 0x0;
+		tab->index++;
+	}
+
+	/* Remove last AND */
+	tab->fe[tab->index - 1].ctrl &= (~RQFCR_AND);
+
+	/* Specify which queue to use or to drop */
+	if (rule->ring_cookie == RX_CLS_FLOW_DISC)
+		tab->fe[tab->index - 1].ctrl |= RQFCR_RJE;
+	else
+		tab->fe[tab->index - 1].ctrl |= (rule->ring_cookie << 10);
+
+	/* Only big enough entries can be clustered */
+	if (tab->index > (old_index + 2)) {
+		tab->fe[old_index + 1].ctrl |= RQFCR_CLE;
+		tab->fe[tab->index - 1].ctrl |= RQFCR_CLE;
+	}
+
+	/* In rare cases the cache can be full while there is free space in hw */
+	if (tab->index > MAX_FILER_CACHE_IDX - 1)
+		return -EBUSY;
+
+	return 0;
+}
+
+/* Copy size filer entries */
+static void gfar_copy_filer_entries(struct gfar_filer_entry dst[],
+		struct gfar_filer_entry src[], s32 size)
+{
+	while (size > 0) {
+		size--;
+		dst[size].ctrl = src[size].ctrl;
+		dst[size].prop = src[size].prop;
+	}
+}
+
+/* Delete the contents of the filer table between begin and end
+ * and collapse the remaining entries */
+static int gfar_trim_filer_entries(u32 begin, u32 end, struct filer_table *tab)
+{
+	int length;
+	if (end > MAX_FILER_CACHE_IDX || end < begin)
+		return -EINVAL;
+
+	end++;
+	length = end - begin;
+
+	/* Copy */
+	while (end < tab->index) {
+		tab->fe[begin].ctrl = tab->fe[end].ctrl;
+		tab->fe[begin++].prop = tab->fe[end++].prop;
+
+	}
+	/* Fill up with don't cares */
+	while (begin < tab->index) {
+		tab->fe[begin].ctrl = 0x60;
+		tab->fe[begin].prop = 0xFFFFFFFF;
+		begin++;
+	}
+
+	tab->index -= length;
+	return 0;
+}
+
+/* Make space on the wanted location */
+static int gfar_expand_filer_entries(u32 begin, u32 length,
+		struct filer_table *tab)
+{
+	if (length == 0 || length + tab->index > MAX_FILER_CACHE_IDX ||
+	    begin > MAX_FILER_CACHE_IDX)
+		return -EINVAL;
+
+	gfar_copy_filer_entries(&(tab->fe[begin + length]), &(tab->fe[begin]),
+			tab->index - length + 1);
+
+	tab->index += length;
+	return 0;
+}
+
+static int gfar_get_next_cluster_start(int start, struct filer_table *tab)
+{
+	for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
+	     start++) {
+		if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE))
+				== (RQFCR_AND | RQFCR_CLE))
+			return start;
+	}
+	return -1;
+}
+
+static int gfar_get_next_cluster_end(int start, struct filer_table *tab)
+{
+	for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
+	     start++) {
+		if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE))
+				== (RQFCR_CLE))
+			return start;
+	}
+	return -1;
+}
+
+/*
+ * Uses the hardware's clustering option to reduce
+ * the number of filer table entries
+ */
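+/*
+ * Roughly: when two clusters start with identical mask and control
+ * entries, the rules of the second cluster are folded into the first
+ * one, so the shared entries are stored only once.
+ */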
+static void gfar_cluster_filer(struct filer_table *tab)
+{
+	s32 i = -1, j, iend, jend;
+
+	while ((i = gfar_get_next_cluster_start(++i, tab)) != -1) {
+		j = i;
+		while ((j = gfar_get_next_cluster_start(++j, tab)) != -1) {
+			/*
+			 * The cluster entries themselves and the entry
+			 * preceding each (a mask) must be identical!
+			 */
+			if (tab->fe[i].ctrl != tab->fe[j].ctrl)
+				break;
+			if (tab->fe[i].prop != tab->fe[j].prop)
+				break;
+			if (tab->fe[i - 1].ctrl != tab->fe[j - 1].ctrl)
+				break;
+			if (tab->fe[i - 1].prop != tab->fe[j - 1].prop)
+				break;
+			iend = gfar_get_next_cluster_end(i, tab);
+			jend = gfar_get_next_cluster_end(j, tab);
+			if (jend == -1 || iend == -1)
+				break;
+			/*
+			 * First we make some free space where our cluster
+			 * element should be. Then we copy it there and finally
+			 * delete it from its old location.
+			 */
+
+			if (gfar_expand_filer_entries(iend, (jend - j), tab)
+					== -EINVAL)
+				break;
+
+			gfar_copy_filer_entries(&(tab->fe[iend + 1]),
+					&(tab->fe[jend + 1]), jend - j);
+
+			if (gfar_trim_filer_entries(jend - 1,
+					jend + (jend - j), tab) == -EINVAL)
+				return;
+
+			/* Mask out cluster bit */
+			tab->fe[iend].ctrl &= ~(RQFCR_CLE);
+		}
+	}
+}
+
+/* Swaps the 0xFF80 masked bits of a1<>a2 and b1<>b2 */
+static void gfar_swap_ff80_bits(struct gfar_filer_entry *a1,
+		struct gfar_filer_entry *a2, struct gfar_filer_entry *b1,
+		struct gfar_filer_entry *b2)
+{
+	u32 temp[4];
+	temp[0] = a1->ctrl & 0xFF80;
+	temp[1] = a2->ctrl & 0xFF80;
+	temp[2] = b1->ctrl & 0xFF80;
+	temp[3] = b2->ctrl & 0xFF80;
+
+	a1->ctrl &= ~0xFF80;
+	a2->ctrl &= ~0xFF80;
+	b1->ctrl &= ~0xFF80;
+	b2->ctrl &= ~0xFF80;
+
+	a1->ctrl |= temp[1];
+	a2->ctrl |= temp[0];
+	b1->ctrl |= temp[3];
+	b2->ctrl |= temp[2];
+}
+
+/*
+ * Generate a list in mask_table consisting of mask values with their
+ * start and end of validity, and a block number marking parts that
+ * belong together (glued by ANDs)
+ */
+static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table,
+		struct filer_table *tab)
+{
+	u32 i, and_index = 0, block_index = 1;
+
+	for (i = 0; i < tab->index; i++) {
+		/* LSByte of control = 0 sets a mask */
+		if (!(tab->fe[i].ctrl & 0xF)) {
+			mask_table[and_index].mask = tab->fe[i].prop;
+			mask_table[and_index].start = i;
+			mask_table[and_index].block = block_index;
+			if (and_index >= 1)
+				mask_table[and_index - 1].end = i - 1;
+			and_index++;
+		}
+		/* Cluster starts are kept separate because they must
+		 * hold their position */
+		if (tab->fe[i].ctrl & RQFCR_CLE)
+			block_index++;
+		/* An unset AND indicates the end of a dependent block */
+		if (!(tab->fe[i].ctrl & RQFCR_AND))
+			block_index++;
+	}
+
+	mask_table[and_index - 1].end = i - 1;
+
+	return and_index;
+}
+
+/*
+ * Sorts the entries of mask_table by the values of the masks.
+ * Important: The 0xFF80 flags of the first and last entry of a
+ * block must hold their position (which queue, CLusterEnable, ReJEct,
+ * AND)
+ */
+static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table,
+		struct filer_table *temp_table, u32 and_index)
+{
+	/* Pointer to compare function (_asc or _desc) */
+	int (*gfar_comp)(const void *, const void *);
+
+	u32 i, size = 0, start = 0, prev = 1;
+	u32 old_first, old_last, new_first, new_last;
+
+	gfar_comp = &gfar_comp_desc;
+
+	for (i = 0; i < and_index; i++) {
+		if (prev != mask_table[i].block) {
+			old_first = mask_table[start].start + 1;
+			old_last = mask_table[i - 1].end;
+			sort(mask_table + start, size,
+					sizeof(struct gfar_mask_entry),
+					gfar_comp, &gfar_swap);
+
+			/* Toggle the sort order for every block. This
+			 * makes the optimization more effective! */
+			if (gfar_comp == gfar_comp_desc)
+				gfar_comp = &gfar_comp_asc;
+			else
+				gfar_comp = &gfar_comp_desc;
+
+			new_first = mask_table[start].start + 1;
+			new_last = mask_table[i - 1].end;
+
+			gfar_swap_ff80_bits(&temp_table->fe[new_first],
+					&temp_table->fe[old_first],
+					&temp_table->fe[new_last],
+					&temp_table->fe[old_last]);
+
+			start = i;
+			size = 0;
+		}
+		size++;
+		prev = mask_table[i].block;
+	}
+}
+
+/*
+ * Reduces the number of masks needed in the filer table to save entries.
+ * This is done by sorting the masks of a dependent block. A dependent block
+ * is identified by gluing ANDs or by CLE. The sorting order toggles after
+ * every block. Of course, entries in scope of a mask must change their
+ * location with it.
+ */
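+/*
+ * E.g. a block whose rules use the masks 0xFFFF0000, 0xFF000000 and
+ * 0xFFFF0000 is reordered so that the two identical masks become
+ * neighbours; one of them can then be dropped by the duplicate check
+ * at the end of this function.
+ */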
+static int gfar_optimize_filer_masks(struct filer_table *tab)
+{
+	struct filer_table *temp_table;
+	struct gfar_mask_entry *mask_table;
+
+	u32 and_index = 0, previous_mask = 0, i = 0, j = 0, size = 0;
+	s32 ret = 0;
+
+	/* We need a copy of the filer table because
+	 * we want to change its order */
+	temp_table = kmalloc(sizeof(*temp_table), GFP_KERNEL);
+	if (temp_table == NULL)
+		return -ENOMEM;
+	memcpy(temp_table, tab, sizeof(*temp_table));
+
+	mask_table = kcalloc(MAX_FILER_CACHE_IDX / 2 + 1,
+			sizeof(struct gfar_mask_entry), GFP_KERNEL);
+
+	if (mask_table == NULL) {
+		ret = -ENOMEM;
+		goto end;
+	}
+
+	and_index = gfar_generate_mask_table(mask_table, tab);
+
+	gfar_sort_mask_table(mask_table, temp_table, and_index);
+
+	/* Now we can copy the data from our duplicated filer table to
+	 * the real one in the order the mask table says */
+	for (i = 0; i < and_index; i++) {
+		size = mask_table[i].end - mask_table[i].start + 1;
+		gfar_copy_filer_entries(&(tab->fe[j]),
+				&(temp_table->fe[mask_table[i].start]), size);
+		j += size;
+	}
+
+	/* And finally we just have to check for duplicated masks and drop the
+	 * second ones */
+	for (i = 0; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
+		if (tab->fe[i].ctrl == 0x80) {
+			previous_mask = i++;
+			break;
+		}
+	}
+	for (; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
+		if (tab->fe[i].ctrl == 0x80) {
+			if (tab->fe[i].prop == tab->fe[previous_mask].prop) {
+				/* Two identical ones found!
+				 * So drop the second one! */
+				gfar_trim_filer_entries(i, i, tab);
+			} else
+				/* Not identical! */
+				previous_mask = i;
+		}
+	}
+
+	kfree(mask_table);
+end:
+	kfree(temp_table);
+	return ret;
+}
+
+/* Write the bit-pattern from software's buffer to hardware registers */
+static int gfar_write_filer_table(struct gfar_private *priv,
+		struct filer_table *tab)
+{
+	u32 i = 0;
+	if (tab->index > MAX_FILER_IDX - 1)
+		return -EBUSY;
+
+	/* Avoid processing an inconsistent filer table */
+	lock_rx_qs(priv);
+
+	/* Fill regular entries */
+	for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].prop); i++)
+		gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop);
+	/* Fill the rest with fall-throughs */
+	for (; i < MAX_FILER_IDX - 1; i++)
+		gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF);
+	/* Last entry must be default accept
+	 * because that's what people expect */
+	gfar_write_filer(priv, i, 0x20, 0x0);
+
+	unlock_rx_qs(priv);
+
+	return 0;
+}
+
+static int gfar_check_capability(struct ethtool_rx_flow_spec *flow,
+		struct gfar_private *priv)
+{
+	if (flow->flow_type & FLOW_EXT) {
+		if (~flow->m_ext.data[0] || ~flow->m_ext.data[1])
+			netdev_warn(priv->ndev,
+					"User-specific data not supported!\n");
+		if (~flow->m_ext.vlan_etype)
+			netdev_warn(priv->ndev,
+					"VLAN-etype not supported!\n");
+	}
+	if (flow->flow_type == IP_USER_FLOW &&
+	    flow->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
+		netdev_warn(priv->ndev,
+				"IP version differing from IPv4 not supported!\n");
+
+	return 0;
+}
+
+static int gfar_process_filer_changes(struct gfar_private *priv)
+{
+	struct ethtool_flow_spec_container *j;
+	struct filer_table *tab;
+	s32 i = 0;
+	s32 ret = 0;
+
+	/* kzalloc, so the table's index starts zeroed as well */
+	tab = kzalloc(sizeof(*tab), GFP_KERNEL);
+	if (tab == NULL)
+		return -ENOMEM;
+
+	/* Now convert the existing filer data from flow_spec into
+	 * filer tables binary format */
+	list_for_each_entry(j, &priv->rx_list.list, list) {
+		ret = gfar_convert_to_filer(&j->fs, tab);
+		if (ret == -EBUSY) {
+			netdev_err(priv->ndev, "Rule not added: No free space!\n");
+			goto end;
+		}
+		if (ret == -1) {
+			netdev_err(priv->ndev, "Rule not added: Unsupported flow type!\n");
+			goto end;
+		}
+	}
+
+	i = tab->index;
+
+	/* Optimizations to save entries */
+	gfar_cluster_filer(tab);
+	gfar_optimize_filer_masks(tab);
+
+	/* i may be 0 when the rule list is empty; avoid dividing by zero */
+	if (i)
+		pr_debug("\n\tSummary:\n"
+			"\tData on hardware: %d\n"
+			"\tCompression rate: %d%%\n",
+			tab->index, 100 - (100 * tab->index) / i);
+
+	/* Write everything to hardware */
+	ret = gfar_write_filer_table(priv, tab);
+	if (ret == -EBUSY) {
+		netdev_err(priv->ndev, "Rule not added: No free space!\n");
+		goto end;
+	}
+
+end:
+	kfree(tab);
+	return ret;
+}
+
+static void gfar_invert_masks(struct ethtool_rx_flow_spec *flow)
+{
+	u32 i = 0;
+
+	for (i = 0; i < sizeof(flow->m_u); i++)
+		flow->m_u.hdata[i] ^= 0xFF;
+
+	flow->m_ext.vlan_etype ^= 0xFFFF;
+	flow->m_ext.vlan_tci ^= 0xFFFF;
+	flow->m_ext.data[0] ^= ~0;
+	flow->m_ext.data[1] ^= ~0;
+}
+
+static int gfar_add_cls(struct gfar_private *priv,
+		struct ethtool_rx_flow_spec *flow)
+{
+	struct ethtool_flow_spec_container *temp, *comp;
+	int ret = 0;
+
+	temp = kmalloc(sizeof(*temp), GFP_KERNEL);
+	if (temp == NULL)
+		return -ENOMEM;
+	memcpy(&temp->fs, flow, sizeof(temp->fs));
+
+	gfar_invert_masks(&temp->fs);
+	ret = gfar_check_capability(&temp->fs, priv);
+	if (ret)
+		goto clean_mem;
+	/* Link in the new element at the right @location */
+	if (list_empty(&priv->rx_list.list)) {
+		ret = gfar_check_filer_hardware(priv);
+		if (ret != 0)
+			goto clean_mem;
+		list_add(&temp->list, &priv->rx_list.list);
+		goto process;
+	} else {
+
+		list_for_each_entry(comp, &priv->rx_list.list, list) {
+			if (comp->fs.location > flow->location) {
+				list_add_tail(&temp->list, &comp->list);
+				goto process;
+			}
+			if (comp->fs.location == flow->location) {
+				netdev_err(priv->ndev,
+						"Rule not added: ID %d not free!\n",
+					flow->location);
+				ret = -EBUSY;
+				goto clean_mem;
+			}
+		}
+		list_add_tail(&temp->list, &priv->rx_list.list);
+	}
+
+process:
+	ret = gfar_process_filer_changes(priv);
+	if (ret)
+		goto clean_list;
+	priv->rx_list.count++;
+	return ret;
+
+clean_list:
+	list_del(&temp->list);
+clean_mem:
+	kfree(temp);
+	return ret;
+}
+
+static int gfar_del_cls(struct gfar_private *priv, u32 loc)
+{
+	struct ethtool_flow_spec_container *comp;
+	int ret = -EINVAL;
+
+	if (list_empty(&priv->rx_list.list))
+		return ret;
+
+	list_for_each_entry(comp, &priv->rx_list.list, list) {
+		if (comp->fs.location == loc) {
+			list_del(&comp->list);
+			kfree(comp);
+			priv->rx_list.count--;
+			gfar_process_filer_changes(priv);
+			ret = 0;
+			break;
+		}
+	}
+
+	return ret;
+}
+
+static int gfar_get_cls(struct gfar_private *priv, struct ethtool_rxnfc *cmd)
+{
+	struct ethtool_flow_spec_container *comp;
+	int ret = -EINVAL;
+
+	list_for_each_entry(comp, &priv->rx_list.list, list) {
+		if (comp->fs.location == cmd->fs.location) {
+			memcpy(&cmd->fs, &comp->fs, sizeof(cmd->fs));
+			gfar_invert_masks(&cmd->fs);
+			ret = 0;
+			break;
+		}
+	}
+
+	return ret;
+}
+
+static int gfar_get_cls_all(struct gfar_private *priv,
+		struct ethtool_rxnfc *cmd, u32 *rule_locs)
+{
+	struct ethtool_flow_spec_container *comp;
+	u32 i = 0;
+
+	list_for_each_entry(comp, &priv->rx_list.list, list) {
+		if (i < cmd->rule_cnt) {
+			rule_locs[i] = comp->fs.location;
+			i++;
+		}
+	}
+
+	cmd->data = MAX_FILER_IDX;
+
+	return 0;
+}
+
 static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
 {
 	struct gfar_private *priv = netdev_priv(dev);
 	int ret = 0;
 
-	switch(cmd->cmd) {
+	mutex_lock(&priv->rx_queue_access);
+
+	switch (cmd->cmd) {
 	case ETHTOOL_SRXFH:
 		ret = gfar_set_hash_opts(priv, cmd);
 		break;
+	case ETHTOOL_SRXCLSRLINS:
+		if (cmd->fs.ring_cookie != RX_CLS_FLOW_DISC &&
+			cmd->fs.ring_cookie >= priv->num_rx_queues) {
+			ret = -EINVAL;
+			break;
+		}
+		ret = gfar_add_cls(priv, &cmd->fs);
+		break;
+	case ETHTOOL_SRXCLSRLDEL:
+		ret = gfar_del_cls(priv, cmd->fs.location);
+		break;
 	default:
 		ret = -EINVAL;
 	}
 
+	mutex_unlock(&priv->rx_queue_access);
+
+	return ret;
+}
+
+static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+		void *rule_locs)
+{
+	struct gfar_private *priv = netdev_priv(dev);
+	int ret = 0;
+
+	switch (cmd->cmd) {
+	case ETHTOOL_GRXRINGS:
+		cmd->data = priv->num_rx_queues;
+		break;
+	case ETHTOOL_GRXCLSRLCNT:
+		cmd->rule_cnt = priv->rx_list.count;
+		break;
+	case ETHTOOL_GRXCLSRULE:
+		ret = gfar_get_cls(priv, cmd);
+		break;
+	case ETHTOOL_GRXCLSRLALL:
+		ret = gfar_get_cls_all(priv, cmd, (u32 *) rule_locs);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
 	return ret;
 }
 
@@ -810,4 +1736,5 @@
 	.set_wol = gfar_set_wol,
 #endif
 	.set_rxnfc = gfar_set_nfc,
+	.get_rxnfc = gfar_get_nfc,
 };
diff --git a/drivers/net/greth.c b/drivers/net/greth.c
index f181304..69b86d7 100644
--- a/drivers/net/greth.c
+++ b/drivers/net/greth.c
@@ -25,6 +25,7 @@
 #include <linux/module.h>
 #include <linux/uaccess.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
diff --git a/drivers/net/hamradio/baycom_ser_fdx.c b/drivers/net/hamradio/baycom_ser_fdx.c
index 99cdce3..a974727 100644
--- a/drivers/net/hamradio/baycom_ser_fdx.c
+++ b/drivers/net/hamradio/baycom_ser_fdx.c
@@ -76,6 +76,7 @@
 #include <linux/ioport.h>
 #include <linux/string.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/hdlcdrv.h>
 #include <linux/baycom.h>
 #include <linux/jiffies.h>
diff --git a/drivers/net/hamradio/baycom_ser_hdx.c b/drivers/net/hamradio/baycom_ser_hdx.c
index d92fe6c..e349d86 100644
--- a/drivers/net/hamradio/baycom_ser_hdx.c
+++ b/drivers/net/hamradio/baycom_ser_hdx.c
@@ -66,6 +66,7 @@
 #include <linux/ioport.h>
 #include <linux/string.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <asm/uaccess.h>
 #include <asm/io.h>
 #include <linux/hdlcdrv.h>
diff --git a/drivers/net/hp-plus.c b/drivers/net/hp-plus.c
index 82bffc3..2991736 100644
--- a/drivers/net/hp-plus.c
+++ b/drivers/net/hp-plus.c
@@ -30,6 +30,7 @@
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/delay.h>
 
 #include <asm/system.h>
diff --git a/drivers/net/hp.c b/drivers/net/hp.c
index ef20143..18564d4 100644
--- a/drivers/net/hp.c
+++ b/drivers/net/hp.c
@@ -30,6 +30,7 @@
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/delay.h>
 
 #include <asm/system.h>
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index b388d78..838c5b6 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -34,6 +34,7 @@
 #include <linux/etherdevice.h>
 #include <linux/skbuff.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/mm.h>
 #include <linux/pm.h>
 #include <linux/ethtool.h>
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 4fecaed..ce53f4a 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -32,6 +32,7 @@
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/moduleparam.h>
 #include <net/pkt_sched.h>
 #include <net/net_namespace.h>
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 2c28621..fd64c56 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -54,9 +54,8 @@
 #define MAJ 3
 #define MIN 0
 #define BUILD 6
-#define KFIX 2
 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
-__stringify(BUILD) "-k" __stringify(KFIX)
+__stringify(BUILD) "-k"
 char igb_driver_name[] = "igb";
 char igb_driver_version[] = DRV_VERSION;
 static const char igb_driver_string[] =
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index 1c77fb3..64b47bf 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -45,7 +45,7 @@
 
 #include "igbvf.h"
 
-#define DRV_VERSION "1.0.8-k0"
+#define DRV_VERSION "2.0.0-k"
 char igbvf_driver_name[] = "igbvf";
 const char igbvf_driver_version[] = DRV_VERSION;
 static const char igbvf_driver_string[] =
diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c
index 58cd320..d4aa40a 100644
--- a/drivers/net/ipg.c
+++ b/drivers/net/ipg.c
@@ -22,6 +22,7 @@
  */
 #include <linux/crc32.h>
 #include <linux/ethtool.h>
+#include <linux/interrupt.h>
 #include <linux/gfp.h>
 #include <linux/mii.h>
 #include <linux/mutex.h>
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c
index d532dde..963067d 100644
--- a/drivers/net/irda/ali-ircc.c
+++ b/drivers/net/irda/ali-ircc.c
@@ -31,6 +31,7 @@
 #include <linux/ioport.h>
 #include <linux/delay.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/rtnetlink.h>
 #include <linux/serial_reg.h>
 #include <linux/dma-mapping.h>
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index 174cafa..b45b2cc 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -152,6 +152,7 @@
 #include <linux/delay.h>
 #include <linux/slab.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/pci.h>
 #include <linux/rtnetlink.h>
 
diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c
index 7a963d4..b56636d 100644
--- a/drivers/net/irda/nsc-ircc.c
+++ b/drivers/net/irda/nsc-ircc.c
@@ -52,6 +52,7 @@
 #include <linux/ioport.h>
 #include <linux/delay.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/rtnetlink.h>
 #include <linux/dma-mapping.h>
 #include <linux/pnp.h>
diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c
index 001ed0a..b1d1ce3 100644
--- a/drivers/net/irda/pxaficp_ir.c
+++ b/drivers/net/irda/pxaficp_ir.c
@@ -12,6 +12,7 @@
  * Infra-red driver (SIR/FIR) for the PXA2xx embedded microprocessor
  *
  */
+#include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c
index efe05bb..5039f08 100644
--- a/drivers/net/irda/sir_dev.c
+++ b/drivers/net/irda/sir_dev.c
@@ -11,6 +11,7 @@
  *
  ********************************************************************/    
 
+#include <linux/hardirq.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index 8800e1f..d5072af 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -49,6 +49,7 @@
 #include <linux/ioport.h>
 #include <linux/delay.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/rtnetlink.h>
 #include <linux/serial_reg.h>
 #include <linux/dma-mapping.h>
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c
index f504b26..6d64790 100644
--- a/drivers/net/irda/via-ircc.c
+++ b/drivers/net/irda/via-ircc.c
@@ -46,6 +46,7 @@
 #include <linux/ioport.h>
 #include <linux/delay.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/rtnetlink.h>
 #include <linux/pci.h>
 #include <linux/dma-mapping.h>
diff --git a/drivers/net/irda/via-ircc.h b/drivers/net/irda/via-ircc.h
index c6f5848..f903a6a 100644
--- a/drivers/net/irda/via-ircc.h
+++ b/drivers/net/irda/via-ircc.h
@@ -210,7 +210,7 @@
 		break;
 	default:
 		break;
-	};			//Switch
+	}
 }
 
 static unsigned char ReadLPCReg(int iRegNum)
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index c3d0738..9021d01 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -36,6 +36,7 @@
 
 #include <linux/kernel.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/pci.h>
 #include <linux/slab.h>
 #include <linux/netdevice.h>
diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c
index 1f9c3f08..c436660 100644
--- a/drivers/net/irda/w83977af_ir.c
+++ b/drivers/net/irda/w83977af_ir.c
@@ -47,6 +47,7 @@
 #include <linux/ioport.h>
 #include <linux/delay.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/rtnetlink.h>
 #include <linux/dma-mapping.h>
 #include <linux/gfp.h>
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index 9ece1fd..b6c296f 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -538,7 +538,7 @@
 	default:
 		veth_error("Unknown ack type %d from LPAR %d.\n",
 				event->base_event.xSubtype, rlp);
-	};
+	}
 }
 
 static void veth_handle_int(struct veth_lpevent *event)
@@ -584,7 +584,7 @@
 	default:
 		veth_error("Unknown interrupt type %d from LPAR %d.\n",
 				event->base_event.xSubtype, rlp);
-	};
+	}
 }
 
 static void veth_handle_event(struct HvLpEvent *event)
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index 8ee6612..0d7bc91 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -1157,7 +1157,7 @@
 	default:
 		/* bad value */
 		return IXGBE_ERR_CONFIG;
-	};
+	}
 
 	/* Move the flexible bytes to use the ethertype - shift 6 words */
 	fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
@@ -1245,7 +1245,7 @@
 	default:
 		/* bad value */
 		return IXGBE_ERR_CONFIG;
-	};
+	}
 
 	/* Turn perfect match filtering on */
 	fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH;
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index b894b42..de65643 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -1292,7 +1292,7 @@
 
 		udelay(5);
 		ixgbe_standby_eeprom(hw);
-	};
+	}
 
 	/*
 	 * On some parts, SPI write time could vary from 0-20mSec on 3.3V
@@ -1374,7 +1374,7 @@
 		 * EEPROM
 		 */
 		mask = mask >> 1;
-	};
+	}
 
 	/* We leave the "DI" bit set to "0" when we leave this routine. */
 	eec &= ~IXGBE_EEC_DI;
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index cb1555b..4950d03 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -27,6 +27,7 @@
 
 /* ethtool support for ixgbe */
 
+#include <linux/interrupt.h>
 #include <linux/types.h>
 #include <linux/module.h>
 #include <linux/slab.h>
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 08e8e25..06cfaf3 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -32,6 +32,7 @@
 #include <linux/vmalloc.h>
 #include <linux/string.h>
 #include <linux/in.h>
+#include <linux/interrupt.h>
 #include <linux/ip.h>
 #include <linux/tcp.h>
 #include <linux/pkt_sched.h>
diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c
index 28d3cb2..b2c5ecd 100644
--- a/drivers/net/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ixgbevf/ixgbevf_main.c
@@ -52,7 +52,7 @@
 static const char ixgbevf_driver_string[] =
 	"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
 
-#define DRV_VERSION "2.0.0-k2"
+#define DRV_VERSION "2.1.0-k"
 const char ixgbevf_driver_version[] = DRV_VERSION;
 static char ixgbevf_copyright[] =
 	"Copyright (c) 2009 - 2010 Intel Corporation.";
diff --git a/drivers/net/ixp2000/ixpdev.c b/drivers/net/ixp2000/ixpdev.c
index 78ddd8b..e122493 100644
--- a/drivers/net/ixp2000/ixpdev.c
+++ b/drivers/net/ixp2000/ixpdev.c
@@ -14,6 +14,7 @@
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/moduleparam.h>
 #include <linux/gfp.h>
 #include <asm/hardware/uengine.h>
diff --git a/drivers/net/jme.h b/drivers/net/jme.h
index e9aaeca..0d5da06 100644
--- a/drivers/net/jme.h
+++ b/drivers/net/jme.h
@@ -24,6 +24,7 @@
 
 #ifndef __JME_H_INCLUDED__
 #define __JME_H_INCLUDED__
+#include <linux/interrupt.h>
 
 #define DRV_NAME	"jme"
 #define DRV_VERSION	"1.0.8"
diff --git a/drivers/net/ks8842.c b/drivers/net/ks8842.c
index fc12ac0..4a6ae05 100644
--- a/drivers/net/ks8842.c
+++ b/drivers/net/ks8842.c
@@ -23,6 +23,7 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
diff --git a/drivers/net/ks8851.c b/drivers/net/ks8851.c
index bcd9ba6..f56743a 100644
--- a/drivers/net/ks8851.c
+++ b/drivers/net/ks8851.c
@@ -13,6 +13,7 @@
 
 #define DEBUG
 
+#include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/netdevice.h>
diff --git a/drivers/net/ks8851_mll.c b/drivers/net/ks8851_mll.c
index 61631ca..aefbdd8 100644
--- a/drivers/net/ks8851_mll.c
+++ b/drivers/net/ks8851_mll.c
@@ -23,6 +23,7 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/netdevice.h>
diff --git a/drivers/net/ksz884x.c b/drivers/net/ksz884x.c
index 41ea592..2ac6c6c 100644
--- a/drivers/net/ksz884x.c
+++ b/drivers/net/ksz884x.c
@@ -17,6 +17,7 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/ioport.h>
diff --git a/drivers/net/lib8390.c b/drivers/net/lib8390.c
index 17b75e5..70eb207 100644
--- a/drivers/net/lib8390.c
+++ b/drivers/net/lib8390.c
@@ -410,7 +410,7 @@
 
 	spin_unlock(&ei_local->page_lock);
 	enable_irq_lockdep_irqrestore(dev->irq, &flags);
-
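+	/* take a software TX timestamp before the skb is freed */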
+	skb_tx_timestamp(skb);
 	dev_kfree_skb (skb);
 	dev->stats.tx_bytes += send_length;
 
@@ -758,7 +758,8 @@
 				skb_put(skb, pkt_len);	/* Make room */
 				ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame));
 				skb->protocol=eth_type_trans(skb,dev);
-				netif_rx(skb);
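+				/* deliver unless the PHY timestamping
+				 * code consumed the skb */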
+				if (!skb_defer_rx_timestamp(skb))
+					netif_rx(skb);
 				dev->stats.rx_packets++;
 				dev->stats.rx_bytes += pkt_len;
 				if (pkt_stat & ENRSR_PHY)
diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c
index b7948cc..e3f1925 100644
--- a/drivers/net/ll_temac_main.c
+++ b/drivers/net/ll_temac_main.c
@@ -48,6 +48,7 @@
 #include <linux/io.h>
 #include <linux/ip.h>
 #include <linux/slab.h>
+#include <linux/interrupt.h>
 
 #include "ll_temac.h"
 
@@ -727,6 +728,8 @@
 	if (lp->tx_bd_tail >= TX_BD_NUM)
 		lp->tx_bd_tail = 0;
 
+	skb_tx_timestamp(skb);
+
 	/* Kick off the transfer */
 	lp->dma_out(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */
 
@@ -772,7 +775,8 @@
 			skb->ip_summed = CHECKSUM_COMPLETE;
 		}
 
-		netif_rx(skb);
+		if (!skb_defer_rx_timestamp(skb))
+			netif_rx(skb);
 
 		ndev->stats.rx_packets++;
 		ndev->stats.rx_bytes += length;
diff --git a/drivers/net/lne390.c b/drivers/net/lne390.c
index 8a1097c..f9888d2 100644
--- a/drivers/net/lne390.c
+++ b/drivers/net/lne390.c
@@ -41,6 +41,7 @@
 #include <linux/string.h>
 #include <linux/delay.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index 6c6a028..dcf6011 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -15,6 +15,7 @@
 #include <linux/types.h>
 #include <linux/slab.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/dma-mapping.h>
@@ -669,6 +670,8 @@
 	entry = NEXT_TX(entry);
 	bp->tx_head = entry;
 
+	skb_tx_timestamp(skb);
+
 	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
 
 	if (TX_BUFFS_AVAIL(bp) < 1)
diff --git a/drivers/net/mace.c b/drivers/net/mace.c
index 1c5221f..2074e97 100644
--- a/drivers/net/mace.c
+++ b/drivers/net/mace.c
@@ -13,6 +13,7 @@
 #include <linux/string.h>
 #include <linux/timer.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/crc32.h>
 #include <linux/spinlock.h>
 #include <linux/bitrev.h>
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index d6aeaa5..cc67cbe 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -414,7 +414,8 @@
 #define MACVLAN_FEATURES \
 	(NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
 	 NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \
-	 NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM)
+	 NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM | \
+	 NETIF_F_HW_VLAN_FILTER)
 
 #define MACVLAN_STATE_MASK \
 	((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
@@ -509,6 +510,39 @@
 	return stats;
 }
 
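+/* The VLAN RX acceleration ops simply pass the calls through to the
+ * lower device */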
+static void macvlan_vlan_rx_register(struct net_device *dev,
+				     struct vlan_group *grp)
+{
+	struct macvlan_dev *vlan = netdev_priv(dev);
+	struct net_device *lowerdev = vlan->lowerdev;
+	const struct net_device_ops *ops = lowerdev->netdev_ops;
+
+	if (ops->ndo_vlan_rx_register)
+		ops->ndo_vlan_rx_register(lowerdev, grp);
+}
+
+static void macvlan_vlan_rx_add_vid(struct net_device *dev,
+				    unsigned short vid)
+{
+	struct macvlan_dev *vlan = netdev_priv(dev);
+	struct net_device *lowerdev = vlan->lowerdev;
+	const struct net_device_ops *ops = lowerdev->netdev_ops;
+
+	if (ops->ndo_vlan_rx_add_vid)
+		ops->ndo_vlan_rx_add_vid(lowerdev, vid);
+}
+
+static void macvlan_vlan_rx_kill_vid(struct net_device *dev,
+				     unsigned short vid)
+{
+	struct macvlan_dev *vlan = netdev_priv(dev);
+	struct net_device *lowerdev = vlan->lowerdev;
+	const struct net_device_ops *ops = lowerdev->netdev_ops;
+
+	if (ops->ndo_vlan_rx_kill_vid)
+		ops->ndo_vlan_rx_kill_vid(lowerdev, vid);
+}
+
 static void macvlan_ethtool_get_drvinfo(struct net_device *dev,
 					struct ethtool_drvinfo *drvinfo)
 {
@@ -541,6 +575,9 @@
 	.ndo_set_multicast_list	= macvlan_set_multicast_list,
 	.ndo_get_stats64	= macvlan_dev_get_stats64,
 	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_vlan_rx_register	= macvlan_vlan_rx_register,
+	.ndo_vlan_rx_add_vid	= macvlan_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid	= macvlan_vlan_rx_kill_vid,
 };
 
 void macvlan_common_setup(struct net_device *dev)
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 6696e56..ecee0fe 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -508,6 +508,8 @@
 		vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
 		vnet_hdr->csum_start = skb_checksum_start_offset(skb);
 		vnet_hdr->csum_offset = skb->csum_offset;
+	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
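+		/* the host already validated the checksum; tell the guest */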
+		vnet_hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
 	} /* else everything is zero */
 
 	return 0;
diff --git a/drivers/net/mipsnet.c b/drivers/net/mipsnet.c
index 869f0ea..004e64a 100644
--- a/drivers/net/mipsnet.c
+++ b/drivers/net/mipsnet.c
@@ -5,6 +5,7 @@
  */
 
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index bf84849..3e89a84 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -210,7 +210,6 @@
 	int big_bytes;
 	int max_intr_slots;
 	struct net_device *dev;
-	spinlock_t stats_lock;
 	u8 __iomem *sram;
 	int sram_size;
 	unsigned long board_span;
@@ -377,7 +376,8 @@
 	__raw_writel((__force __u32) val, (__force void __iomem *)p);
 }
 
-static struct net_device_stats *myri10ge_get_stats(struct net_device *dev);
+static struct rtnl_link_stats64 *myri10ge_get_stats(struct net_device *dev,
+						    struct rtnl_link_stats64 *stats);
 
 static void set_fw_name(struct myri10ge_priv *mgp, char *name, bool allocated)
 {
@@ -1013,7 +1013,7 @@
 		cmd.data2 = i;
 		status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_DMA,
 					    &cmd, 0);
-	};
+	}
 
 	status |=
 	    myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_ACK_OFFSET, &cmd, 0);
@@ -1831,13 +1831,15 @@
 {
 	struct myri10ge_priv *mgp = netdev_priv(netdev);
 	struct myri10ge_slice_state *ss;
+	struct rtnl_link_stats64 link_stats;
 	int slice;
 	int i;
 
 	/* force stats update */
-	(void)myri10ge_get_stats(netdev);
+	memset(&link_stats, 0, sizeof(link_stats));
+	(void)myri10ge_get_stats(netdev, &link_stats);
 	for (i = 0; i < MYRI10GE_NET_STATS_LEN; i++)
-		data[i] = ((unsigned long *)&netdev->stats)[i];
+		data[i] = ((u64 *)&link_stats)[i];
 
 	data[i++] = (unsigned int)mgp->tx_boundary;
 	data[i++] = (unsigned int)mgp->wc_enabled;
@@ -2976,15 +2978,13 @@
 	return NETDEV_TX_OK;
 }
 
-static struct net_device_stats *myri10ge_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *myri10ge_get_stats(struct net_device *dev,
+						    struct rtnl_link_stats64 *stats)
 {
-	struct myri10ge_priv *mgp = netdev_priv(dev);
-	struct myri10ge_slice_netstats *slice_stats;
-	struct net_device_stats *stats = &dev->stats;
+	const struct myri10ge_priv *mgp = netdev_priv(dev);
+	const struct myri10ge_slice_netstats *slice_stats;
 	int i;
 
-	spin_lock(&mgp->stats_lock);
-	memset(stats, 0, sizeof(*stats));
 	for (i = 0; i < mgp->num_slices; i++) {
 		slice_stats = &mgp->ss[i].stats;
 		stats->rx_packets += slice_stats->rx_packets;
@@ -2994,7 +2994,6 @@
 		stats->rx_dropped += slice_stats->rx_dropped;
 		stats->tx_dropped += slice_stats->tx_dropped;
 	}
-	spin_unlock(&mgp->stats_lock);
 	return stats;
 }
 
@@ -3790,7 +3789,7 @@
 	.ndo_open		= myri10ge_open,
 	.ndo_stop		= myri10ge_close,
 	.ndo_start_xmit		= myri10ge_xmit,
-	.ndo_get_stats		= myri10ge_get_stats,
+	.ndo_get_stats64	= myri10ge_get_stats,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_change_mtu		= myri10ge_change_mtu,
 	.ndo_fix_features	= myri10ge_fix_features,
@@ -3964,7 +3963,6 @@
 	setup_timer(&mgp->watchdog_timer, myri10ge_watchdog_timer,
 		    (unsigned long)mgp);
 
-	spin_lock_init(&mgp->stats_lock);
 	SET_ETHTOOL_OPS(netdev, &myri10ge_ethtool_ops);
 	INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog);
 	status = register_netdev(netdev);
diff --git a/drivers/net/myri_sbus.c b/drivers/net/myri_sbus.c
deleted file mode 100644
index 53aeea4..0000000
--- a/drivers/net/myri_sbus.c
+++ /dev/null
@@ -1,1187 +0,0 @@
-/* myri_sbus.c: MyriCOM MyriNET SBUS card driver.
- *
- * Copyright (C) 1996, 1999, 2006, 2008 David S. Miller (davem@davemloft.net)
- */
-
-static char version[] =
-        "myri_sbus.c:v2.0 June 23, 2006 David S. Miller (davem@davemloft.net)\n";
-
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/fcntl.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/in.h>
-#include <linux/string.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/bitops.h>
-#include <linux/dma-mapping.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/firmware.h>
-#include <linux/gfp.h>
-
-#include <net/dst.h>
-#include <net/arp.h>
-#include <net/sock.h>
-#include <net/ipv6.h>
-
-#include <asm/system.h>
-#include <asm/io.h>
-#include <asm/dma.h>
-#include <asm/byteorder.h>
-#include <asm/idprom.h>
-#include <asm/openprom.h>
-#include <asm/oplib.h>
-#include <asm/auxio.h>
-#include <asm/pgtable.h>
-#include <asm/irq.h>
-
-#include "myri_sbus.h"
-
-/* #define DEBUG_DETECT */
-/* #define DEBUG_IRQ */
-/* #define DEBUG_TRANSMIT */
-/* #define DEBUG_RECEIVE */
-/* #define DEBUG_HEADER */
-
-#ifdef DEBUG_DETECT
-#define DET(x)   printk x
-#else
-#define DET(x)
-#endif
-
-#ifdef DEBUG_IRQ
-#define DIRQ(x)  printk x
-#else
-#define DIRQ(x)
-#endif
-
-#ifdef DEBUG_TRANSMIT
-#define DTX(x)  printk x
-#else
-#define DTX(x)
-#endif
-
-#ifdef DEBUG_RECEIVE
-#define DRX(x)  printk x
-#else
-#define DRX(x)
-#endif
-
-#ifdef DEBUG_HEADER
-#define DHDR(x) printk x
-#else
-#define DHDR(x)
-#endif
-
-/* Firmware name */
-#define FWNAME		"myricom/lanai.bin"
-
-static void myri_reset_off(void __iomem *lp, void __iomem *cregs)
-{
-	/* Clear IRQ mask. */
-	sbus_writel(0, lp + LANAI_EIMASK);
-
-	/* Turn RESET function off. */
-	sbus_writel(CONTROL_ROFF, cregs + MYRICTRL_CTRL);
-}
-
-static void myri_reset_on(void __iomem *cregs)
-{
-	/* Enable RESET function. */
-	sbus_writel(CONTROL_RON, cregs + MYRICTRL_CTRL);
-
-	/* Disable IRQ's. */
-	sbus_writel(CONTROL_DIRQ, cregs + MYRICTRL_CTRL);
-}
-
-static void myri_disable_irq(void __iomem *lp, void __iomem *cregs)
-{
-	sbus_writel(CONTROL_DIRQ, cregs + MYRICTRL_CTRL);
-	sbus_writel(0, lp + LANAI_EIMASK);
-	sbus_writel(ISTAT_HOST, lp + LANAI_ISTAT);
-}
-
-static void myri_enable_irq(void __iomem *lp, void __iomem *cregs)
-{
-	sbus_writel(CONTROL_EIRQ, cregs + MYRICTRL_CTRL);
-	sbus_writel(ISTAT_HOST, lp + LANAI_EIMASK);
-}
-
-static inline void bang_the_chip(struct myri_eth *mp)
-{
-	struct myri_shmem __iomem *shmem = mp->shmem;
-	void __iomem *cregs		= mp->cregs;
-
-	sbus_writel(1, &shmem->send);
-	sbus_writel(CONTROL_WON, cregs + MYRICTRL_CTRL);
-}
-
-static int myri_do_handshake(struct myri_eth *mp)
-{
-	struct myri_shmem __iomem *shmem = mp->shmem;
-	void __iomem *cregs = mp->cregs;
-	struct myri_channel __iomem *chan = &shmem->channel;
-	int tick 			= 0;
-
-	DET(("myri_do_handshake: "));
-	if (sbus_readl(&chan->state) == STATE_READY) {
-		DET(("Already STATE_READY, failed.\n"));
-		return -1;	/* We're hosed... */
-	}
-
-	myri_disable_irq(mp->lregs, cregs);
-
-	while (tick++ < 25) {
-		u32 softstate;
-
-		/* Wake it up. */
-		DET(("shakedown, CONTROL_WON, "));
-		sbus_writel(1, &shmem->shakedown);
-		sbus_writel(CONTROL_WON, cregs + MYRICTRL_CTRL);
-
-		softstate = sbus_readl(&chan->state);
-		DET(("chanstate[%08x] ", softstate));
-		if (softstate == STATE_READY) {
-			DET(("wakeup successful, "));
-			break;
-		}
-
-		if (softstate != STATE_WFN) {
-			DET(("not WFN setting that, "));
-			sbus_writel(STATE_WFN, &chan->state);
-		}
-
-		udelay(20);
-	}
-
-	myri_enable_irq(mp->lregs, cregs);
-
-	if (tick > 25) {
-		DET(("25 ticks we lose, failure.\n"));
-		return -1;
-	}
-	DET(("success\n"));
-	return 0;
-}
-
-static int __devinit myri_load_lanai(struct myri_eth *mp)
-{
-	const struct firmware	*fw;
-	struct net_device	*dev = mp->dev;
-	struct myri_shmem __iomem *shmem = mp->shmem;
-	void __iomem		*rptr;
-	int 			i, lanai4_data_size;
-
-	myri_disable_irq(mp->lregs, mp->cregs);
-	myri_reset_on(mp->cregs);
-
-	rptr = mp->lanai;
-	for (i = 0; i < mp->eeprom.ramsz; i++)
-		sbus_writeb(0, rptr + i);
-
-	if (mp->eeprom.cpuvers >= CPUVERS_3_0)
-		sbus_writel(mp->eeprom.cval, mp->lregs + LANAI_CVAL);
-
-	i = request_firmware(&fw, FWNAME, &mp->myri_op->dev);
-	if (i) {
-		printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
-		       FWNAME, i);
-		return i;
-	}
-	if (fw->size < 2) {
-		printk(KERN_ERR "Bogus length %zu in image \"%s\"\n",
-		       fw->size, FWNAME);
-		release_firmware(fw);
-		return -EINVAL;
-	}
-	lanai4_data_size = fw->data[0] << 8 | fw->data[1];
-
-	/* Load executable code. */
-	for (i = 2; i < fw->size; i++)
-		sbus_writeb(fw->data[i], rptr++);
-
-	/* Load data segment. */
-	for (i = 0; i < lanai4_data_size; i++)
-		sbus_writeb(0, rptr++);
-
-	/* Set device address. */
-	sbus_writeb(0, &shmem->addr[0]);
-	sbus_writeb(0, &shmem->addr[1]);
-	for (i = 0; i < 6; i++)
-		sbus_writeb(dev->dev_addr[i],
-			    &shmem->addr[i + 2]);
-
-	/* Set SBUS bursts and interrupt mask. */
-	sbus_writel(((mp->myri_bursts & 0xf8) >> 3), &shmem->burst);
-	sbus_writel(SHMEM_IMASK_RX, &shmem->imask);
-
-	/* Release the LANAI. */
-	myri_disable_irq(mp->lregs, mp->cregs);
-	myri_reset_off(mp->lregs, mp->cregs);
-	myri_disable_irq(mp->lregs, mp->cregs);
-
-	/* Wait for the reset to complete. */
-	for (i = 0; i < 5000; i++) {
-		if (sbus_readl(&shmem->channel.state) != STATE_READY)
-			break;
-		else
-			udelay(10);
-	}
-
-	if (i == 5000)
-		printk(KERN_ERR "myricom: Chip would not reset after firmware load.\n");
-
-	i = myri_do_handshake(mp);
-	if (i)
-		printk(KERN_ERR "myricom: Handshake with LANAI failed.\n");
-
-	if (mp->eeprom.cpuvers == CPUVERS_4_0)
-		sbus_writel(0, mp->lregs + LANAI_VERS);
-
-	release_firmware(fw);
-	return i;
-}
-
-static void myri_clean_rings(struct myri_eth *mp)
-{
-	struct sendq __iomem *sq = mp->sq;
-	struct recvq __iomem *rq = mp->rq;
-	int i;
-
-	sbus_writel(0, &rq->tail);
-	sbus_writel(0, &rq->head);
-	for (i = 0; i < (RX_RING_SIZE+1); i++) {
-		if (mp->rx_skbs[i] != NULL) {
-			struct myri_rxd __iomem *rxd = &rq->myri_rxd[i];
-			u32 dma_addr;
-
-			dma_addr = sbus_readl(&rxd->myri_scatters[0].addr);
-			dma_unmap_single(&mp->myri_op->dev, dma_addr,
-					 RX_ALLOC_SIZE, DMA_FROM_DEVICE);
-			dev_kfree_skb(mp->rx_skbs[i]);
-			mp->rx_skbs[i] = NULL;
-		}
-	}
-
-	mp->tx_old = 0;
-	sbus_writel(0, &sq->tail);
-	sbus_writel(0, &sq->head);
-	for (i = 0; i < TX_RING_SIZE; i++) {
-		if (mp->tx_skbs[i] != NULL) {
-			struct sk_buff *skb = mp->tx_skbs[i];
-			struct myri_txd __iomem *txd = &sq->myri_txd[i];
-			u32 dma_addr;
-
-			dma_addr = sbus_readl(&txd->myri_gathers[0].addr);
-			dma_unmap_single(&mp->myri_op->dev, dma_addr,
-					 (skb->len + 3) & ~3,
-					 DMA_TO_DEVICE);
-			dev_kfree_skb(mp->tx_skbs[i]);
-			mp->tx_skbs[i] = NULL;
-		}
-	}
-}
-
-static void myri_init_rings(struct myri_eth *mp, int from_irq)
-{
-	struct recvq __iomem *rq = mp->rq;
-	struct myri_rxd __iomem *rxd = &rq->myri_rxd[0];
-	struct net_device *dev = mp->dev;
-	gfp_t gfp_flags = GFP_KERNEL;
-	int i;
-
-	if (from_irq || in_interrupt())
-		gfp_flags = GFP_ATOMIC;
-
-	myri_clean_rings(mp);
-	for (i = 0; i < RX_RING_SIZE; i++) {
-		struct sk_buff *skb = myri_alloc_skb(RX_ALLOC_SIZE, gfp_flags);
-		u32 dma_addr;
-
-		if (!skb)
-			continue;
-		mp->rx_skbs[i] = skb;
-		skb->dev = dev;
-		skb_put(skb, RX_ALLOC_SIZE);
-
-		dma_addr = dma_map_single(&mp->myri_op->dev,
-					  skb->data, RX_ALLOC_SIZE,
-					  DMA_FROM_DEVICE);
-		sbus_writel(dma_addr, &rxd[i].myri_scatters[0].addr);
-		sbus_writel(RX_ALLOC_SIZE, &rxd[i].myri_scatters[0].len);
-		sbus_writel(i, &rxd[i].ctx);
-		sbus_writel(1, &rxd[i].num_sg);
-	}
-	sbus_writel(0, &rq->head);
-	sbus_writel(RX_RING_SIZE, &rq->tail);
-}
-
-static int myri_init(struct myri_eth *mp, int from_irq)
-{
-	myri_init_rings(mp, from_irq);
-	return 0;
-}
-
-static void myri_is_not_so_happy(struct myri_eth *mp)
-{
-}
-
-#ifdef DEBUG_HEADER
-static void dump_ehdr(struct ethhdr *ehdr)
-{
-	printk("ehdr[h_dst(%pM)"
-	       "h_source(%pM)"
-	       "h_proto(%04x)]\n",
-	       ehdr->h_dest, ehdr->h_source, ehdr->h_proto);
-}
-
-static void dump_ehdr_and_myripad(unsigned char *stuff)
-{
-	struct ethhdr *ehdr = (struct ethhdr *) (stuff + 2);
-
-	printk("pad[%02x:%02x]", stuff[0], stuff[1]);
-	dump_ehdr(ehdr);
-}
-#endif
-
-static void myri_tx(struct myri_eth *mp, struct net_device *dev)
-{
-	struct sendq __iomem *sq= mp->sq;
-	int entry		= mp->tx_old;
-	int limit		= sbus_readl(&sq->head);
-
-	DTX(("entry[%d] limit[%d] ", entry, limit));
-	if (entry == limit)
-		return;
-	while (entry != limit) {
-		struct sk_buff *skb = mp->tx_skbs[entry];
-		u32 dma_addr;
-
-		DTX(("SKB[%d] ", entry));
-		dma_addr = sbus_readl(&sq->myri_txd[entry].myri_gathers[0].addr);
-		dma_unmap_single(&mp->myri_op->dev, dma_addr,
-				 skb->len, DMA_TO_DEVICE);
-		dev_kfree_skb(skb);
-		mp->tx_skbs[entry] = NULL;
-		dev->stats.tx_packets++;
-		entry = NEXT_TX(entry);
-	}
-	mp->tx_old = entry;
-}
-
-/* Determine the packet's protocol ID. The rule here is that we
- * assume 802.3 if the type field is short enough to be a length.
- * This is normal practice and works for any 'now in use' protocol.
- */
-static __be16 myri_type_trans(struct sk_buff *skb, struct net_device *dev)
-{
-	struct ethhdr *eth;
-	unsigned char *rawp;
-
-	skb_set_mac_header(skb, MYRI_PAD_LEN);
-	skb_pull(skb, dev->hard_header_len);
-	eth = eth_hdr(skb);
-
-#ifdef DEBUG_HEADER
-	DHDR(("myri_type_trans: "));
-	dump_ehdr(eth);
-#endif
-	if (*eth->h_dest & 1) {
-		if (memcmp(eth->h_dest, dev->broadcast, ETH_ALEN)==0)
-			skb->pkt_type = PACKET_BROADCAST;
-		else
-			skb->pkt_type = PACKET_MULTICAST;
-	} else if (dev->flags & (IFF_PROMISC|IFF_ALLMULTI)) {
-		if (memcmp(eth->h_dest, dev->dev_addr, ETH_ALEN))
-			skb->pkt_type = PACKET_OTHERHOST;
-	}
-
-	if (ntohs(eth->h_proto) >= 1536)
-		return eth->h_proto;
-
-	rawp = skb->data;
-
-	/* This is a magic hack to spot IPX packets. Older Novell breaks
-	 * the protocol design and runs IPX over 802.3 without an 802.2 LLC
-	 * layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
-	 * won't work for fault tolerant netware but does for the rest.
-	 */
-	if (*(unsigned short *)rawp == 0xFFFF)
-		return htons(ETH_P_802_3);
-
-	/* Real 802.2 LLC */
-	return htons(ETH_P_802_2);
-}
-
-static void myri_rx(struct myri_eth *mp, struct net_device *dev)
-{
-	struct recvq __iomem *rq = mp->rq;
-	struct recvq __iomem *rqa = mp->rqack;
-	int entry		= sbus_readl(&rqa->head);
-	int limit		= sbus_readl(&rqa->tail);
-	int drops;
-
-	DRX(("entry[%d] limit[%d] ", entry, limit));
-	if (entry == limit)
-		return;
-	drops = 0;
-	DRX(("\n"));
-	while (entry != limit) {
-		struct myri_rxd __iomem *rxdack = &rqa->myri_rxd[entry];
-		u32 csum		= sbus_readl(&rxdack->csum);
-		int len			= sbus_readl(&rxdack->myri_scatters[0].len);
-		int index		= sbus_readl(&rxdack->ctx);
-		struct myri_rxd __iomem *rxd = &rq->myri_rxd[sbus_readl(&rq->tail)];
-		struct sk_buff *skb	= mp->rx_skbs[index];
-
-		/* Ack it. */
-		sbus_writel(NEXT_RX(entry), &rqa->head);
-
-		/* Check for errors. */
-		DRX(("rxd[%d]: %p len[%d] csum[%08x] ", entry, rxd, len, csum));
-		dma_sync_single_for_cpu(&mp->myri_op->dev,
-					sbus_readl(&rxd->myri_scatters[0].addr),
-					RX_ALLOC_SIZE, DMA_FROM_DEVICE);
-		if (len < (ETH_HLEN + MYRI_PAD_LEN) || (skb->data[0] != MYRI_PAD_LEN)) {
-			DRX(("ERROR["));
-			dev->stats.rx_errors++;
-			if (len < (ETH_HLEN + MYRI_PAD_LEN)) {
-				DRX(("BAD_LENGTH] "));
-				dev->stats.rx_length_errors++;
-			} else {
-				DRX(("NO_PADDING] "));
-				dev->stats.rx_frame_errors++;
-			}
-
-			/* Return it to the LANAI. */
-	drop_it:
-			drops++;
-			DRX(("DROP "));
-			dev->stats.rx_dropped++;
-			dma_sync_single_for_device(&mp->myri_op->dev,
-						   sbus_readl(&rxd->myri_scatters[0].addr),
-						   RX_ALLOC_SIZE,
-						   DMA_FROM_DEVICE);
-			sbus_writel(RX_ALLOC_SIZE, &rxd->myri_scatters[0].len);
-			sbus_writel(index, &rxd->ctx);
-			sbus_writel(1, &rxd->num_sg);
-			sbus_writel(NEXT_RX(sbus_readl(&rq->tail)), &rq->tail);
-			goto next;
-		}
-
-		DRX(("len[%d] ", len));
-		if (len > RX_COPY_THRESHOLD) {
-			struct sk_buff *new_skb;
-			u32 dma_addr;
-
-			DRX(("BIGBUFF "));
-			new_skb = myri_alloc_skb(RX_ALLOC_SIZE, GFP_ATOMIC);
-			if (new_skb == NULL) {
-				DRX(("skb_alloc(FAILED) "));
-				goto drop_it;
-			}
-			dma_unmap_single(&mp->myri_op->dev,
-					 sbus_readl(&rxd->myri_scatters[0].addr),
-					 RX_ALLOC_SIZE,
-					 DMA_FROM_DEVICE);
-			mp->rx_skbs[index] = new_skb;
-			new_skb->dev = dev;
-			skb_put(new_skb, RX_ALLOC_SIZE);
-			dma_addr = dma_map_single(&mp->myri_op->dev,
-						  new_skb->data,
-						  RX_ALLOC_SIZE,
-						  DMA_FROM_DEVICE);
-			sbus_writel(dma_addr, &rxd->myri_scatters[0].addr);
-			sbus_writel(RX_ALLOC_SIZE, &rxd->myri_scatters[0].len);
-			sbus_writel(index, &rxd->ctx);
-			sbus_writel(1, &rxd->num_sg);
-			sbus_writel(NEXT_RX(sbus_readl(&rq->tail)), &rq->tail);
-
-			/* Trim the original skb for the netif. */
-			DRX(("trim(%d) ", len));
-			skb_trim(skb, len);
-		} else {
-			struct sk_buff *copy_skb = dev_alloc_skb(len);
-
-			DRX(("SMALLBUFF "));
-			if (copy_skb == NULL) {
-				DRX(("dev_alloc_skb(FAILED) "));
-				goto drop_it;
-			}
-			/* DMA sync already done above. */
-			copy_skb->dev = dev;
-			DRX(("resv_and_put "));
-			skb_put(copy_skb, len);
-			skb_copy_from_linear_data(skb, copy_skb->data, len);
-
-			/* Reuse original ring buffer. */
-			DRX(("reuse "));
-			dma_sync_single_for_device(&mp->myri_op->dev,
-						   sbus_readl(&rxd->myri_scatters[0].addr),
-						   RX_ALLOC_SIZE,
-						   DMA_FROM_DEVICE);
-			sbus_writel(RX_ALLOC_SIZE, &rxd->myri_scatters[0].len);
-			sbus_writel(index, &rxd->ctx);
-			sbus_writel(1, &rxd->num_sg);
-			sbus_writel(NEXT_RX(sbus_readl(&rq->tail)), &rq->tail);
-
-			skb = copy_skb;
-		}
-
-		/* Just like the happy meal we get checksums from this card. */
-		skb->csum = csum;
-		skb->ip_summed = CHECKSUM_UNNECESSARY; /* XXX */
-
-		skb->protocol = myri_type_trans(skb, dev);
-		DRX(("prot[%04x] netif_rx ", skb->protocol));
-		netif_rx(skb);
-
-		dev->stats.rx_packets++;
-		dev->stats.rx_bytes += len;
-	next:
-		DRX(("NEXT\n"));
-		entry = NEXT_RX(entry);
-	}
-}
-
-static irqreturn_t myri_interrupt(int irq, void *dev_id)
-{
-	struct net_device *dev		= (struct net_device *) dev_id;
-	struct myri_eth *mp		= netdev_priv(dev);
-	void __iomem *lregs		= mp->lregs;
-	struct myri_channel __iomem *chan = &mp->shmem->channel;
-	unsigned long flags;
-	u32 status;
-	int handled = 0;
-
-	spin_lock_irqsave(&mp->irq_lock, flags);
-
-	status = sbus_readl(lregs + LANAI_ISTAT);
-	DIRQ(("myri_interrupt: status[%08x] ", status));
-	if (status & ISTAT_HOST) {
-		u32 softstate;
-
-		handled = 1;
-		DIRQ(("IRQ_DISAB "));
-		myri_disable_irq(lregs, mp->cregs);
-		softstate = sbus_readl(&chan->state);
-		DIRQ(("state[%08x] ", softstate));
-		if (softstate != STATE_READY) {
-			DIRQ(("myri_not_so_happy "));
-			myri_is_not_so_happy(mp);
-		}
-		DIRQ(("\nmyri_rx: "));
-		myri_rx(mp, dev);
-		DIRQ(("\nistat=ISTAT_HOST "));
-		sbus_writel(ISTAT_HOST, lregs + LANAI_ISTAT);
-		DIRQ(("IRQ_ENAB "));
-		myri_enable_irq(lregs, mp->cregs);
-	}
-	DIRQ(("\n"));
-
-	spin_unlock_irqrestore(&mp->irq_lock, flags);
-
-	return IRQ_RETVAL(handled);
-}
-
-static int myri_open(struct net_device *dev)
-{
-	struct myri_eth *mp = netdev_priv(dev);
-
-	return myri_init(mp, in_interrupt());
-}
-
-static int myri_close(struct net_device *dev)
-{
-	struct myri_eth *mp = netdev_priv(dev);
-
-	myri_clean_rings(mp);
-	return 0;
-}
-
-static void myri_tx_timeout(struct net_device *dev)
-{
-	struct myri_eth *mp = netdev_priv(dev);
-
-	printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
-
-	dev->stats.tx_errors++;
-	myri_init(mp, 0);
-	netif_wake_queue(dev);
-}
-
-static int myri_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
-	struct myri_eth *mp = netdev_priv(dev);
-	struct sendq __iomem *sq = mp->sq;
-	struct myri_txd __iomem *txd;
-	unsigned long flags;
-	unsigned int head, tail;
-	int len, entry;
-	u32 dma_addr;
-
-	DTX(("myri_start_xmit: "));
-
-	myri_tx(mp, dev);
-
-	netif_stop_queue(dev);
-
-	/* This is just to prevent multiple PIO reads for TX_BUFFS_AVAIL. */
-	head = sbus_readl(&sq->head);
-	tail = sbus_readl(&sq->tail);
-
-	if (!TX_BUFFS_AVAIL(head, tail)) {
-		DTX(("no buffs available, returning 1\n"));
-		return NETDEV_TX_BUSY;
-	}
-
-	spin_lock_irqsave(&mp->irq_lock, flags);
-
-	DHDR(("xmit[skbdata(%p)]\n", skb->data));
-#ifdef DEBUG_HEADER
-	dump_ehdr_and_myripad(((unsigned char *) skb->data));
-#endif
-
-	/* XXX Maybe this can go as well. */
-	len = skb->len;
-	if (len & 3) {
-		DTX(("len&3 "));
-		len = (len + 4) & (~3);
-	}
-
-	entry = sbus_readl(&sq->tail);
-
-	txd = &sq->myri_txd[entry];
-	mp->tx_skbs[entry] = skb;
-
-	/* Must do this before we sbus map it. */
-	if (skb->data[MYRI_PAD_LEN] & 0x1) {
-		sbus_writew(0xffff, &txd->addr[0]);
-		sbus_writew(0xffff, &txd->addr[1]);
-		sbus_writew(0xffff, &txd->addr[2]);
-		sbus_writew(0xffff, &txd->addr[3]);
-	} else {
-		sbus_writew(0xffff, &txd->addr[0]);
-		sbus_writew((skb->data[0] << 8) | skb->data[1], &txd->addr[1]);
-		sbus_writew((skb->data[2] << 8) | skb->data[3], &txd->addr[2]);
-		sbus_writew((skb->data[4] << 8) | skb->data[5], &txd->addr[3]);
-	}
-
-	dma_addr = dma_map_single(&mp->myri_op->dev, skb->data,
-				  len, DMA_TO_DEVICE);
-	sbus_writel(dma_addr, &txd->myri_gathers[0].addr);
-	sbus_writel(len, &txd->myri_gathers[0].len);
-	sbus_writel(1, &txd->num_sg);
-	sbus_writel(KERNEL_CHANNEL, &txd->chan);
-	sbus_writel(len, &txd->len);
-	sbus_writel((u32)-1, &txd->csum_off);
-	sbus_writel(0, &txd->csum_field);
-
-	sbus_writel(NEXT_TX(entry), &sq->tail);
-	DTX(("BangTheChip "));
-	bang_the_chip(mp);
-
-	DTX(("tbusy=0, returning 0\n"));
-	netif_start_queue(dev);
-	spin_unlock_irqrestore(&mp->irq_lock, flags);
-	return NETDEV_TX_OK;
-}
-
-/* Create the MyriNet MAC header for an arbitrary protocol layer
- *
- * saddr=NULL	means use device source address
- * daddr=NULL	means leave destination address (eg unresolved arp)
- */
-static int myri_header(struct sk_buff *skb, struct net_device *dev,
-		       unsigned short type, const void *daddr,
-		       const void *saddr, unsigned len)
-{
-	struct ethhdr *eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
-	unsigned char *pad = (unsigned char *) skb_push(skb, MYRI_PAD_LEN);
-
-#ifdef DEBUG_HEADER
-	DHDR(("myri_header: pad[%02x,%02x] ", pad[0], pad[1]));
-	dump_ehdr(eth);
-#endif
-
-	/* Set the MyriNET padding identifier. */
-	pad[0] = MYRI_PAD_LEN;
-	pad[1] = 0xab;
-
-	/* Set the protocol type. For a packet of type ETH_P_802_3/2 we put the
-	 * length in here instead.
-	 */
-	if (type != ETH_P_802_3 && type != ETH_P_802_2)
-		eth->h_proto = htons(type);
-	else
-		eth->h_proto = htons(len);
-
-	/* Set the source hardware address. */
-	if (saddr)
-		memcpy(eth->h_source, saddr, dev->addr_len);
-	else
-		memcpy(eth->h_source, dev->dev_addr, dev->addr_len);
-
-	/* Anyway, the loopback-device should never use this function... */
-	if (dev->flags & IFF_LOOPBACK) {
-		int i;
-		for (i = 0; i < dev->addr_len; i++)
-			eth->h_dest[i] = 0;
-		return dev->hard_header_len;
-	}
-
-	if (daddr) {
-		memcpy(eth->h_dest, daddr, dev->addr_len);
-		return dev->hard_header_len;
-	}
-	return -dev->hard_header_len;
-}
-
-/* Rebuild the MyriNet MAC header. This is called after an ARP
- * (or in future other address resolution) has completed on this
- * sk_buff. We now let ARP fill in the other fields.
- */
-static int myri_rebuild_header(struct sk_buff *skb)
-{
-	unsigned char *pad = (unsigned char *) skb->data;
-	struct ethhdr *eth = (struct ethhdr *) (pad + MYRI_PAD_LEN);
-	struct net_device *dev = skb->dev;
-
-#ifdef DEBUG_HEADER
-	DHDR(("myri_rebuild_header: pad[%02x,%02x] ", pad[0], pad[1]));
-	dump_ehdr(eth);
-#endif
-
-	/* Refill MyriNet padding identifiers, this is just being anal. */
-	pad[0] = MYRI_PAD_LEN;
-	pad[1] = 0xab;
-
-	switch (eth->h_proto)
-	{
-#ifdef CONFIG_INET
-	case cpu_to_be16(ETH_P_IP):
- 		return arp_find(eth->h_dest, skb);
-#endif
-
-	default:
-		printk(KERN_DEBUG
-		       "%s: unable to resolve type %X addresses.\n",
-		       dev->name, (int)eth->h_proto);
-
-		memcpy(eth->h_source, dev->dev_addr, dev->addr_len);
-		return 0;
-		break;
-	}
-
-	return 0;
-}
-
-static int myri_header_cache(const struct neighbour *neigh, struct hh_cache *hh)
-{
-	unsigned short type = hh->hh_type;
-	unsigned char *pad;
-	struct ethhdr *eth;
-	const struct net_device *dev = neigh->dev;
-
-	pad = ((unsigned char *) hh->hh_data) +
-		HH_DATA_OFF(sizeof(*eth) + MYRI_PAD_LEN);
-	eth = (struct ethhdr *) (pad + MYRI_PAD_LEN);
-
-	if (type == htons(ETH_P_802_3))
-		return -1;
-
-	/* Refill MyriNet padding identifiers, this is just being anal. */
-	pad[0] = MYRI_PAD_LEN;
-	pad[1] = 0xab;
-
-	eth->h_proto = type;
-	memcpy(eth->h_source, dev->dev_addr, dev->addr_len);
-	memcpy(eth->h_dest, neigh->ha, dev->addr_len);
-	hh->hh_len = 16;
-	return 0;
-}
-
-
-/* Called by Address Resolution module to notify changes in address. */
-void myri_header_cache_update(struct hh_cache *hh,
-			      const struct net_device *dev,
-			      const unsigned char * haddr)
-{
-	memcpy(((u8*)hh->hh_data) + HH_DATA_OFF(sizeof(struct ethhdr)),
-	       haddr, dev->addr_len);
-}
-
-static int myri_change_mtu(struct net_device *dev, int new_mtu)
-{
-	if ((new_mtu < (ETH_HLEN + MYRI_PAD_LEN)) || (new_mtu > MYRINET_MTU))
-		return -EINVAL;
-	dev->mtu = new_mtu;
-	return 0;
-}
-
-static void myri_set_multicast(struct net_device *dev)
-{
-	/* Do nothing, all MyriCOM nodes transmit multicast frames
-	 * as broadcast packets...
-	 */
-}
-
-static inline void set_boardid_from_idprom(struct myri_eth *mp, int num)
-{
-	mp->eeprom.id[0] = 0;
-	mp->eeprom.id[1] = idprom->id_machtype;
-	mp->eeprom.id[2] = (idprom->id_sernum >> 16) & 0xff;
-	mp->eeprom.id[3] = (idprom->id_sernum >> 8) & 0xff;
-	mp->eeprom.id[4] = (idprom->id_sernum >> 0) & 0xff;
-	mp->eeprom.id[5] = num;
-}
-
-static inline void determine_reg_space_size(struct myri_eth *mp)
-{
-	switch(mp->eeprom.cpuvers) {
-	case CPUVERS_2_3:
-	case CPUVERS_3_0:
-	case CPUVERS_3_1:
-	case CPUVERS_3_2:
-		mp->reg_size = (3 * 128 * 1024) + 4096;
-		break;
-
-	case CPUVERS_4_0:
-	case CPUVERS_4_1:
-		mp->reg_size = ((4096<<1) + mp->eeprom.ramsz);
-		break;
-
-	case CPUVERS_4_2:
-	case CPUVERS_5_0:
-	default:
-		printk("myricom: AIEEE weird cpu version %04x assuming pre4.0\n",
-		       mp->eeprom.cpuvers);
-		mp->reg_size = (3 * 128 * 1024) + 4096;
-	}
-}
-
-#ifdef DEBUG_DETECT
-static void dump_eeprom(struct myri_eth *mp)
-{
-	printk("EEPROM: clockval[%08x] cpuvers[%04x] "
-	       "id[%02x,%02x,%02x,%02x,%02x,%02x]\n",
-	       mp->eeprom.cval, mp->eeprom.cpuvers,
-	       mp->eeprom.id[0], mp->eeprom.id[1], mp->eeprom.id[2],
-	       mp->eeprom.id[3], mp->eeprom.id[4], mp->eeprom.id[5]);
-	printk("EEPROM: ramsz[%08x]\n", mp->eeprom.ramsz);
-	printk("EEPROM: fvers[%02x,%02x,%02x,%02x,%02x,%02x,%02x,%02x\n",
-	       mp->eeprom.fvers[0], mp->eeprom.fvers[1], mp->eeprom.fvers[2],
-	       mp->eeprom.fvers[3], mp->eeprom.fvers[4], mp->eeprom.fvers[5],
-	       mp->eeprom.fvers[6], mp->eeprom.fvers[7]);
-	printk("EEPROM:       %02x,%02x,%02x,%02x,%02x,%02x,%02x,%02x\n",
-	       mp->eeprom.fvers[8], mp->eeprom.fvers[9], mp->eeprom.fvers[10],
-	       mp->eeprom.fvers[11], mp->eeprom.fvers[12], mp->eeprom.fvers[13],
-	       mp->eeprom.fvers[14], mp->eeprom.fvers[15]);
-	printk("EEPROM:       %02x,%02x,%02x,%02x,%02x,%02x,%02x,%02x\n",
-	       mp->eeprom.fvers[16], mp->eeprom.fvers[17], mp->eeprom.fvers[18],
-	       mp->eeprom.fvers[19], mp->eeprom.fvers[20], mp->eeprom.fvers[21],
-	       mp->eeprom.fvers[22], mp->eeprom.fvers[23]);
-	printk("EEPROM:       %02x,%02x,%02x,%02x,%02x,%02x,%02x,%02x]\n",
-	       mp->eeprom.fvers[24], mp->eeprom.fvers[25], mp->eeprom.fvers[26],
-	       mp->eeprom.fvers[27], mp->eeprom.fvers[28], mp->eeprom.fvers[29],
-	       mp->eeprom.fvers[30], mp->eeprom.fvers[31]);
-	printk("EEPROM: mvers[%02x,%02x,%02x,%02x,%02x,%02x,%02x,%02x\n",
-	       mp->eeprom.mvers[0], mp->eeprom.mvers[1], mp->eeprom.mvers[2],
-	       mp->eeprom.mvers[3], mp->eeprom.mvers[4], mp->eeprom.mvers[5],
-	       mp->eeprom.mvers[6], mp->eeprom.mvers[7]);
-	printk("EEPROM:       %02x,%02x,%02x,%02x,%02x,%02x,%02x,%02x]\n",
-	       mp->eeprom.mvers[8], mp->eeprom.mvers[9], mp->eeprom.mvers[10],
-	       mp->eeprom.mvers[11], mp->eeprom.mvers[12], mp->eeprom.mvers[13],
-	       mp->eeprom.mvers[14], mp->eeprom.mvers[15]);
-	printk("EEPROM: dlval[%04x] brd_type[%04x] bus_type[%04x] prod_code[%04x]\n",
-	       mp->eeprom.dlval, mp->eeprom.brd_type, mp->eeprom.bus_type,
-	       mp->eeprom.prod_code);
-	printk("EEPROM: serial_num[%08x]\n", mp->eeprom.serial_num);
-}
-#endif
-
-static const struct header_ops myri_header_ops = {
-	.create		= myri_header,
-	.rebuild	= myri_rebuild_header,
-	.cache	 	= myri_header_cache,
-	.cache_update	= myri_header_cache_update,
-};
-
-static const struct net_device_ops myri_ops = {
-	.ndo_open		= myri_open,
-	.ndo_stop		= myri_close,
-	.ndo_start_xmit		= myri_start_xmit,
-	.ndo_set_multicast_list	= myri_set_multicast,
-	.ndo_tx_timeout		= myri_tx_timeout,
-	.ndo_change_mtu		= myri_change_mtu,
-	.ndo_set_mac_address	= eth_mac_addr,
-	.ndo_validate_addr	= eth_validate_addr,
-};
-
-static int __devinit myri_sbus_probe(struct platform_device *op)
-{
-	struct device_node *dp = op->dev.of_node;
-	static unsigned version_printed;
-	struct net_device *dev;
-	struct myri_eth *mp;
-	const void *prop;
-	static int num;
-	int i, len;
-
-	DET(("myri_ether_init(%p,%d):\n", op, num));
-	dev = alloc_etherdev(sizeof(struct myri_eth));
-	if (!dev)
-		return -ENOMEM;
-
-	if (version_printed++ == 0)
-		printk(version);
-
-	SET_NETDEV_DEV(dev, &op->dev);
-
-	mp = netdev_priv(dev);
-	spin_lock_init(&mp->irq_lock);
-	mp->myri_op = op;
-
-	/* Clean out skb arrays. */
-	for (i = 0; i < (RX_RING_SIZE + 1); i++)
-		mp->rx_skbs[i] = NULL;
-
-	for (i = 0; i < TX_RING_SIZE; i++)
-		mp->tx_skbs[i] = NULL;
-
-	/* First check for EEPROM information. */
-	prop = of_get_property(dp, "myrinet-eeprom-info", &len);
-
-	if (prop)
-		memcpy(&mp->eeprom, prop, sizeof(struct myri_eeprom));
-	if (!prop) {
-		/* No eeprom property, must cook up the values ourselves. */
-		DET(("No EEPROM: "));
-		mp->eeprom.bus_type = BUS_TYPE_SBUS;
-		mp->eeprom.cpuvers =
-			of_getintprop_default(dp, "cpu_version", 0);
-		mp->eeprom.cval =
-			of_getintprop_default(dp, "clock_value", 0);
-		mp->eeprom.ramsz = of_getintprop_default(dp, "sram_size", 0);
-		if (!mp->eeprom.cpuvers)
-			mp->eeprom.cpuvers = CPUVERS_2_3;
-		if (mp->eeprom.cpuvers < CPUVERS_3_0)
-			mp->eeprom.cval = 0;
-		if (!mp->eeprom.ramsz)
-			mp->eeprom.ramsz = (128 * 1024);
-
-		prop = of_get_property(dp, "myrinet-board-id", &len);
-		if (prop)
-			memcpy(&mp->eeprom.id[0], prop, 6);
-		else
-			set_boardid_from_idprom(mp, num);
-
-		prop = of_get_property(dp, "fpga_version", &len);
-		if (prop)
-			memcpy(&mp->eeprom.fvers[0], prop, 32);
-		else
-			memset(&mp->eeprom.fvers[0], 0, 32);
-
-		if (mp->eeprom.cpuvers == CPUVERS_4_1) {
-			if (mp->eeprom.ramsz == (128 * 1024))
-				mp->eeprom.ramsz = (256 * 1024);
-			if ((mp->eeprom.cval == 0x40414041) ||
-			    (mp->eeprom.cval == 0x90449044))
-				mp->eeprom.cval = 0x50e450e4;
-		}
-	}
-#ifdef DEBUG_DETECT
-	dump_eeprom(mp);
-#endif
-
-	for (i = 0; i < 6; i++)
-		dev->dev_addr[i] = mp->eeprom.id[i];
-
-	determine_reg_space_size(mp);
-
-	/* Map in the MyriCOM register/localram set. */
-	if (mp->eeprom.cpuvers < CPUVERS_4_0) {
-		/* XXX Makes no sense, if control reg is non-existent this
-		 * XXX driver cannot function at all... maybe pre-4.0 is
-		 * XXX only a valid version for PCI cards?  Ask feldy...
-		 */
-		DET(("Mapping regs for cpuvers < CPUVERS_4_0\n"));
-		mp->regs = of_ioremap(&op->resource[0], 0,
-				      mp->reg_size, "MyriCOM Regs");
-		if (!mp->regs) {
-			printk("MyriCOM: Cannot map MyriCOM registers.\n");
-			goto err;
-		}
-		mp->lanai = mp->regs + (256 * 1024);
-		mp->lregs = mp->lanai + (0x10000 * 2);
-	} else {
-		DET(("Mapping regs for cpuvers >= CPUVERS_4_0\n"));
-		mp->cregs = of_ioremap(&op->resource[0], 0,
-				       PAGE_SIZE, "MyriCOM Control Regs");
-		mp->lregs = of_ioremap(&op->resource[0], (256 * 1024),
-					 PAGE_SIZE, "MyriCOM LANAI Regs");
-		mp->lanai = of_ioremap(&op->resource[0], (512 * 1024),
-				       mp->eeprom.ramsz, "MyriCOM SRAM");
-	}
-	DET(("Registers mapped: cregs[%p] lregs[%p] lanai[%p]\n",
-	     mp->cregs, mp->lregs, mp->lanai));
-
-	if (mp->eeprom.cpuvers >= CPUVERS_4_0)
-		mp->shmem_base = 0xf000;
-	else
-		mp->shmem_base = 0x8000;
-
-	DET(("Shared memory base is %04x, ", mp->shmem_base));
-
-	mp->shmem = (struct myri_shmem __iomem *)
-		(mp->lanai + (mp->shmem_base * 2));
-	DET(("shmem mapped at %p\n", mp->shmem));
-
-	mp->rqack	= &mp->shmem->channel.recvqa;
-	mp->rq		= &mp->shmem->channel.recvq;
-	mp->sq		= &mp->shmem->channel.sendq;
-
-	/* Reset the board. */
-	DET(("Resetting LANAI\n"));
-	myri_reset_off(mp->lregs, mp->cregs);
-	myri_reset_on(mp->cregs);
-
-	/* Turn IRQ's off. */
-	myri_disable_irq(mp->lregs, mp->cregs);
-
-	/* Reset once more. */
-	myri_reset_on(mp->cregs);
-
-	/* Get the supported DVMA burst sizes from our SBUS. */
-	mp->myri_bursts = of_getintprop_default(dp->parent,
-						"burst-sizes", 0x00);
-	if (!sbus_can_burst64())
-		mp->myri_bursts &= ~(DMA_BURST64);
-
-	DET(("MYRI bursts %02x\n", mp->myri_bursts));
-
-	/* Encode SBUS interrupt level in second control register. */
-	i = of_getintprop_default(dp, "interrupts", 0);
-	if (i == 0)
-		i = 4;
-	DET(("prom_getint(interrupts)==%d, irqlvl set to %04x\n",
-	     i, (1 << i)));
-
-	sbus_writel((1 << i), mp->cregs + MYRICTRL_IRQLVL);
-
-	mp->dev = dev;
-	dev->watchdog_timeo = 5*HZ;
-	dev->irq = op->archdata.irqs[0];
-	dev->netdev_ops = &myri_ops;
-
-	/* Register interrupt handler now. */
-	DET(("Requesting MYRIcom IRQ line.\n"));
-	if (request_irq(dev->irq, myri_interrupt,
-			IRQF_SHARED, "MyriCOM Ethernet", (void *) dev)) {
-		printk("MyriCOM: Cannot register interrupt handler.\n");
-		goto err;
-	}
-
-	dev->mtu		= MYRINET_MTU;
-	dev->header_ops		= &myri_header_ops;
-
-	dev->hard_header_len	= (ETH_HLEN + MYRI_PAD_LEN);
-
-	/* Load code onto the LANai. */
-	DET(("Loading LANAI firmware\n"));
-	if (myri_load_lanai(mp)) {
-		printk(KERN_ERR "MyriCOM: Cannot Load LANAI firmware.\n");
-		goto err_free_irq;
-	}
-
-	if (register_netdev(dev)) {
-		printk("MyriCOM: Cannot register device.\n");
-		goto err_free_irq;
-	}
-
-	dev_set_drvdata(&op->dev, mp);
-
-	num++;
-
-	printk("%s: MyriCOM MyriNET Ethernet %pM\n",
-	       dev->name, dev->dev_addr);
-
-	return 0;
-
-err_free_irq:
-	free_irq(dev->irq, dev);
-err:
-	/* This will also free the co-allocated private data*/
-	free_netdev(dev);
-	return -ENODEV;
-}
-
-static int __devexit myri_sbus_remove(struct platform_device *op)
-{
-	struct myri_eth *mp = dev_get_drvdata(&op->dev);
-	struct net_device *net_dev = mp->dev;
-
-	unregister_netdev(net_dev);
-
-	free_irq(net_dev->irq, net_dev);
-
-	if (mp->eeprom.cpuvers < CPUVERS_4_0) {
-		of_iounmap(&op->resource[0], mp->regs, mp->reg_size);
-	} else {
-		of_iounmap(&op->resource[0], mp->cregs, PAGE_SIZE);
-		of_iounmap(&op->resource[0], mp->lregs, (256 * 1024));
-		of_iounmap(&op->resource[0], mp->lanai, (512 * 1024));
-	}
-
-	free_netdev(net_dev);
-
-	dev_set_drvdata(&op->dev, NULL);
-
-	return 0;
-}
-
-static const struct of_device_id myri_sbus_match[] = {
-	{
-		.name = "MYRICOM,mlanai",
-	},
-	{
-		.name = "myri",
-	},
-	{},
-};
-
-MODULE_DEVICE_TABLE(of, myri_sbus_match);
-
-static struct platform_driver myri_sbus_driver = {
-	.driver = {
-		.name = "myri",
-		.owner = THIS_MODULE,
-		.of_match_table = myri_sbus_match,
-	},
-	.probe		= myri_sbus_probe,
-	.remove		= __devexit_p(myri_sbus_remove),
-};
-
-static int __init myri_sbus_init(void)
-{
-	return platform_driver_register(&myri_sbus_driver);
-}
-
-static void __exit myri_sbus_exit(void)
-{
-	platform_driver_unregister(&myri_sbus_driver);
-}
-
-module_init(myri_sbus_init);
-module_exit(myri_sbus_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_FIRMWARE(FWNAME);
diff --git a/drivers/net/myri_sbus.h b/drivers/net/myri_sbus.h
deleted file mode 100644
index 80a2fa5..0000000
--- a/drivers/net/myri_sbus.h
+++ /dev/null
@@ -1,311 +0,0 @@
-/* myri_sbus.h: Defines for MyriCOM MyriNET SBUS card driver.
- *
- * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
- */
-
-#ifndef _MYRI_SBUS_H
-#define _MYRI_SBUS_H
-
-/* LANAI Registers */
-#define LANAI_IPF0	0x00UL		/* Context zero state registers.*/
-#define LANAI_CUR0	0x04UL
-#define LANAI_PREV0	0x08UL
-#define LANAI_DATA0	0x0cUL
-#define LANAI_DPF0	0x10UL
-#define LANAI_IPF1	0x14UL		/* Context one state registers.	*/
-#define LANAI_CUR1	0x18UL
-#define LANAI_PREV1	0x1cUL
-#define LANAI_DATA1	0x20UL
-#define LANAI_DPF1	0x24UL
-#define LANAI_ISTAT	0x28UL		/* Interrupt status.		*/
-#define LANAI_EIMASK	0x2cUL		/* External IRQ mask.		*/
-#define LANAI_ITIMER	0x30UL		/* IRQ timer.			*/
-#define LANAI_RTC	0x34UL		/* Real Time Clock		*/
-#define LANAI_CSUM	0x38UL		/* Checksum.			*/
-#define LANAI_DMAXADDR	0x3cUL		/* SBUS DMA external address.	*/
-#define LANAI_DMALADDR	0x40UL		/* SBUS DMA local address.	*/
-#define LANAI_DMACTR	0x44UL		/* SBUS DMA counter.		*/
-#define LANAI_RXDMAPTR	0x48UL		/* Receive DMA pointer.		*/
-#define LANAI_RXDMALIM	0x4cUL		/* Receive DMA limit.		*/
-#define LANAI_TXDMAPTR	0x50UL		/* Transmit DMA pointer.	*/
-#define LANAI_TXDMALIM	0x54UL		/* Transmit DMA limit.		*/
-#define LANAI_TXDMALIMT	0x58UL		/* Transmit DMA limit w/tail.	*/
-	/* 0x5cUL, reserved */
-#define LANAI_RBYTE	0x60UL		/* Receive byte.		*/
-	/* 0x64-->0x6c, reserved */
-#define LANAI_RHALF	0x70UL		/* Receive half-word.		*/
-	/* 0x72UL, reserved */
-#define LANAI_RWORD	0x74UL		/* Receive word.		*/
-#define LANAI_SALIGN	0x78UL		/* Send align.			*/
-#define LANAI_SBYTE	0x7cUL		/* SingleSend send-byte.	*/
-#define LANAI_SHALF	0x80UL		/* SingleSend send-halfword.	*/
-#define LANAI_SWORD	0x84UL		/* SingleSend send-word.	*/
-#define LANAI_SSENDT	0x88UL		/* SingleSend special.		*/
-#define LANAI_DMADIR	0x8cUL		/* DMA direction.		*/
-#define LANAI_DMASTAT	0x90UL		/* DMA status.			*/
-#define LANAI_TIMEO	0x94UL		/* Timeout register.		*/
-#define LANAI_MYRINET	0x98UL		/* XXX MAGIC myricom thing	*/
-#define LANAI_HWDEBUG	0x9cUL		/* Hardware debugging reg.	*/
-#define LANAI_LEDS	0xa0UL		/* LED control.			*/
-#define LANAI_VERS	0xa4UL		/* Version register.		*/
-#define LANAI_LINKON	0xa8UL		/* Link activation reg.		*/
-	/* 0xac-->0x104, reserved */
-#define LANAI_CVAL	0x108UL		/* Clock value register.	*/
-#define LANAI_REG_SIZE	0x10cUL
-
-/* Interrupt status bits. */
-#define ISTAT_DEBUG	0x80000000
-#define ISTAT_HOST	0x40000000
-#define ISTAT_LAN7	0x00800000
-#define ISTAT_LAN6	0x00400000
-#define ISTAT_LAN5	0x00200000
-#define ISTAT_LAN4	0x00100000
-#define ISTAT_LAN3	0x00080000
-#define ISTAT_LAN2	0x00040000
-#define ISTAT_LAN1	0x00020000
-#define ISTAT_LAN0	0x00010000
-#define ISTAT_WRDY	0x00008000
-#define ISTAT_HRDY	0x00004000
-#define ISTAT_SRDY	0x00002000
-#define ISTAT_LINK	0x00001000
-#define ISTAT_FRES	0x00000800
-#define ISTAT_NRES	0x00000800
-#define ISTAT_WAKE	0x00000400
-#define ISTAT_OB2	0x00000200
-#define ISTAT_OB1	0x00000100
-#define ISTAT_TAIL	0x00000080
-#define ISTAT_WDOG	0x00000040
-#define ISTAT_TIME	0x00000020
-#define ISTAT_DMA	0x00000010
-#define ISTAT_SEND	0x00000008
-#define ISTAT_BUF	0x00000004
-#define ISTAT_RECV	0x00000002
-#define ISTAT_BRDY	0x00000001
-
-/* MYRI Registers */
-#define MYRI_RESETOFF	0x00UL
-#define MYRI_RESETON	0x04UL
-#define MYRI_IRQOFF	0x08UL
-#define MYRI_IRQON	0x0cUL
-#define MYRI_WAKEUPOFF	0x10UL
-#define MYRI_WAKEUPON	0x14UL
-#define MYRI_IRQREAD	0x18UL
-	/* 0x1c-->0x3ffc, reserved */
-#define MYRI_LOCALMEM	0x4000UL
-#define MYRI_REG_SIZE	0x25000UL
-
-/* Shared memory interrupt mask. */
-#define SHMEM_IMASK_RX		0x00000002
-#define SHMEM_IMASK_TX		0x00000001
-
-/* Just to make things readable. */
-#define KERNEL_CHANNEL		0
-
-/* The size of this must be >= 129 bytes. */
-struct myri_eeprom {
-	unsigned int		cval;
-	unsigned short		cpuvers;
-	unsigned char		id[6];
-	unsigned int		ramsz;
-	unsigned char		fvers[32];
-	unsigned char		mvers[16];
-	unsigned short		dlval;
-	unsigned short		brd_type;
-	unsigned short		bus_type;
-	unsigned short		prod_code;
-	unsigned int		serial_num;
-	unsigned short		_reserved[24];
-	unsigned int		_unused[2];
-};
-
-/* EEPROM bus types, only SBUS is valid in this driver. */
-#define BUS_TYPE_SBUS		1
-
-/* EEPROM CPU revisions. */
-#define CPUVERS_2_3		0x0203
-#define CPUVERS_3_0		0x0300
-#define CPUVERS_3_1		0x0301
-#define CPUVERS_3_2		0x0302
-#define CPUVERS_4_0		0x0400
-#define CPUVERS_4_1		0x0401
-#define CPUVERS_4_2		0x0402
-#define CPUVERS_5_0		0x0500
-
-/* MYRI Control Registers */
-#define MYRICTRL_CTRL		0x00UL
-#define MYRICTRL_IRQLVL		0x02UL
-#define MYRICTRL_REG_SIZE	0x04UL
-
-/* Global control register defines. */
-#define CONTROL_ROFF		0x8000	/* Reset OFF.		*/
-#define CONTROL_RON		0x4000	/* Reset ON.		*/
-#define CONTROL_EIRQ		0x2000	/* Enable IRQ's.	*/
-#define CONTROL_DIRQ		0x1000	/* Disable IRQ's.	*/
-#define CONTROL_WON		0x0800	/* Wake-up ON.		*/
-
-#define MYRI_SCATTER_ENTRIES	8
-#define MYRI_GATHER_ENTRIES	16
-
-struct myri_sglist {
-	u32 addr;
-	u32 len;
-};
-
-struct myri_rxd {
-	struct myri_sglist myri_scatters[MYRI_SCATTER_ENTRIES];	/* DMA scatter list.*/
-	u32 csum;	/* HW computed checksum.    */
-	u32 ctx;
-	u32 num_sg;	/* Total scatter entries.   */
-};
-
-struct myri_txd {
-	struct myri_sglist myri_gathers[MYRI_GATHER_ENTRIES]; /* DMA scatter list.  */
-	u32 num_sg;	/* Total scatter entries.   */
-	u16 addr[4];	/* XXX address              */
-	u32 chan;
-	u32 len;	/* Total length of packet.  */
-	u32 csum_off;	/* Where data to csum is.   */
-	u32 csum_field;	/* Where csum goes in pkt.  */
-};
-
-#define MYRINET_MTU        8432
-#define RX_ALLOC_SIZE      8448
-#define MYRI_PAD_LEN       2
-#define RX_COPY_THRESHOLD  256
-
-/* These numbers are cast in stone, new firmware is needed if
- * you want to change them.
- */
-#define TX_RING_MAXSIZE    16
-#define RX_RING_MAXSIZE    16
-
-#define TX_RING_SIZE       16
-#define RX_RING_SIZE       16
-
-/* GRRR... */
-static __inline__ int NEXT_RX(int num)
-{
-	/* XXX >=??? */
-	if(++num > RX_RING_SIZE)
-		num = 0;
-	return num;
-}
-
-static __inline__ int PREV_RX(int num)
-{
-	if(--num < 0)
-		num = RX_RING_SIZE;
-	return num;
-}
-
-#define NEXT_TX(num)	(((num) + 1) & (TX_RING_SIZE - 1))
-#define PREV_TX(num)	(((num) - 1) & (TX_RING_SIZE - 1))
-
-#define TX_BUFFS_AVAIL(head, tail)		\
-	((head) <= (tail) ?			\
-	 (head) + (TX_RING_SIZE - 1) - (tail) :	\
-	 (head) - (tail) - 1)
-
-struct sendq {
-	u32	tail;
-	u32	head;
-	u32	hdebug;
-	u32	mdebug;
-	struct myri_txd	myri_txd[TX_RING_MAXSIZE];
-};
-
-struct recvq {
-	u32	head;
-	u32	tail;
-	u32	hdebug;
-	u32	mdebug;
-	struct myri_rxd	myri_rxd[RX_RING_MAXSIZE + 1];
-};
-
-#define MYRI_MLIST_SIZE 8
-
-struct mclist {
-	u32 maxlen;
-	u32 len;
-	u32 cache;
-	struct pair {
-		u8 addr[8];
-		u32 val;
-	} mc_pairs[MYRI_MLIST_SIZE];
-	u8 bcast_addr[8];
-};
-
-struct myri_channel {
-	u32		state;		/* State of the channel.	*/
-	u32		busy;		/* Channel is busy.		*/
-	struct sendq	sendq;		/* Device tx queue.		*/
-	struct recvq	recvq;		/* Device rx queue.		*/
-	struct recvq	recvqa;		/* Device rx queue acked.	*/
-	u32		rbytes;		/* Receive bytes.		*/
-	u32		sbytes;		/* Send bytes.			*/
-	u32		rmsgs;		/* Receive messages.		*/
-	u32		smsgs;		/* Send messages.		*/
-	struct mclist	mclist;		/* Device multicast list.	*/
-};
-
-/* Values for per-channel state. */
-#define STATE_WFH	0		/* Waiting for HOST.		*/
-#define STATE_WFN	1		/* Waiting for NET.		*/
-#define STATE_READY	2		/* Ready.			*/
-
-struct myri_shmem {
-	u8	addr[8];		/* Board's address.		*/
-	u32	nchan;			/* Number of channels.		*/
-	u32	burst;			/* SBUS dma burst enable.	*/
-	u32	shakedown;		/* DarkkkkStarrr Crashesss...	*/
-	u32	send;			/* Send wanted.			*/
-	u32	imask;			/* Interrupt enable mask.	*/
-	u32	mlevel;			/* Map level.			*/
-	u32	debug[4];		/* Misc. debug areas.		*/
-	struct myri_channel channel;	/* Only one channel on a host.	*/
-};
-
-struct myri_eth {
-	/* These are frequently accessed, keep together
-	 * to obtain good cache hit rates.
-	 */
-	spinlock_t			irq_lock;
-	struct myri_shmem __iomem	*shmem;		/* Shared data structures.    */
-	void __iomem			*cregs;		/* Control register space.    */
-	struct recvq __iomem		*rqack;		/* Where we ack rx's.         */
-	struct recvq __iomem		*rq;		/* Where we put buffers.      */
-	struct sendq __iomem		*sq;		/* Where we stuff tx's.       */
-	struct net_device		*dev;		/* Linux/NET dev struct.      */
-	int				tx_old;		/* To speed up tx cleaning.   */
-	void __iomem			*lregs;		/* Quick ptr to LANAI regs.   */
-	struct sk_buff	       *rx_skbs[RX_RING_SIZE+1];/* RX skb's                   */
-	struct sk_buff	       *tx_skbs[TX_RING_SIZE];  /* TX skb's                   */
-
-	/* These are less frequently accessed. */
-	void __iomem			*regs;          /* MyriCOM register space.    */
-	void __iomem			*lanai;		/* View 2 of register space.  */
-	unsigned int			myri_bursts;	/* SBUS bursts.               */
-	struct myri_eeprom		eeprom;		/* Local copy of EEPROM.      */
-	unsigned int			reg_size;	/* Size of register space.    */
-	unsigned int			shmem_base;	/* Offset to shared ram.      */
-	struct platform_device		*myri_op;	/* Our OF device struct.    */
-};
-
-/* We use this to acquire receive skb's that we can DMA directly into. */
-#define ALIGNED_RX_SKB_ADDR(addr) \
-        ((((unsigned long)(addr) + (64 - 1)) & ~(64 - 1)) - (unsigned long)(addr))
-static inline struct sk_buff *myri_alloc_skb(unsigned int length, gfp_t gfp_flags)
-{
-	struct sk_buff *skb;
-
-	skb = alloc_skb(length + 64, gfp_flags);
-	if(skb) {
-		int offset = ALIGNED_RX_SKB_ADDR(skb->data);
-
-		if(offset)
-			skb_reserve(skb, offset);
-	}
-	return skb;
-}
-
-#endif /* !(_MYRI_SBUS_H) */
diff --git a/drivers/net/netx-eth.c b/drivers/net/netx-eth.c
index 2e4b421..2dfee89 100644
--- a/drivers/net/netx-eth.c
+++ b/drivers/net/netx-eth.c
@@ -18,6 +18,7 @@
  */
 
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/delay.h>
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
index 5cef718..3f89e57 100644
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -809,6 +809,9 @@
 	u64 word;
 	int rv = 0;
 
+	if (!test_bit(__NX_FW_ATTACHED, &adapter->state))
+		return 0;
+
 	memset(&req, 0, sizeof(nx_nic_req_t));
 
 	req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);
@@ -959,6 +962,9 @@
 	u64 word;
 	int rv;
 
+	if (!test_bit(__NX_FW_ATTACHED, &adapter->state))
+		return 0;
+
 	memset(&req, 0, sizeof(nx_nic_req_t));
 	req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);
 
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 7f99967..ca59b4f 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -1279,7 +1279,7 @@
 
 			if (--i == 0)
 				break;
-		};
+		}
 	}
 
 	if (i) {
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index c0788a3..30f41e6 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -92,7 +92,8 @@
 static irqreturn_t netxen_msix_intr(int irq, void *data);
 
 static void netxen_config_indev_addr(struct net_device *dev, unsigned long);
-static struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev);
+static struct rtnl_link_stats64 *netxen_nic_get_stats(struct net_device *dev,
+						      struct rtnl_link_stats64 *stats);
 static int netxen_nic_set_mac(struct net_device *netdev, void *p);
 
 /*  PCI Device ID Table  */
@@ -520,7 +521,7 @@
 	.ndo_open	   = netxen_nic_open,
 	.ndo_stop	   = netxen_nic_close,
 	.ndo_start_xmit    = netxen_nic_xmit_frame,
-	.ndo_get_stats	   = netxen_nic_get_stats,
+	.ndo_get_stats64   = netxen_nic_get_stats,
 	.ndo_validate_addr = eth_validate_addr,
 	.ndo_set_multicast_list = netxen_set_multicast_list,
 	.ndo_set_mac_address    = netxen_nic_set_mac,
@@ -2110,10 +2111,10 @@
 	clear_bit(__NX_RESETTING, &adapter->state);
 }
 
-static struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev)
+static struct rtnl_link_stats64 *netxen_nic_get_stats(struct net_device *netdev,
+						      struct rtnl_link_stats64 *stats)
 {
 	struct netxen_adapter *adapter = netdev_priv(netdev);
-	struct net_device_stats *stats = &netdev->stats;
 
 	stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
 	stats->tx_packets = adapter->stats.xmitfinished;
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index cc25bff0..1c7b790 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -7,6 +7,7 @@
 
 #include <linux/module.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/pci.h>
 #include <linux/dma-mapping.h>
 #include <linux/netdevice.h>
@@ -6248,9 +6249,10 @@
 		niu_sync_bmac_stats(np);
 }
 
-static void niu_get_rx_stats(struct niu *np)
+static void niu_get_rx_stats(struct niu *np,
+			     struct rtnl_link_stats64 *stats)
 {
-	unsigned long pkts, dropped, errors, bytes;
+	u64 pkts, dropped, errors, bytes;
 	struct rx_ring_info *rx_rings;
 	int i;
 
@@ -6272,15 +6274,16 @@
 	}
 
 no_rings:
-	np->dev->stats.rx_packets = pkts;
-	np->dev->stats.rx_bytes = bytes;
-	np->dev->stats.rx_dropped = dropped;
-	np->dev->stats.rx_errors = errors;
+	stats->rx_packets = pkts;
+	stats->rx_bytes = bytes;
+	stats->rx_dropped = dropped;
+	stats->rx_errors = errors;
 }
 
-static void niu_get_tx_stats(struct niu *np)
+static void niu_get_tx_stats(struct niu *np,
+			     struct rtnl_link_stats64 *stats)
 {
-	unsigned long pkts, errors, bytes;
+	u64 pkts, errors, bytes;
 	struct tx_ring_info *tx_rings;
 	int i;
 
@@ -6299,20 +6302,22 @@
 	}
 
 no_rings:
-	np->dev->stats.tx_packets = pkts;
-	np->dev->stats.tx_bytes = bytes;
-	np->dev->stats.tx_errors = errors;
+	stats->tx_packets = pkts;
+	stats->tx_bytes = bytes;
+	stats->tx_errors = errors;
 }
 
-static struct net_device_stats *niu_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *niu_get_stats(struct net_device *dev,
+					       struct rtnl_link_stats64 *stats)
 {
 	struct niu *np = netdev_priv(dev);
 
 	if (netif_running(dev)) {
-		niu_get_rx_stats(np);
-		niu_get_tx_stats(np);
+		niu_get_rx_stats(np, stats);
+		niu_get_tx_stats(np, stats);
 	}
-	return &dev->stats;
+
+	return stats;
 }
 
 static void niu_load_hash_xmac(struct niu *np, u16 *hash)
@@ -9710,7 +9715,7 @@
 	.ndo_open		= niu_open,
 	.ndo_stop		= niu_close,
 	.ndo_start_xmit		= niu_start_xmit,
-	.ndo_get_stats		= niu_get_stats,
+	.ndo_get_stats64	= niu_get_stats,
 	.ndo_set_multicast_list	= niu_set_rx_mode,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_set_mac_address	= niu_set_mac_addr,
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index 3e4040f..d3afb45 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -106,6 +106,7 @@
 #include <linux/delay.h>
 #include <linux/workqueue.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/ip.h>	/* for iph */
 #include <linux/in.h>	/* for IPPROTO_... */
 #include <linux/compiler.h>
diff --git a/drivers/net/octeon/octeon_mgmt.c b/drivers/net/octeon/octeon_mgmt.c
index b264f0f..429e08c 100644
--- a/drivers/net/octeon/octeon_mgmt.c
+++ b/drivers/net/octeon/octeon_mgmt.c
@@ -9,6 +9,7 @@
 #include <linux/capability.h>
 #include <linux/dma-mapping.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/platform_device.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index 9a09e24..d4cbc29 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -109,11 +109,7 @@
 	value = phy_read(phydev, 16);
 	value |= 0x3;
 
-	err = phy_write(phydev, 16, value);
-	if (err < 0)
-		return err;
-
-	return err;
+	return phy_write(phydev, 16, value);
 }
 
 static int ip175c_read_status(struct phy_device *phydev)
diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c
index c554a39..c6ba643 100644
--- a/drivers/net/ppp_async.c
+++ b/drivers/net/ppp_async.c
@@ -30,6 +30,7 @@
 #include <linux/ppp_channel.h>
 #include <linux/spinlock.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/jiffies.h>
 #include <linux/slab.h>
 #include <asm/unaligned.h>
diff --git a/drivers/net/ppp_synctty.c b/drivers/net/ppp_synctty.c
index 2573f52..736a39e 100644
--- a/drivers/net/ppp_synctty.c
+++ b/drivers/net/ppp_synctty.c
@@ -44,6 +44,7 @@
 #include <linux/spinlock.h>
 #include <linux/completion.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/slab.h>
 #include <asm/unaligned.h>
 #include <asm/uaccess.h>
diff --git a/drivers/net/ps3_gelic_net.c b/drivers/net/ps3_gelic_net.c
index b1f251d..35e47c3 100644
--- a/drivers/net/ps3_gelic_net.c
+++ b/drivers/net/ps3_gelic_net.c
@@ -28,6 +28,7 @@
 
 #undef DEBUG
 
+#include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/slab.h>
@@ -1009,7 +1010,7 @@
 				netdev = card->netdev[i];
 				break;
 			}
-		};
+		}
 		if (GELIC_PORT_MAX <= i) {
 			pr_info("%s: unknown packet vid=%x\n", __func__, vid);
 			goto refill;
diff --git a/drivers/net/pxa168_eth.c b/drivers/net/pxa168_eth.c
index 5f597ca..1f97db1 100644
--- a/drivers/net/pxa168_eth.c
+++ b/drivers/net/pxa168_eth.c
@@ -1267,6 +1267,9 @@
 	pep->tx_skb[tx_index] = skb;
 	desc->byte_cnt = length;
 	desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);
+
+	skb_tx_timestamp(skb);
+
 	wmb();
 	desc->cmd_sts = BUF_OWNED_BY_DMA | TX_GEN_CRC | TX_FIRST_DESC |
 			TX_ZERO_PADDING | TX_LAST_DESC | TX_EN_INT;
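
The pxa168 hunk above, like the r6040 and smsc911x hunks below, wires skb_tx_timestamp() into the transmit path so software TX timestamps requested via SO_TIMESTAMPING are taken. The call must land after the skb is fully prepared but before the hardware can complete (and free) it; a sketch under that assumption, with an illustrative doorbell helper:

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* ... map skb->data and fill the TX descriptor ... */

	/* Timestamp before ownership passes to the MAC: once the device
	 * owns the descriptor, the skb may be completed and freed at any
	 * moment. */
	skb_tx_timestamp(skb);

	wmb();	/* descriptor writes must be visible before the doorbell */
	/* example_hw_kick(dev); -- hypothetical, kick the hardware here */

	return NETDEV_TX_OK;
}
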
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index d328507..7d8483f 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -7,6 +7,7 @@
 #ifndef _QLGE_H_
 #define _QLGE_H_
 
+#include <linux/interrupt.h>
 #include <linux/pci.h>
 #include <linux/netdevice.h>
 #include <linux/rtnetlink.h>
diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c
index 19b00fa..9b67bfe 100644
--- a/drivers/net/qlge/qlge_ethtool.c
+++ b/drivers/net/qlge/qlge_ethtool.c
@@ -650,8 +650,6 @@
 		return -EINVAL;
 
 	status = ql_mb_set_port_cfg(qdev);
-	if (status)
-		return status;
 	return status;
 }
 
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index 200a363..00f06e9 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -836,6 +836,9 @@
 	descptr->buf = cpu_to_le32(pci_map_single(lp->pdev,
 		skb->data, skb->len, PCI_DMA_TODEVICE));
 	descptr->status = DSC_OWNER_MAC;
+
+	skb_tx_timestamp(skb);
+
 	/* Trigger the MAC to check the TX descriptor */
 	iowrite16(0x01, ioaddr + MTPR);
 	lp->tx_insert_ptr = descptr->vndescp;
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 05d8178..f8c4435 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -22,6 +22,7 @@
 #include <linux/ip.h>
 #include <linux/tcp.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/dma-mapping.h>
 #include <linux/pm_runtime.h>
 #include <linux/firmware.h>
@@ -666,7 +667,18 @@
 	struct rtl8169_counters counters;
 	u32 saved_wolopts;
 
-	const struct firmware *fw;
+	struct rtl_fw {
+		const struct firmware *fw;
+
+#define RTL_VER_SIZE		32
+
+		char version[RTL_VER_SIZE];
+
+		struct rtl_fw_phy_action {
+			__le32 *code;
+			size_t size;
+		} phy_action;
+	} *rtl_fw;
 #define RTL_FIRMWARE_UNKNOWN	ERR_PTR(-EAGAIN)
 };
 
@@ -1221,12 +1233,14 @@
 				struct ethtool_drvinfo *info)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
+	struct rtl_fw *rtl_fw = tp->rtl_fw;
 
 	strcpy(info->driver, MODULENAME);
 	strcpy(info->version, RTL8169_VERSION);
 	strcpy(info->bus_info, pci_name(tp->pci_dev));
-	strncpy(info->fw_version, IS_ERR_OR_NULL(tp->fw) ? "N/A" :
-		rtl_lookup_firmware_name(tp), sizeof(info->fw_version) - 1);
+	BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version));
+	strcpy(info->fw_version, IS_ERR_OR_NULL(rtl_fw) ? "N/A" :
+	       rtl_fw->version);
 }
 
 static int rtl8169_get_regs_len(struct net_device *dev)
@@ -1741,21 +1755,75 @@
 #define PHY_DELAY_MS		0xe0000000
 #define PHY_WRITE_ERI_WORD	0xf0000000
 
-static void
-rtl_phy_write_fw(struct rtl8169_private *tp, const struct firmware *fw)
+struct fw_info {
+	u32	magic;
+	char	version[RTL_VER_SIZE];
+	__le32	fw_start;
+	__le32	fw_len;
+	u8	chksum;
+} __packed;
+
+#define FW_OPCODE_SIZE	sizeof(typeof(*((struct rtl_fw_phy_action *)0)->code))
+
+static bool rtl_fw_format_ok(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
 {
-	__le32 *phytable = (__le32 *)fw->data;
-	struct net_device *dev = tp->dev;
-	size_t index, fw_size = fw->size / sizeof(*phytable);
-	u32 predata, count;
+	const struct firmware *fw = rtl_fw->fw;
+	struct fw_info *fw_info = (struct fw_info *)fw->data;
+	struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
+	char *version = rtl_fw->version;
+	bool rc = false;
 
-	if (fw->size % sizeof(*phytable)) {
-		netif_err(tp, probe, dev, "odd sized firmware %zd\n", fw->size);
-		return;
+	if (fw->size < FW_OPCODE_SIZE)
+		goto out;
+
+	if (!fw_info->magic) {
+		size_t i, size, start;
+		u8 checksum = 0;
+
+		if (fw->size < sizeof(*fw_info))
+			goto out;
+
+		for (i = 0; i < fw->size; i++)
+			checksum += fw->data[i];
+		if (checksum != 0)
+			goto out;
+
+		start = le32_to_cpu(fw_info->fw_start);
+		if (start > fw->size)
+			goto out;
+
+		size = le32_to_cpu(fw_info->fw_len);
+		if (size > (fw->size - start) / FW_OPCODE_SIZE)
+			goto out;
+
+		memcpy(version, fw_info->version, RTL_VER_SIZE);
+
+		pa->code = (__le32 *)(fw->data + start);
+		pa->size = size;
+	} else {
+		if (fw->size % FW_OPCODE_SIZE)
+			goto out;
+
+		strlcpy(version, rtl_lookup_firmware_name(tp), RTL_VER_SIZE);
+
+		pa->code = (__le32 *)fw->data;
+		pa->size = fw->size / FW_OPCODE_SIZE;
 	}
+	version[RTL_VER_SIZE - 1] = 0;
 
-	for (index = 0; index < fw_size; index++) {
-		u32 action = le32_to_cpu(phytable[index]);
+	rc = true;
+out:
+	return rc;
+}
+
+static bool rtl_fw_data_ok(struct rtl8169_private *tp, struct net_device *dev,
+			   struct rtl_fw_phy_action *pa)
+{
+	bool rc = false;
+	size_t index;
+
+	for (index = 0; index < pa->size; index++) {
+		u32 action = le32_to_cpu(pa->code[index]);
 		u32 regno = (action & 0x0fff0000) >> 16;
 
 		switch(action & 0xf0000000) {
@@ -1771,25 +1839,25 @@
 
 		case PHY_BJMPN:
 			if (regno > index) {
-				netif_err(tp, probe, tp->dev,
+				netif_err(tp, ifup, tp->dev,
 					  "Out of range of firmware\n");
-				return;
+				goto out;
 			}
 			break;
 		case PHY_READCOUNT_EQ_SKIP:
-			if (index + 2 >= fw_size) {
-				netif_err(tp, probe, tp->dev,
+			if (index + 2 >= pa->size) {
+				netif_err(tp, ifup, tp->dev,
 					  "Out of range of firmware\n");
-				return;
+				goto out;
 			}
 			break;
 		case PHY_COMP_EQ_SKIPN:
 		case PHY_COMP_NEQ_SKIPN:
 		case PHY_SKIPN:
-			if (index + 1 + regno >= fw_size) {
-				netif_err(tp, probe, tp->dev,
+			if (index + 1 + regno >= pa->size) {
+				netif_err(tp, ifup, tp->dev,
 					  "Out of range of firmware\n");
-				return;
+				goto out;
 			}
 			break;
 
@@ -1797,17 +1865,42 @@
 		case PHY_WRITE_MAC_BYTE:
 		case PHY_WRITE_ERI_WORD:
 		default:
-			netif_err(tp, probe, tp->dev,
+			netif_err(tp, ifup, tp->dev,
 				  "Invalid action 0x%08x\n", action);
-			return;
+			goto out;
 		}
 	}
+	rc = true;
+out:
+	return rc;
+}
 
-	predata = 0;
-	count = 0;
+static int rtl_check_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
+{
+	struct net_device *dev = tp->dev;
+	int rc = -EINVAL;
 
-	for (index = 0; index < fw_size; ) {
-		u32 action = le32_to_cpu(phytable[index]);
+	if (!rtl_fw_format_ok(tp, rtl_fw)) {
+		netif_err(tp, ifup, dev, "invalid firmware\n");
+		goto out;
+	}
+
+	if (rtl_fw_data_ok(tp, dev, &rtl_fw->phy_action))
+		rc = 0;
+out:
+	return rc;
+}
+
+static void rtl_phy_write_fw(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
+{
+	struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
+	u32 predata, count;
+	size_t index;
+
+	predata = count = 0;
+
+	for (index = 0; index < pa->size; ) {
+		u32 action = le32_to_cpu(pa->code[index]);
 		u32 data = action & 0x0000ffff;
 		u32 regno = (action & 0x0fff0000) >> 16;
 
@@ -1879,18 +1972,20 @@
 
 static void rtl_release_firmware(struct rtl8169_private *tp)
 {
-	if (!IS_ERR_OR_NULL(tp->fw))
-		release_firmware(tp->fw);
-	tp->fw = RTL_FIRMWARE_UNKNOWN;
+	if (!IS_ERR_OR_NULL(tp->rtl_fw)) {
+		release_firmware(tp->rtl_fw->fw);
+		kfree(tp->rtl_fw);
+	}
+	tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
 }
 
 static void rtl_apply_firmware(struct rtl8169_private *tp)
 {
-	const struct firmware *fw = tp->fw;
+	struct rtl_fw *rtl_fw = tp->rtl_fw;
 
 	/* TODO: release firmware once rtl_phy_write_fw signals failures. */
-	if (!IS_ERR_OR_NULL(fw))
-		rtl_phy_write_fw(tp, fw);
+	if (!IS_ERR_OR_NULL(rtl_fw))
+		rtl_phy_write_fw(tp, rtl_fw);
 }
 
 static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
@@ -3443,7 +3538,7 @@
 	tp->timer.data = (unsigned long) dev;
 	tp->timer.function = rtl8169_phy_timer;
 
-	tp->fw = RTL_FIRMWARE_UNKNOWN;
+	tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
 
 	rc = register_netdev(dev);
 	if (rc < 0)
@@ -3512,25 +3607,48 @@
 	pci_set_drvdata(pdev, NULL);
 }
 
+static void rtl_request_uncached_firmware(struct rtl8169_private *tp)
+{
+	struct rtl_fw *rtl_fw;
+	const char *name;
+	int rc = -ENOMEM;
+
+	name = rtl_lookup_firmware_name(tp);
+	if (!name)
+		goto out_no_firmware;
+
+	rtl_fw = kzalloc(sizeof(*rtl_fw), GFP_KERNEL);
+	if (!rtl_fw)
+		goto err_warn;
+
+	rc = request_firmware(&rtl_fw->fw, name, &tp->pci_dev->dev);
+	if (rc < 0)
+		goto err_free;
+
+	rc = rtl_check_firmware(tp, rtl_fw);
+	if (rc < 0)
+		goto err_release_firmware;
+
+	tp->rtl_fw = rtl_fw;
+out:
+	return;
+
+err_release_firmware:
+	release_firmware(rtl_fw->fw);
+err_free:
+	kfree(rtl_fw);
+err_warn:
+	netif_warn(tp, ifup, tp->dev, "unable to load firmware patch %s (%d)\n",
+		   name, rc);
+out_no_firmware:
+	tp->rtl_fw = NULL;
+	goto out;
+}
+
 static void rtl_request_firmware(struct rtl8169_private *tp)
 {
-	/* Return early if the firmware is already loaded / cached. */
-	if (IS_ERR(tp->fw)) {
-		const char *name;
-
-		name = rtl_lookup_firmware_name(tp);
-		if (name) {
-			int rc;
-
-			rc = request_firmware(&tp->fw, name, &tp->pci_dev->dev);
-			if (rc >= 0)
-				return;
-
-			netif_warn(tp, ifup, tp->dev, "unable to load "
-				"firmware patch %s (%d)\n", name, rc);
-		}
-		tp->fw = NULL;
-	}
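+	/* tp->rtl_fw starts as the RTL_FIRMWARE_UNKNOWN error sentinel;
+	 * a NULL value means a previous load attempt failed, so we only
+	 * ever try to load the firmware once.
+	 */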
+	if (IS_ERR(tp->rtl_fw))
+		rtl_request_uncached_firmware(tp);
 }
 
 static int rtl8169_open(struct net_device *dev)
diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c
index fa74314..9da4733 100644
--- a/drivers/net/sc92031.c
+++ b/drivers/net/sc92031.c
@@ -22,6 +22,7 @@
  * matching, so you need to enable IFF_PROMISC when using it.
  */
 
+#include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/delay.h>
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index f2a2b94..bafa23a 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -10,6 +10,7 @@
 
 #include <linux/bitops.h>
 #include <linux/delay.h>
+#include <linux/interrupt.h>
 #include <linux/pci.h>
 #include <linux/module.h>
 #include <linux/seq_file.h>
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index b436e00..8ad7bfb 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -21,6 +21,7 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/netdevice.h>
diff --git a/drivers/net/skge.h b/drivers/net/skge.h
index 598bf7a..a2eb341 100644
--- a/drivers/net/skge.h
+++ b/drivers/net/skge.h
@@ -3,6 +3,7 @@
  */
 #ifndef _SKGE_H
 #define _SKGE_H
+#include <linux/interrupt.h>
 
 /* PCI config registers */
 #define PCI_DEV_REG1	0x40
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 3ee41da..d252cb1 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -32,6 +32,7 @@
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
 #include <linux/pci.h>
+#include <linux/interrupt.h>
 #include <linux/ip.h>
 #include <linux/slab.h>
 #include <net/ip.h>
diff --git a/drivers/net/slhc.c b/drivers/net/slhc.c
index ab9e3b7..0a0a664 100644
--- a/drivers/net/slhc.c
+++ b/drivers/net/slhc.c
@@ -297,7 +297,7 @@
 		lcs = cs;
 		cs = cs->next;
 		comp->sls_o_searches++;
-	};
+	}
 	/*
 	 * Didn't find it -- re-use oldest cstate.  Send an
 	 * uncompressed packet that tells the other side what
diff --git a/drivers/net/smc-mca.c b/drivers/net/smc-mca.c
index d07c39c..34934fb 100644
--- a/drivers/net/smc-mca.c
+++ b/drivers/net/smc-mca.c
@@ -42,6 +42,7 @@
 #include <linux/errno.h>
 #include <linux/string.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 
diff --git a/drivers/net/smc-ultra.c b/drivers/net/smc-ultra.c
index 235a3c6..ba44ede 100644
--- a/drivers/net/smc-ultra.c
+++ b/drivers/net/smc-ultra.c
@@ -62,6 +62,7 @@
 #include <linux/errno.h>
 #include <linux/string.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/isapnp.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index c6d47d1..b9016a3 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -37,6 +37,7 @@
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/ioport.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -1473,6 +1474,7 @@
 
 	pdata->ops->tx_writefifo(pdata, (unsigned int *)bufp, wrsz);
 	freespace -= (skb->len + 32);
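+	/* Let the stack take a software tx timestamp before the skb is freed */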
+	skb_tx_timestamp(skb);
 	dev_kfree_skb(skb);
 
 	if (unlikely(smsc911x_tx_get_txstatcount(pdata) >= 30))
diff --git a/drivers/net/smsc9420.c b/drivers/net/smsc9420.c
index 4c92ad8..459726f 100644
--- a/drivers/net/smsc9420.c
+++ b/drivers/net/smsc9420.c
@@ -19,6 +19,7 @@
  ***************************************************************************
  */
 
+#include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/netdevice.h>
 #include <linux/phy.h>
@@ -1030,6 +1031,8 @@
 	pd->tx_ring[index].status = TDES0_OWN_;
 	wmb();
 
+	skb_tx_timestamp(skb);
+
 	/* kick the DMA */
 	smsc9420_reg_write(pd, TX_POLL_DEMAND, 1);
 	smsc9420_pci_flush_write(pd);
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 949f124..9bc6c20 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -31,6 +31,7 @@
 #include <linux/if_vlan.h>
 #include <linux/in.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/gfp.h>
 #include <linux/ioport.h>
 #include <linux/ip.h>
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index 36045f3..860a508 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -30,6 +30,7 @@
 #define DRV_VERSION	"2.1"
 #define DRV_RELDATE	"July  6, 2008"
 
+#include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/pci.h>
diff --git a/drivers/net/stmmac/stmmac_ethtool.c b/drivers/net/stmmac/stmmac_ethtool.c
index ae5213a..720c5a1 100644
--- a/drivers/net/stmmac/stmmac_ethtool.c
+++ b/drivers/net/stmmac/stmmac_ethtool.c
@@ -24,6 +24,7 @@
 
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
+#include <linux/interrupt.h>
 #include <linux/mii.h>
 #include <linux/phy.h>
 
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index e25e44a..d4adc80 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -1079,6 +1079,8 @@
 
 	dev->stats.tx_bytes += skb->len;
 
+	skb_tx_timestamp(skb);
+
 	priv->hw->dma->enable_dma_transmission(priv->ioaddr);
 
 	return NETDEV_TX_OK;
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index ab59300..71d4a03 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -10,25 +10,6 @@
  * NAPI and NETPOLL support
  * (C) 2004 by Eric Lemoine (eric.lemoine@gmail.com)
  *
- * TODO:
- *  - Now that the driver was significantly simplified, I need to rework
- *    the locking. I'm sure we don't need _2_ spinlocks, and we probably
- *    can avoid taking most of them for so long period of time (and schedule
- *    instead). The main issues at this point are caused by the netdev layer
- *    though:
- *
- *    gem_change_mtu() and gem_set_multicast() are called with a read_lock()
- *    help by net/core/dev.c, thus they can't schedule. That means they can't
- *    call napi_disable() neither, thus force gem_poll() to keep a spinlock
- *    where it could have been dropped. change_mtu especially would love also to
- *    be able to msleep instead of horrid locked delays when resetting the HW,
- *    but that read_lock() makes it impossible, unless I defer it's action to
- *    the reset task, which means it'll be asynchronous (won't take effect until
- *    the system schedules a bit).
- *
- *    Also, it would probably be possible to also remove most of the long-life
- *    locking in open/resume code path (gem_reinit_chip) by beeing more careful
- *    about when we can start taking interrupts or get xmit() called...
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -57,7 +38,6 @@
 #include <linux/workqueue.h>
 #include <linux/if_vlan.h>
 #include <linux/bitops.h>
-#include <linux/mutex.h>
 #include <linux/mm.h>
 #include <linux/gfp.h>
 
@@ -95,12 +75,11 @@
 			 SUPPORTED_Pause | SUPPORTED_Autoneg)
 
 #define DRV_NAME	"sungem"
-#define DRV_VERSION	"0.98"
-#define DRV_RELDATE	"8/24/03"
-#define DRV_AUTHOR	"David S. Miller (davem@redhat.com)"
+#define DRV_VERSION	"1.0"
+#define DRV_AUTHOR	"David S. Miller <davem@redhat.com>"
 
 static char version[] __devinitdata =
-        DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";
+        DRV_NAME ".c:v" DRV_VERSION " " DRV_AUTHOR "\n";
 
 MODULE_AUTHOR(DRV_AUTHOR);
 MODULE_DESCRIPTION("Sun GEM Gbit ethernet driver");
@@ -218,6 +197,7 @@
 {
 	/* Disable all interrupts, including TXDONE */
 	writel(GREG_STAT_NAPI | GREG_STAT_TXDONE, gp->regs + GREG_IMASK);
+	(void)readl(gp->regs + GREG_IMASK); /* write posting */
 }
 
 static void gem_get_cell(struct gem *gp)
@@ -247,6 +227,29 @@
 #endif /* CONFIG_PPC_PMAC */
 }
 
+static inline void gem_netif_stop(struct gem *gp)
+{
+	gp->dev->trans_start = jiffies;	/* prevent tx timeout */
+	napi_disable(&gp->napi);
+	netif_tx_disable(gp->dev);
+}
+
+static inline void gem_netif_start(struct gem *gp)
+{
+	/* NOTE: unconditional netif_wake_queue is only
+	 * appropriate so long as all callers are assured to
+	 * have free tx slots.
+	 */
+	netif_wake_queue(gp->dev);
+	napi_enable(&gp->napi);
+}
+
+static void gem_schedule_reset(struct gem *gp)
+{
+	gp->reset_task_pending = 1;
+	schedule_work(&gp->reset_task);
+}
+
 static void gem_handle_mif_event(struct gem *gp, u32 reg_val, u32 changed_bits)
 {
 	if (netif_msg_intr(gp))
@@ -604,56 +607,46 @@
 				gp->dev->name);
 		dev->stats.rx_errors++;
 
-		goto do_reset;
+		return 1;
 	}
 
 	if (gem_status & GREG_STAT_PCS) {
 		if (gem_pcs_interrupt(dev, gp, gem_status))
-			goto do_reset;
+			return 1;
 	}
 
 	if (gem_status & GREG_STAT_TXMAC) {
 		if (gem_txmac_interrupt(dev, gp, gem_status))
-			goto do_reset;
+			return 1;
 	}
 
 	if (gem_status & GREG_STAT_RXMAC) {
 		if (gem_rxmac_interrupt(dev, gp, gem_status))
-			goto do_reset;
+			return 1;
 	}
 
 	if (gem_status & GREG_STAT_MAC) {
 		if (gem_mac_interrupt(dev, gp, gem_status))
-			goto do_reset;
+			return 1;
 	}
 
 	if (gem_status & GREG_STAT_MIF) {
 		if (gem_mif_interrupt(dev, gp, gem_status))
-			goto do_reset;
+			return 1;
 	}
 
 	if (gem_status & GREG_STAT_PCIERR) {
 		if (gem_pci_interrupt(dev, gp, gem_status))
-			goto do_reset;
+			return 1;
 	}
 
 	return 0;
-
-do_reset:
-	gp->reset_task_pending = 1;
-	schedule_work(&gp->reset_task);
-
-	return 1;
 }
 
 static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_status)
 {
 	int entry, limit;
 
-	if (netif_msg_intr(gp))
-		printk(KERN_DEBUG "%s: tx interrupt, gem_status: 0x%x\n",
-			gp->dev->name, gem_status);
-
 	entry = gp->tx_old;
 	limit = ((gem_status & GREG_STAT_TXNR) >> GREG_STAT_TXNR_SHIFT);
 	while (entry != limit) {
@@ -697,13 +690,27 @@
 		}
 
 		dev->stats.tx_packets++;
-		dev_kfree_skb_irq(skb);
+		dev_kfree_skb(skb);
 	}
 	gp->tx_old = entry;
 
-	if (netif_queue_stopped(dev) &&
-	    TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))
-		netif_wake_queue(dev);
+	/* Need to make the tx_old update visible to gem_start_xmit()
+	 * before checking for netif_queue_stopped().  Without the
+	 * memory barrier, there is a small possibility that gem_start_xmit()
+	 * will miss it and cause the queue to be stopped forever.
+	 */
+	smp_mb();
+
+	if (unlikely(netif_queue_stopped(dev) &&
+		     TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))) {
+		struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
+
+		__netif_tx_lock(txq, smp_processor_id());
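+		/* Re-check under the tx queue lock so we cannot wake a
+		 * queue that a concurrent gem_start_xmit() has just
+		 * stopped again.
+		 */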
+		if (netif_queue_stopped(dev) &&
+		    TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))
+			netif_wake_queue(dev);
+		__netif_tx_unlock(txq);
+	}
 }
 
 static __inline__ void gem_post_rxds(struct gem *gp, int limit)
@@ -736,6 +743,21 @@
 	}
 }
 
+#define ALIGNED_RX_SKB_ADDR(addr) \
+        ((((unsigned long)(addr) + (64UL - 1UL)) & ~(64UL - 1UL)) - (unsigned long)(addr))
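+/* ALIGNED_RX_SKB_ADDR() is the offset needed to round addr up to the
+ * next 64-byte boundary (0 if already aligned); gem_alloc_skb()
+ * over-allocates by 64 bytes and reserves that offset so the rx data
+ * area starts 64-byte aligned.
+ */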
+static __inline__ struct sk_buff *gem_alloc_skb(struct net_device *dev, int size,
+						gfp_t gfp_flags)
+{
+	struct sk_buff *skb = alloc_skb(size + 64, gfp_flags);
+
+	if (likely(skb)) {
+		unsigned long offset = ALIGNED_RX_SKB_ADDR(skb->data);
+		skb_reserve(skb, offset);
+		skb->dev = dev;
+	}
+	return skb;
+}
+
 static int gem_rx(struct gem *gp, int work_to_do)
 {
 	struct net_device *dev = gp->dev;
@@ -799,7 +821,7 @@
 		if (len > RX_COPY_THRESHOLD) {
 			struct sk_buff *new_skb;
 
-			new_skb = gem_alloc_skb(RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC);
+			new_skb = gem_alloc_skb(dev, RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC);
 			if (new_skb == NULL) {
 				drops++;
 				goto drop_it;
@@ -808,7 +830,6 @@
 				       RX_BUF_ALLOC_SIZE(gp),
 				       PCI_DMA_FROMDEVICE);
 			gp->rx_skbs[entry] = new_skb;
-			new_skb->dev = gp->dev;
 			skb_put(new_skb, (gp->rx_buf_sz + RX_OFFSET));
 			rxd->buffer = cpu_to_le64(pci_map_page(gp->pdev,
 							       virt_to_page(new_skb->data),
@@ -820,7 +841,7 @@
 			/* Trim the original skb for the netif. */
 			skb_trim(skb, len);
 		} else {
-			struct sk_buff *copy_skb = dev_alloc_skb(len + 2);
+			struct sk_buff *copy_skb = netdev_alloc_skb(dev, len + 2);
 
 			if (copy_skb == NULL) {
 				drops++;
@@ -842,7 +863,7 @@
 		skb->ip_summed = CHECKSUM_COMPLETE;
 		skb->protocol = eth_type_trans(skb, gp->dev);
 
-		netif_receive_skb(skb);
+		napi_gro_receive(&gp->napi, skb);
 
 		dev->stats.rx_packets++;
 		dev->stats.rx_bytes += len;
@@ -865,28 +886,32 @@
 {
 	struct gem *gp = container_of(napi, struct gem, napi);
 	struct net_device *dev = gp->dev;
-	unsigned long flags;
 	int work_done;
 
-	/*
-	 * NAPI locking nightmare: See comment at head of driver
-	 */
-	spin_lock_irqsave(&gp->lock, flags);
-
 	work_done = 0;
 	do {
 		/* Handle anomalies */
-		if (gp->status & GREG_STAT_ABNORMAL) {
-			if (gem_abnormal_irq(dev, gp, gp->status))
-				break;
+		if (unlikely(gp->status & GREG_STAT_ABNORMAL)) {
+			struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
+			int reset;
+
+			/* We run the abnormal interrupt handling code with
+			 * the Tx lock held. It only resets the Rx portion of
+			 * the chip, but we need to guard it against DMA being
+			 * restarted by the link poll timer.
+			 */
+			__netif_tx_lock(txq, smp_processor_id());
+			reset = gem_abnormal_irq(dev, gp, gp->status);
+			__netif_tx_unlock(txq);
+			if (reset) {
+				gem_schedule_reset(gp);
+				napi_complete(napi);
+				return work_done;
+			}
 		}
 
 		/* Run TX completion thread */
-		spin_lock(&gp->tx_lock);
 		gem_tx(dev, gp, gp->status);
-		spin_unlock(&gp->tx_lock);
-
-		spin_unlock_irqrestore(&gp->lock, flags);
 
 		/* Run RX thread. We don't use any locking here,
 		 * code willing to do bad things - like cleaning the
@@ -898,16 +923,12 @@
 		if (work_done >= budget)
 			return work_done;
 
-		spin_lock_irqsave(&gp->lock, flags);
-
 		gp->status = readl(gp->regs + GREG_STAT);
 	} while (gp->status & GREG_STAT_NAPI);
 
-	__napi_complete(napi);
+	napi_complete(napi);
 	gem_enable_ints(gp);
 
-	spin_unlock_irqrestore(&gp->lock, flags);
-
 	return work_done;
 }
 
@@ -915,32 +936,23 @@
 {
 	struct net_device *dev = dev_id;
 	struct gem *gp = netdev_priv(dev);
-	unsigned long flags;
-
-	/* Swallow interrupts when shutting the chip down, though
-	 * that shouldn't happen, we should have done free_irq() at
-	 * this point...
-	 */
-	if (!gp->running)
-		return IRQ_HANDLED;
-
-	spin_lock_irqsave(&gp->lock, flags);
 
 	if (napi_schedule_prep(&gp->napi)) {
 		u32 gem_status = readl(gp->regs + GREG_STAT);
 
-		if (gem_status == 0) {
+		if (unlikely(gem_status == 0)) {
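+			/* Not our interrupt: undo napi_schedule_prep() by
+			 * re-enabling NAPI and let the kernel keep polling
+			 * the other handlers.
+			 */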
 			napi_enable(&gp->napi);
-			spin_unlock_irqrestore(&gp->lock, flags);
 			return IRQ_NONE;
 		}
+		if (netif_msg_intr(gp))
+			printk(KERN_DEBUG "%s: gem_interrupt() gem_status: 0x%x\n",
+			       gp->dev->name, gem_status);
+
 		gp->status = gem_status;
 		gem_disable_ints(gp);
 		__napi_schedule(&gp->napi);
 	}
 
-	spin_unlock_irqrestore(&gp->lock, flags);
-
 	/* If polling was disabled at the time we received that
 	 * interrupt, we may return IRQ_HANDLED here while we
 	 * should return IRQ_NONE. No big deal...
@@ -951,10 +963,11 @@
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void gem_poll_controller(struct net_device *dev)
 {
-	/* gem_interrupt is safe to reentrance so no need
-	 * to disable_irq here.
-	 */
-	gem_interrupt(dev->irq, dev);
+	struct gem *gp = netdev_priv(dev);
+
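+	/* Mask the real IRQ while we invoke the handler by hand so the
+	 * two cannot run concurrently.
+	 */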
+	disable_irq(gp->pdev->irq);
+	gem_interrupt(gp->pdev->irq, dev);
+	enable_irq(gp->pdev->irq);
 }
 #endif
 
@@ -963,10 +976,7 @@
 	struct gem *gp = netdev_priv(dev);
 
 	netdev_err(dev, "transmit timed out, resetting\n");
-	if (!gp->running) {
-		netdev_err(dev, "hrm.. hw not running !\n");
-		return;
-	}
+
 	netdev_err(dev, "TX_STATE[%08x:%08x:%08x]\n",
 		   readl(gp->regs + TXDMA_CFG),
 		   readl(gp->regs + MAC_TXSTAT),
@@ -976,14 +986,7 @@
 		   readl(gp->regs + MAC_RXSTAT),
 		   readl(gp->regs + MAC_RXCFG));
 
-	spin_lock_irq(&gp->lock);
-	spin_lock(&gp->tx_lock);
-
-	gp->reset_task_pending = 1;
-	schedule_work(&gp->reset_task);
-
-	spin_unlock(&gp->tx_lock);
-	spin_unlock_irq(&gp->lock);
+	gem_schedule_reset(gp);
 }
 
 static __inline__ int gem_intme(int entry)
@@ -1001,7 +1004,6 @@
 	struct gem *gp = netdev_priv(dev);
 	int entry;
 	u64 ctrl;
-	unsigned long flags;
 
 	ctrl = 0;
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -1013,21 +1015,12 @@
 			(csum_stuff_off << 21));
 	}
 
-	if (!spin_trylock_irqsave(&gp->tx_lock, flags)) {
-		/* Tell upper layer to requeue */
-		return NETDEV_TX_LOCKED;
-	}
-	/* We raced with gem_do_stop() */
-	if (!gp->running) {
-		spin_unlock_irqrestore(&gp->tx_lock, flags);
-		return NETDEV_TX_BUSY;
-	}
-
-	/* This is a hard error, log it. */
-	if (TX_BUFFS_AVAIL(gp) <= (skb_shinfo(skb)->nr_frags + 1)) {
-		netif_stop_queue(dev);
-		spin_unlock_irqrestore(&gp->tx_lock, flags);
-		netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
+	if (unlikely(TX_BUFFS_AVAIL(gp) <= (skb_shinfo(skb)->nr_frags + 1))) {
+		/* This is a hard error, log it. */
+		if (!netif_queue_stopped(dev)) {
+			netif_stop_queue(dev);
+			netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
+		}
 		return NETDEV_TX_BUSY;
 	}
 
@@ -1104,17 +1097,23 @@
 	}
 
 	gp->tx_new = entry;
-	if (TX_BUFFS_AVAIL(gp) <= (MAX_SKB_FRAGS + 1))
+	if (unlikely(TX_BUFFS_AVAIL(gp) <= (MAX_SKB_FRAGS + 1))) {
 		netif_stop_queue(dev);
 
+		/* netif_stop_queue() must be done before checking
+		 * tx index in TX_BUFFS_AVAIL() below, because
+		 * in gem_tx(), we update tx_old before checking for
+		 * netif_queue_stopped().
+		 */
+		smp_mb();
+		if (TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))
+			netif_wake_queue(dev);
+	}
 	if (netif_msg_tx_queued(gp))
 		printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
 		       dev->name, entry, skb->len);
 	mb();
 	writel(gp->tx_new, gp->regs + TXDMA_KICK);
-	spin_unlock_irqrestore(&gp->tx_lock, flags);
-
-	dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
 
 	return NETDEV_TX_OK;
 }
@@ -1184,7 +1183,6 @@
 
 #define STOP_TRIES 32
 
-/* Must be invoked under gp->lock and gp->tx_lock. */
 static void gem_reset(struct gem *gp)
 {
 	int limit;
@@ -1213,7 +1211,6 @@
 		gem_pcs_reinit_adv(gp);
 }
 
-/* Must be invoked under gp->lock and gp->tx_lock. */
 static void gem_start_dma(struct gem *gp)
 {
 	u32 val;
@@ -1236,8 +1233,7 @@
 	writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
 }
 
-/* Must be invoked under gp->lock and gp->tx_lock. DMA won't be
- * actually stopped before about 4ms tho ...
+/* DMA won't actually be stopped before about 4ms though ...
  */
 static void gem_stop_dma(struct gem *gp)
 {
@@ -1259,7 +1255,6 @@
 }
 
 
-/* Must be invoked under gp->lock and gp->tx_lock. */
 // XXX dbl check what that function should do when called on PCS PHY
 static void gem_begin_auto_negotiation(struct gem *gp, struct ethtool_cmd *ep)
 {
@@ -1319,7 +1314,7 @@
 	/* If we are asleep, we don't try to actually setup the PHY, we
 	 * just store the settings
 	 */
-	if (gp->asleep) {
+	if (!netif_device_present(gp->dev)) {
 		gp->phy_mii.autoneg = gp->want_autoneg = autoneg;
 		gp->phy_mii.speed = speed;
 		gp->phy_mii.duplex = duplex;
@@ -1345,13 +1340,12 @@
 
 /* A link-up condition has occurred, initialize and enable the
  * rest of the chip.
- *
- * Must be invoked under gp->lock and gp->tx_lock.
  */
 static int gem_set_link_modes(struct gem *gp)
 {
-	u32 val;
+	struct netdev_queue *txq = netdev_get_tx_queue(gp->dev, 0);
 	int full_duplex, speed, pause;
+	u32 val;
 
 	full_duplex = 0;
 	speed = SPEED_10;
@@ -1375,8 +1369,11 @@
 	netif_info(gp, link, gp->dev, "Link is up at %d Mbps, %s-duplex\n",
 		   speed, (full_duplex ? "full" : "half"));
 
-	if (!gp->running)
-		return 0;
+
+	/* We take the tx queue lock to avoid collisions between
+	 * this code, the tx path and the NAPI-driven error path
+	 */
+	__netif_tx_lock(txq, smp_processor_id());
 
 	val = (MAC_TXCFG_EIPG0 | MAC_TXCFG_NGU);
 	if (full_duplex) {
@@ -1425,18 +1422,6 @@
 			pause = 1;
 	}
 
-	if (netif_msg_link(gp)) {
-		if (pause) {
-			netdev_info(gp->dev,
-				    "Pause is enabled (rxfifo: %d off: %d on: %d)\n",
-				    gp->rx_fifo_sz,
-				    gp->rx_pause_off,
-				    gp->rx_pause_on);
-		} else {
-			netdev_info(gp->dev, "Pause is disabled\n");
-		}
-	}
-
 	if (!full_duplex)
 		writel(512, gp->regs + MAC_STIME);
 	else
@@ -1450,10 +1435,23 @@
 
 	gem_start_dma(gp);
 
+	__netif_tx_unlock(txq);
+
+	if (netif_msg_link(gp)) {
+		if (pause) {
+			netdev_info(gp->dev,
+				    "Pause is enabled (rxfifo: %d off: %d on: %d)\n",
+				    gp->rx_fifo_sz,
+				    gp->rx_pause_off,
+				    gp->rx_pause_on);
+		} else {
+			netdev_info(gp->dev, "Pause is disabled\n");
+		}
+	}
+
 	return 0;
 }
 
-/* Must be invoked under gp->lock and gp->tx_lock. */
 static int gem_mdio_link_not_up(struct gem *gp)
 {
 	switch (gp->lstate) {
@@ -1501,20 +1499,12 @@
 static void gem_link_timer(unsigned long data)
 {
 	struct gem *gp = (struct gem *) data;
+	struct net_device *dev = gp->dev;
 	int restart_aneg = 0;
 
-	if (gp->asleep)
-		return;
-
-	spin_lock_irq(&gp->lock);
-	spin_lock(&gp->tx_lock);
-	gem_get_cell(gp);
-
-	/* If the reset task is still pending, we just
-	 * reschedule the link timer
-	 */
+	/* There's no point doing anything if we're going to be reset */
 	if (gp->reset_task_pending)
-		goto restart;
+		return;
 
 	if (gp->phy_type == phy_serialink ||
 	    gp->phy_type == phy_serdes) {
@@ -1528,7 +1518,7 @@
 				goto restart;
 
 			gp->lstate = link_up;
-			netif_carrier_on(gp->dev);
+			netif_carrier_on(dev);
 			(void)gem_set_link_modes(gp);
 		}
 		goto restart;
@@ -1544,12 +1534,12 @@
 			gp->last_forced_speed = gp->phy_mii.speed;
 			gp->timer_ticks = 5;
 			if (netif_msg_link(gp))
-				netdev_info(gp->dev,
+				netdev_info(dev,
 					    "Got link after fallback, retrying autoneg once...\n");
 			gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, gp->phy_mii.advertising);
 		} else if (gp->lstate != link_up) {
 			gp->lstate = link_up;
-			netif_carrier_on(gp->dev);
+			netif_carrier_on(dev);
 			if (gem_set_link_modes(gp))
 				restart_aneg = 1;
 		}
@@ -1559,11 +1549,11 @@
 		 */
 		if (gp->lstate == link_up) {
 			gp->lstate = link_down;
-			netif_info(gp, link, gp->dev, "Link down\n");
-			netif_carrier_off(gp->dev);
-			gp->reset_task_pending = 1;
-			schedule_work(&gp->reset_task);
-			restart_aneg = 1;
+			netif_info(gp, link, dev, "Link down\n");
+			netif_carrier_off(dev);
+			gem_schedule_reset(gp);
+			/* The reset task will restart the timer */
+			return;
 		} else if (++gp->timer_ticks > 10) {
 			if (found_mii_phy(gp))
 				restart_aneg = gem_mdio_link_not_up(gp);
@@ -1573,17 +1563,12 @@
 	}
 	if (restart_aneg) {
 		gem_begin_auto_negotiation(gp, NULL);
-		goto out_unlock;
+		return;
 	}
 restart:
 	mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));
-out_unlock:
-	gem_put_cell(gp);
-	spin_unlock(&gp->tx_lock);
-	spin_unlock_irq(&gp->lock);
 }
 
-/* Must be invoked under gp->lock and gp->tx_lock. */
 static void gem_clean_rings(struct gem *gp)
 {
 	struct gem_init_block *gb = gp->init_block;
@@ -1634,7 +1619,6 @@
 	}
 }
 
-/* Must be invoked under gp->lock and gp->tx_lock. */
 static void gem_init_rings(struct gem *gp)
 {
 	struct gem_init_block *gb = gp->init_block;
@@ -1653,7 +1637,7 @@
 		struct sk_buff *skb;
 		struct gem_rxd *rxd = &gb->rxd[i];
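+		/* No locks are held on this path any more, so a sleeping
+		 * allocation is fine.
+		 */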
 
-		skb = gem_alloc_skb(RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC);
+		skb = gem_alloc_skb(dev, RX_BUF_ALLOC_SIZE(gp), GFP_KERNEL);
 		if (!skb) {
 			rxd->buffer = 0;
 			rxd->status_word = 0;
@@ -1661,7 +1645,6 @@
 		}
 
 		gp->rx_skbs[i] = skb;
-		skb->dev = dev;
 		skb_put(skb, (gp->rx_buf_sz + RX_OFFSET));
 		dma_addr = pci_map_page(gp->pdev,
 					virt_to_page(skb->data),
@@ -1737,7 +1720,7 @@
 
 	if (gp->phy_type == phy_mii_mdio0 ||
 	    gp->phy_type == phy_mii_mdio1) {
-	    	// XXX check for errors
+		/* Reset and detect MII PHY */
 		mii_phy_probe(&gp->phy_mii, gp->mii_phy_addr);
 
 		/* Init PHY */
@@ -1753,13 +1736,15 @@
 	gp->lstate = link_down;
 	netif_carrier_off(gp->dev);
 
-	/* Can I advertise gigabit here ? I'd need BCM PHY docs... */
-	spin_lock_irq(&gp->lock);
+	/* Print things out */
+	if (gp->phy_type == phy_mii_mdio0 ||
+	    gp->phy_type == phy_mii_mdio1)
+		netdev_info(gp->dev, "Found %s PHY\n",
+			    gp->phy_mii.def ? gp->phy_mii.def->name : "no");
+
 	gem_begin_auto_negotiation(gp, NULL);
-	spin_unlock_irq(&gp->lock);
 }
 
-/* Must be invoked under gp->lock and gp->tx_lock. */
 static void gem_init_dma(struct gem *gp)
 {
 	u64 desc_dma = (u64) gp->gblock_dvma;
@@ -1797,7 +1782,6 @@
 		       gp->regs + RXDMA_BLANK);
 }
 
-/* Must be invoked under gp->lock and gp->tx_lock. */
 static u32 gem_setup_multicast(struct gem *gp)
 {
 	u32 rxcfg = 0;
@@ -1835,7 +1819,6 @@
 	return rxcfg;
 }
 
-/* Must be invoked under gp->lock and gp->tx_lock. */
 static void gem_init_mac(struct gem *gp)
 {
 	unsigned char *e = &gp->dev->dev_addr[0];
@@ -1918,7 +1901,6 @@
 		writel(0, gp->regs + WOL_WAKECSR);
 }
 
-/* Must be invoked under gp->lock and gp->tx_lock. */
 static void gem_init_pause_thresholds(struct gem *gp)
 {
        	u32 cfg;
@@ -2079,7 +2061,6 @@
 	return 0;
 }
 
-/* Must be invoked under gp->lock and gp->tx_lock. */
 static void gem_reinit_chip(struct gem *gp)
 {
 	/* Reset the chip */
@@ -2100,11 +2081,9 @@
 }
 
 
-/* Must be invoked with no lock held. */
 static void gem_stop_phy(struct gem *gp, int wol)
 {
 	u32 mifcfg;
-	unsigned long flags;
 
 	/* Let the chip settle down a bit, it seems that helps
 	 * for sleep mode on some models
@@ -2150,15 +2129,9 @@
 	writel(0, gp->regs + RXDMA_CFG);
 
 	if (!wol) {
-		spin_lock_irqsave(&gp->lock, flags);
-		spin_lock(&gp->tx_lock);
 		gem_reset(gp);
 		writel(MAC_TXRST_CMD, gp->regs + MAC_TXRST);
 		writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST);
-		spin_unlock(&gp->tx_lock);
-		spin_unlock_irqrestore(&gp->lock, flags);
-
-		/* No need to take the lock here */
 
 		if (found_mii_phy(gp) && gp->phy_mii.def->ops->suspend)
 			gp->phy_mii.def->ops->suspend(&gp->phy_mii);
@@ -2175,77 +2148,86 @@
 	}
 }
 
-
 static int gem_do_start(struct net_device *dev)
 {
 	struct gem *gp = netdev_priv(dev);
-	unsigned long flags;
-
-	spin_lock_irqsave(&gp->lock, flags);
-	spin_lock(&gp->tx_lock);
+	int rc;
 
 	/* Enable the cell */
 	gem_get_cell(gp);
 
+	/* Make sure PCI access and bus master are enabled */
+	rc = pci_enable_device(gp->pdev);
+	if (rc) {
+		netdev_err(dev, "Failed to enable chip on PCI bus !\n");
+
+		/* Put cell and forget it for now, it will be considered as
+		 * still asleep, a new sleep cycle may bring it back
+		 */
+		gem_put_cell(gp);
+		return -ENXIO;
+	}
+	pci_set_master(gp->pdev);
+
 	/* Init & setup chip hardware */
 	gem_reinit_chip(gp);
 
-	gp->running = 1;
-
-	napi_enable(&gp->napi);
-
-	if (gp->lstate == link_up) {
-		netif_carrier_on(gp->dev);
-		gem_set_link_modes(gp);
-	}
-
-	netif_wake_queue(gp->dev);
-
-	spin_unlock(&gp->tx_lock);
-	spin_unlock_irqrestore(&gp->lock, flags);
-
-	if (request_irq(gp->pdev->irq, gem_interrupt,
-				   IRQF_SHARED, dev->name, (void *)dev)) {
+	/* An interrupt might come in handy */
+	rc = request_irq(gp->pdev->irq, gem_interrupt,
+			 IRQF_SHARED, dev->name, (void *)dev);
+	if (rc) {
 		netdev_err(dev, "failed to request irq !\n");
 
-		spin_lock_irqsave(&gp->lock, flags);
-		spin_lock(&gp->tx_lock);
-
-		napi_disable(&gp->napi);
-
-		gp->running =  0;
 		gem_reset(gp);
 		gem_clean_rings(gp);
 		gem_put_cell(gp);
-
-		spin_unlock(&gp->tx_lock);
-		spin_unlock_irqrestore(&gp->lock, flags);
-
-		return -EAGAIN;
+		return rc;
 	}
 
+	/* Mark us as attached again if we come from resume(), this has
+	 * no effect if we weren't detached and needs to be done now.
+	 */
+	netif_device_attach(dev);
+
+	/* Restart NAPI & queues */
+	gem_netif_start(gp);
+
+	/* Detect & init PHY, start autoneg etc... this will
+	 * eventually result in starting DMA operations when
+	 * the link is up
+	 */
+	gem_init_phy(gp);
+
 	return 0;
 }
 
 static void gem_do_stop(struct net_device *dev, int wol)
 {
 	struct gem *gp = netdev_priv(dev);
-	unsigned long flags;
 
-	spin_lock_irqsave(&gp->lock, flags);
-	spin_lock(&gp->tx_lock);
+	/* Stop NAPI and stop tx queue */
+	gem_netif_stop(gp);
 
-	gp->running = 0;
-
-	/* Stop netif queue */
-	netif_stop_queue(dev);
-
-	/* Make sure ints are disabled */
+	/* Make sure ints are disabled. We don't care about
+	 * synchronizing as NAPI is disabled, thus a stray
+	 * interrupt will do nothing bad (our irq handler
+	 * just schedules NAPI)
+	 */
 	gem_disable_ints(gp);
 
-	/* We can drop the lock now */
-	spin_unlock(&gp->tx_lock);
-	spin_unlock_irqrestore(&gp->lock, flags);
+	/* Stop the link timer */
+	del_timer_sync(&gp->link_timer);
+
+	/* We cannot cancel the reset task while holding the
+	 * rtnl lock, we'd get an A->B / B->A deadlock situation
+	 * if we did. This is not an issue however as the reset
+	 * task is synchronized vs. us (rtnl_lock) and will do
+	 * nothing if the device is down or suspended. We do
+	 * still clear reset_task_pending to avoid a spurious
+	 * reset later on in case we do resume before it gets
+	 * scheduled.
+	 */
+	gp->reset_task_pending = 0;
 
 	/* If we are going to sleep with WOL */
 	gem_stop_dma(gp);
@@ -2260,79 +2242,79 @@
 	/* No irq needed anymore */
 	free_irq(gp->pdev->irq, (void *) dev);
 
+	/* Shut the PHY down eventually and setup WOL */
+	gem_stop_phy(gp, wol);
+
+	/* Make sure bus master is disabled */
+	pci_disable_device(gp->pdev);
+
 	/* Cell not needed either if no WOL */
-	if (!wol) {
-		spin_lock_irqsave(&gp->lock, flags);
+	if (!wol)
 		gem_put_cell(gp);
-		spin_unlock_irqrestore(&gp->lock, flags);
-	}
 }
 
 static void gem_reset_task(struct work_struct *work)
 {
 	struct gem *gp = container_of(work, struct gem, reset_task);
 
-	mutex_lock(&gp->pm_mutex);
+	/* Lock out the network stack (essentially shield ourselves
+	 * against a racing open, close, control call, or suspend
+	 */
+	rtnl_lock();
 
-	if (gp->opened)
-		napi_disable(&gp->napi);
-
-	spin_lock_irq(&gp->lock);
-	spin_lock(&gp->tx_lock);
-
-	if (gp->running) {
-		netif_stop_queue(gp->dev);
-
-		/* Reset the chip & rings */
-		gem_reinit_chip(gp);
-		if (gp->lstate == link_up)
-			gem_set_link_modes(gp);
-		netif_wake_queue(gp->dev);
+	/* Skip the reset task if suspended or closed, or if it's
+	 * been cancelled by gem_do_stop (see comment there)
+	 */
+	if (!netif_device_present(gp->dev) ||
+	    !netif_running(gp->dev) ||
+	    !gp->reset_task_pending) {
+		rtnl_unlock();
+		return;
 	}
 
+	/* Stop the link timer */
+	del_timer_sync(&gp->link_timer);
+
+	/* Stop NAPI and tx */
+	gem_netif_stop(gp);
+
+	/* Reset the chip & rings */
+	gem_reinit_chip(gp);
+	if (gp->lstate == link_up)
+		gem_set_link_modes(gp);
+
+	/* Restart NAPI and Tx */
+	gem_netif_start(gp);
+
+	/* We are back ! */
 	gp->reset_task_pending = 0;
 
-	spin_unlock(&gp->tx_lock);
-	spin_unlock_irq(&gp->lock);
+	/* If the link is not up, restart autoneg, else restart the
+	 * polling timer
+	 */
+	if (gp->lstate != link_up)
+		gem_begin_auto_negotiation(gp, NULL);
+	else
+		mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));
 
-	if (gp->opened)
-		napi_enable(&gp->napi);
-
-	mutex_unlock(&gp->pm_mutex);
+	rtnl_unlock();
 }
 
-
 static int gem_open(struct net_device *dev)
 {
-	struct gem *gp = netdev_priv(dev);
-	int rc = 0;
-
-	mutex_lock(&gp->pm_mutex);
-
-	/* We need the cell enabled */
-	if (!gp->asleep)
-		rc = gem_do_start(dev);
-	gp->opened = (rc == 0);
-
-	mutex_unlock(&gp->pm_mutex);
-
-	return rc;
+	/* We allow open while suspended, we just do nothing,
+	 * the chip will be initialized in resume()
+	 */
+	if (netif_device_present(dev))
+		return gem_do_start(dev);
+	return 0;
 }
 
 static int gem_close(struct net_device *dev)
 {
-	struct gem *gp = netdev_priv(dev);
-
-	mutex_lock(&gp->pm_mutex);
-
-	napi_disable(&gp->napi);
-
-	gp->opened = 0;
-	if (!gp->asleep)
+	if (netif_device_present(dev))
 		gem_do_stop(dev, 0);
 
-	mutex_unlock(&gp->pm_mutex);
-
 	return 0;
 }
 
@@ -2341,59 +2323,35 @@
 {
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct gem *gp = netdev_priv(dev);
-	unsigned long flags;
 
-	mutex_lock(&gp->pm_mutex);
+	/* Lock the network stack first to avoid racing with open/close,
+	 * reset task and setting calls
+	 */
+	rtnl_lock();
 
-	netdev_info(dev, "suspending, WakeOnLan %s\n",
-		    (gp->wake_on_lan && gp->opened) ? "enabled" : "disabled");
-
-	/* Keep the cell enabled during the entire operation */
-	spin_lock_irqsave(&gp->lock, flags);
-	spin_lock(&gp->tx_lock);
-	gem_get_cell(gp);
-	spin_unlock(&gp->tx_lock);
-	spin_unlock_irqrestore(&gp->lock, flags);
-
-	/* If the driver is opened, we stop the MAC */
-	if (gp->opened) {
-		napi_disable(&gp->napi);
-
-		/* Stop traffic, mark us closed */
+	/* Not running, mark ourselves non-present, no need for
+	 * a lock here
+	 */
+	if (!netif_running(dev)) {
 		netif_device_detach(dev);
+		rtnl_unlock();
+		return 0;
+	}
+	netdev_info(dev, "suspending, WakeOnLan %s\n",
+		    (gp->wake_on_lan && netif_running(dev)) ?
+		    "enabled" : "disabled");
 
-		/* Switch off MAC, remember WOL setting */
-		gp->asleep_wol = gp->wake_on_lan;
-		gem_do_stop(dev, gp->asleep_wol);
-	} else
-		gp->asleep_wol = 0;
-
-	/* Mark us asleep */
-	gp->asleep = 1;
-	wmb();
-
-	/* Stop the link timer */
-	del_timer_sync(&gp->link_timer);
-
-	/* Now we release the mutex to not block the reset task who
-	 * can take it too. We are marked asleep, so there will be no
-	 * conflict here
+	/* Tell the network stack we're gone. gem_do_stop() below will
+	 * synchronize with TX, stop NAPI etc...
 	 */
-	mutex_unlock(&gp->pm_mutex);
+	netif_device_detach(dev);
 
-	/* Wait for the pending reset task to complete */
-	flush_work_sync(&gp->reset_task);
+	/* Switch off chip, remember WOL setting */
+	gp->asleep_wol = gp->wake_on_lan;
+	gem_do_stop(dev, gp->asleep_wol);
 
-	/* Shut the PHY down eventually and setup WOL */
-	gem_stop_phy(gp, gp->asleep_wol);
-
-	/* Make sure bus master is disabled */
-	pci_disable_device(gp->pdev);
-
-	/* Release the cell, no need to take a lock at this point since
-	 * nothing else can happen now
-	 */
-	gem_put_cell(gp);
+	/* Unlock the network stack */
+	rtnl_unlock();
 
 	return 0;
 }
@@ -2402,53 +2360,23 @@
 {
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct gem *gp = netdev_priv(dev);
-	unsigned long flags;
 
-	netdev_info(dev, "resuming\n");
+	/* See locking comment in gem_suspend */
+	rtnl_lock();
 
-	mutex_lock(&gp->pm_mutex);
-
-	/* Keep the cell enabled during the entire operation, no need to
-	 * take a lock here tho since nothing else can happen while we are
-	 * marked asleep
+	/* Not running, mark ourselves present, no need for
+	 * a lock here
 	 */
-	gem_get_cell(gp);
-
-	/* Make sure PCI access and bus master are enabled */
-	if (pci_enable_device(gp->pdev)) {
-		netdev_err(dev, "Can't re-enable chip !\n");
-		/* Put cell and forget it for now, it will be considered as
-		 * still asleep, a new sleep cycle may bring it back
-		 */
-		gem_put_cell(gp);
-		mutex_unlock(&gp->pm_mutex);
+	if (!netif_running(dev)) {
+		netif_device_attach(dev);
+		rtnl_unlock();
 		return 0;
 	}
-	pci_set_master(gp->pdev);
 
-	/* Reset everything */
-	gem_reset(gp);
-
-	/* Mark us woken up */
-	gp->asleep = 0;
-	wmb();
-
-	/* Bring the PHY back. Again, lock is useless at this point as
-	 * nothing can be happening until we restart the whole thing
+	/* Restart chip. If that fails there isn't much we can do; we
+	 * leave things stopped.
 	 */
-	gem_init_phy(gp);
-
-	/* If we were opened, bring everything back */
-	if (gp->opened) {
-		/* Restart MAC */
-		gem_do_start(dev);
-
-		/* Re-attach net device */
-		netif_device_attach(dev);
-	}
-
-	spin_lock_irqsave(&gp->lock, flags);
-	spin_lock(&gp->tx_lock);
+	gem_do_start(dev);
 
 	/* If we had WOL enabled, the cell clock was never turned off during
 	 * sleep, so we end up being unbalanced. Fix that here
@@ -2456,15 +2384,8 @@
 	if (gp->asleep_wol)
 		gem_put_cell(gp);
 
-	/* This function doesn't need to hold the cell, it will be held if the
-	 * driver is open by gem_do_start().
-	 */
-	gem_put_cell(gp);
-
-	spin_unlock(&gp->tx_lock);
-	spin_unlock_irqrestore(&gp->lock, flags);
-
-	mutex_unlock(&gp->pm_mutex);
+	/* Unlock the network stack */
+	rtnl_unlock();
 
 	return 0;
 }
@@ -2474,33 +2395,35 @@
 {
 	struct gem *gp = netdev_priv(dev);
 
-	spin_lock_irq(&gp->lock);
-	spin_lock(&gp->tx_lock);
-
 	/* I have seen this being called while the PM was in progress,
-	 * so we shield against this
+	 * so we shield against this. Let's also not poke at registers
+	 * while the reset task is going on.
+	 *
+	 * TODO: Move stats collection elsewhere (link timer ?) and
+	 * make this a nop to avoid all those synchro issues
 	 */
-	if (gp->running) {
-		dev->stats.rx_crc_errors += readl(gp->regs + MAC_FCSERR);
-		writel(0, gp->regs + MAC_FCSERR);
+	if (!netif_device_present(dev) || !netif_running(dev))
+		goto bail;
 
-		dev->stats.rx_frame_errors += readl(gp->regs + MAC_AERR);
-		writel(0, gp->regs + MAC_AERR);
+	/* Better safe than sorry... */
+	if (WARN_ON(!gp->cell_enabled))
+		goto bail;
 
-		dev->stats.rx_length_errors += readl(gp->regs + MAC_LERR);
-		writel(0, gp->regs + MAC_LERR);
+	dev->stats.rx_crc_errors += readl(gp->regs + MAC_FCSERR);
+	writel(0, gp->regs + MAC_FCSERR);
 
-		dev->stats.tx_aborted_errors += readl(gp->regs + MAC_ECOLL);
-		dev->stats.collisions +=
-			(readl(gp->regs + MAC_ECOLL) +
-			 readl(gp->regs + MAC_LCOLL));
-		writel(0, gp->regs + MAC_ECOLL);
-		writel(0, gp->regs + MAC_LCOLL);
-	}
+	dev->stats.rx_frame_errors += readl(gp->regs + MAC_AERR);
+	writel(0, gp->regs + MAC_AERR);
 
-	spin_unlock(&gp->tx_lock);
-	spin_unlock_irq(&gp->lock);
+	dev->stats.rx_length_errors += readl(gp->regs + MAC_LERR);
+	writel(0, gp->regs + MAC_LERR);
 
+	dev->stats.tx_aborted_errors += readl(gp->regs + MAC_ECOLL);
+	dev->stats.collisions +=
+		(readl(gp->regs + MAC_ECOLL) + readl(gp->regs + MAC_LCOLL));
+	writel(0, gp->regs + MAC_ECOLL);
+	writel(0, gp->regs + MAC_LCOLL);
+ bail:
 	return &dev->stats;
 }
 
@@ -2513,22 +2436,19 @@
 	if (!is_valid_ether_addr(macaddr->sa_data))
 		return -EADDRNOTAVAIL;
 
-	if (!netif_running(dev) || !netif_device_present(dev)) {
-		/* We'll just catch it later when the
-		 * device is up'd or resumed.
-		 */
-		memcpy(dev->dev_addr, macaddr->sa_data, dev->addr_len);
-		return 0;
-	}
-
-	mutex_lock(&gp->pm_mutex);
 	memcpy(dev->dev_addr, macaddr->sa_data, dev->addr_len);
-	if (gp->running) {
-		writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0);
-		writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1);
-		writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2);
-	}
-	mutex_unlock(&gp->pm_mutex);
+
+	/* We'll just catch it later when the device is up'd or resumed */
+	if (!netif_running(dev) || !netif_device_present(dev))
+		return 0;
+
+	/* Better safe than sorry... */
+	if (WARN_ON(!gp->cell_enabled))
+		return 0;
+
+	writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0);
+	writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1);
+	writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2);
 
 	return 0;
 }
@@ -2539,14 +2459,12 @@
 	u32 rxcfg, rxcfg_new;
 	int limit = 10000;
 
+	if (!netif_running(dev) || !netif_device_present(dev))
+		return;
 
-	spin_lock_irq(&gp->lock);
-	spin_lock(&gp->tx_lock);
-
-	if (!gp->running)
-		goto bail;
-
-	netif_stop_queue(dev);
+	/* Better safe than sorry... */
+	if (gp->reset_task_pending || WARN_ON(!gp->cell_enabled))
+		return;
 
 	rxcfg = readl(gp->regs + MAC_RXCFG);
 	rxcfg_new = gem_setup_multicast(gp);
@@ -2566,12 +2484,6 @@
 	rxcfg |= rxcfg_new;
 
 	writel(rxcfg, gp->regs + MAC_RXCFG);
-
-	netif_wake_queue(dev);
-
- bail:
-	spin_unlock(&gp->tx_lock);
-	spin_unlock_irq(&gp->lock);
 }
 
 /* Jumbo-grams don't seem to work :-( */
@@ -2589,26 +2501,21 @@
 	if (new_mtu < GEM_MIN_MTU || new_mtu > GEM_MAX_MTU)
 		return -EINVAL;
 
-	if (!netif_running(dev) || !netif_device_present(dev)) {
-		/* We'll just catch it later when the
-		 * device is up'd or resumed.
-		 */
-		dev->mtu = new_mtu;
-		return 0;
-	}
-
-	mutex_lock(&gp->pm_mutex);
-	spin_lock_irq(&gp->lock);
-	spin_lock(&gp->tx_lock);
 	dev->mtu = new_mtu;
-	if (gp->running) {
-		gem_reinit_chip(gp);
-		if (gp->lstate == link_up)
-			gem_set_link_modes(gp);
-	}
-	spin_unlock(&gp->tx_lock);
-	spin_unlock_irq(&gp->lock);
-	mutex_unlock(&gp->pm_mutex);
+
+	/* We'll just catch it later when the device is up'd or resumed */
+	if (!netif_running(dev) || !netif_device_present(dev))
+		return 0;
+
+	/* Better safe than sorry... */
+	if (WARN_ON(!gp->cell_enabled))
+		return 0;
+
+	gem_netif_stop(gp);
+	gem_reinit_chip(gp);
+	if (gp->lstate == link_up)
+		gem_set_link_modes(gp);
+	gem_netif_start(gp);
 
 	return 0;
 }
@@ -2640,7 +2547,6 @@
 		cmd->phy_address = 0; /* XXX fixed PHYAD */
 
 		/* Return current PHY settings */
-		spin_lock_irq(&gp->lock);
 		cmd->autoneg = gp->want_autoneg;
 		ethtool_cmd_speed_set(cmd, gp->phy_mii.speed);
 		cmd->duplex = gp->phy_mii.duplex;
@@ -2652,7 +2558,6 @@
 		 */
 		if (cmd->advertising == 0)
 			cmd->advertising = cmd->supported;
-		spin_unlock_irq(&gp->lock);
 	} else { // XXX PCS ?
 		cmd->supported =
 			(SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
@@ -2706,11 +2611,10 @@
 		return -EINVAL;
 
 	/* Apply settings and restart link process. */
-	spin_lock_irq(&gp->lock);
-	gem_get_cell(gp);
-	gem_begin_auto_negotiation(gp, cmd);
-	gem_put_cell(gp);
-	spin_unlock_irq(&gp->lock);
+	if (netif_device_present(gp->dev)) {
+		del_timer_sync(&gp->link_timer);
+		gem_begin_auto_negotiation(gp, cmd);
+	}
 
 	return 0;
 }
@@ -2722,12 +2626,11 @@
 	if (!gp->want_autoneg)
 		return -EINVAL;
 
-	/* Restart link process. */
-	spin_lock_irq(&gp->lock);
-	gem_get_cell(gp);
-	gem_begin_auto_negotiation(gp, NULL);
-	gem_put_cell(gp);
-	spin_unlock_irq(&gp->lock);
+	/* Restart link process */
+	if (netif_device_present(gp->dev)) {
+		del_timer_sync(&gp->link_timer);
+		gem_begin_auto_negotiation(gp, NULL);
+	}
 
 	return 0;
 }
@@ -2791,16 +2694,11 @@
 	struct gem *gp = netdev_priv(dev);
 	struct mii_ioctl_data *data = if_mii(ifr);
 	int rc = -EOPNOTSUPP;
-	unsigned long flags;
 
-	/* Hold the PM mutex while doing ioctl's or we may collide
-	 * with power management.
+	/* For SIOCGMIIREG and SIOCSMIIREG the core checks for us that
+	 * netif_device_present() is true and holds rtnl_lock for us
+	 * so we have nothing to worry about
 	 */
-	mutex_lock(&gp->pm_mutex);
-
-	spin_lock_irqsave(&gp->lock, flags);
-	gem_get_cell(gp);
-	spin_unlock_irqrestore(&gp->lock, flags);
 
 	switch (cmd) {
 	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
@@ -2808,32 +2706,17 @@
 		/* Fallthrough... */
 
 	case SIOCGMIIREG:		/* Read MII PHY register. */
-		if (!gp->running)
-			rc = -EAGAIN;
-		else {
-			data->val_out = __phy_read(gp, data->phy_id & 0x1f,
-						   data->reg_num & 0x1f);
-			rc = 0;
-		}
+		data->val_out = __phy_read(gp, data->phy_id & 0x1f,
+					   data->reg_num & 0x1f);
+		rc = 0;
 		break;
 
 	case SIOCSMIIREG:		/* Write MII PHY register. */
-		if (!gp->running)
-			rc = -EAGAIN;
-		else {
-			__phy_write(gp, data->phy_id & 0x1f, data->reg_num & 0x1f,
-				    data->val_in);
-			rc = 0;
-		}
+		__phy_write(gp, data->phy_id & 0x1f, data->reg_num & 0x1f,
+			    data->val_in);
+		rc = 0;
 		break;
-	};
-
-	spin_lock_irqsave(&gp->lock, flags);
-	gem_put_cell(gp);
-	spin_unlock_irqrestore(&gp->lock, flags);
-
-	mutex_unlock(&gp->pm_mutex);
-
+	}
 	return rc;
 }
 
@@ -2921,23 +2804,9 @@
 
 		unregister_netdev(dev);
 
-		/* Stop the link timer */
-		del_timer_sync(&gp->link_timer);
-
-		/* We shouldn't need any locking here */
-		gem_get_cell(gp);
-
-		/* Cancel reset task */
+		/* Ensure reset task is truly gone */
 		cancel_work_sync(&gp->reset_task);
 
-		/* Shut the PHY down */
-		gem_stop_phy(gp, 0);
-
-		gem_put_cell(gp);
-
-		/* Make sure bus master is disabled */
-		pci_disable_device(gp->pdev);
-
 		/* Free resources */
 		pci_free_consistent(pdev,
 				    sizeof(struct gem_init_block),
@@ -3043,10 +2912,6 @@
 
 	gp->msg_enable = DEFAULT_MSG;
 
-	spin_lock_init(&gp->lock);
-	spin_lock_init(&gp->tx_lock);
-	mutex_init(&gp->pm_mutex);
-
 	init_timer(&gp->link_timer);
 	gp->link_timer.function = gem_link_timer;
 	gp->link_timer.data = (unsigned long) gp;
@@ -3122,14 +2987,11 @@
 	/* Set that now, in case PM kicks in now */
 	pci_set_drvdata(pdev, dev);
 
-	/* Detect & init PHY, start autoneg, we release the cell now
-	 * too, it will be managed by whoever needs it
-	 */
-	gem_init_phy(gp);
-
-	spin_lock_irq(&gp->lock);
-	gem_put_cell(gp);
-	spin_unlock_irq(&gp->lock);
+	/* We can do scatter/gather and HW checksum */
+	dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
+	dev->features |= dev->hw_features | NETIF_F_RXCSUM;
+	if (pci_using_dac)
+		dev->features |= NETIF_F_HIGHDMA;
 
 	/* Register with kernel */
 	if (register_netdev(dev)) {
@@ -3138,20 +3000,15 @@
 		goto err_out_free_consistent;
 	}
 
+	/* Undo the get_cell with appropriate locking (we could use
+	 * ndo_init/uninit but that would be even more clumsy imho)
+	 */
+	rtnl_lock();
+	gem_put_cell(gp);
+	rtnl_unlock();
+
 	netdev_info(dev, "Sun GEM (PCI) 10/100/1000BaseT Ethernet %pM\n",
 		    dev->dev_addr);
-
-	if (gp->phy_type == phy_mii_mdio0 ||
-     	    gp->phy_type == phy_mii_mdio1)
-		netdev_info(dev, "Found %s PHY\n",
-			    gp->phy_mii.def ? gp->phy_mii.def->name : "no");
-
-	/* GEM can do it all... */
-	dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
-	dev->features |= dev->hw_features | NETIF_F_RXCSUM | NETIF_F_LLTX;
-	if (pci_using_dac)
-		dev->features |= NETIF_F_HIGHDMA;
-
 	return 0;
 
 err_out_free_consistent:
diff --git a/drivers/net/sungem.h b/drivers/net/sungem.h
index d225077..835ce1b 100644
--- a/drivers/net/sungem.h
+++ b/drivers/net/sungem.h
@@ -973,23 +973,14 @@
 };
 
 struct gem {
-	spinlock_t		lock;
-	spinlock_t		tx_lock;
 	void __iomem		*regs;
 	int			rx_new, rx_old;
 	int			tx_new, tx_old;
 
 	unsigned int has_wol : 1;	/* chip supports wake-on-lan */
-	unsigned int asleep : 1;	/* chip asleep, protected by pm_mutex */
 	unsigned int asleep_wol : 1;	/* was asleep with WOL enabled */
-	unsigned int opened : 1;	/* driver opened, protected by pm_mutex */
-	unsigned int running : 1;	/* chip running, protected by lock */
 
-	/* cell enable count, protected by lock */
 	int			cell_enabled;
-
-	struct mutex		pm_mutex;
-
 	u32			msg_enable;
 	u32			status;
 
@@ -1033,20 +1024,4 @@
 #define found_mii_phy(gp) ((gp->phy_type == phy_mii_mdio0 || gp->phy_type == phy_mii_mdio1) && \
 			   gp->phy_mii.def && gp->phy_mii.def->ops)
 
-#define ALIGNED_RX_SKB_ADDR(addr) \
-        ((((unsigned long)(addr) + (64UL - 1UL)) & ~(64UL - 1UL)) - (unsigned long)(addr))
-static __inline__ struct sk_buff *gem_alloc_skb(int size,
-						gfp_t gfp_flags)
-{
-	struct sk_buff *skb = alloc_skb(size + 64, gfp_flags);
-
-	if (skb) {
-		int offset = (int) ALIGNED_RX_SKB_ADDR(skb->data);
-		if (offset)
-			skb_reserve(skb, offset);
-	}
-
-	return skb;
-}
-
 #endif /* _SUNGEM_H */
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index a1f9f9e..97cd02d 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -26,6 +26,7 @@
 #include <linux/delay.h>
 #include <linux/in.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/ioport.h>
 #include <linux/pci.h>
 #include <linux/netdevice.h>
@@ -106,6 +107,8 @@
 	 NETIF_MSG_RX_ERR	| \
 	 NETIF_MSG_TX_ERR)
 
+#define TG3_GRC_LCLCTL_PWRSW_DELAY	100
+
 /* length of time before we decide the hardware is borked,
  * and dev->tx_timeout() should be called to fix the problem
  */
@@ -860,7 +863,7 @@
 	int ret;
 
 	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
-	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
+	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
 		return 0;
 
 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
@@ -1980,15 +1983,14 @@
 
 		/* Set full-duplex, 1000 mbps.  */
 		tg3_writephy(tp, MII_BMCR,
-			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
+			     BMCR_FULLDPLX | BMCR_SPEED1000);
 
 		/* Set to master mode.  */
-		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
+		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
 			continue;
 
-		tg3_writephy(tp, MII_TG3_CTRL,
-			     (MII_TG3_CTRL_AS_MASTER |
-			      MII_TG3_CTRL_ENABLE_AS_MASTER));
+		tg3_writephy(tp, MII_CTRL1000,
+			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
 
 		err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
 		if (err)
@@ -2013,7 +2015,7 @@
 
 	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
 
-	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
+	tg3_writephy(tp, MII_CTRL1000, phy9_orig);
 
 	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
 		reg32 &= ~0x3000;
@@ -2165,6 +2167,118 @@
 	return 0;
 }
 
+static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
+{
+	if (!tg3_flag(tp, IS_NIC))
+		return 0;
+
+	tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
+		    TG3_GRC_LCLCTL_PWRSW_DELAY);
+
+	return 0;
+}
+
+static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
+{
+	u32 grc_local_ctrl;
+
+	if (!tg3_flag(tp, IS_NIC) ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
+		return;
+
+	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
+
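+	/* Pulse GPIO1: drive it high, low, then high again, waiting
+	 * the power switch delay after each write.
+	 */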
+	tw32_wait_f(GRC_LOCAL_CTRL,
+		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
+		    TG3_GRC_LCLCTL_PWRSW_DELAY);
+
+	tw32_wait_f(GRC_LOCAL_CTRL,
+		    grc_local_ctrl,
+		    TG3_GRC_LCLCTL_PWRSW_DELAY);
+
+	tw32_wait_f(GRC_LOCAL_CTRL,
+		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
+		    TG3_GRC_LCLCTL_PWRSW_DELAY);
+}
+
+static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
+{
+	if (!tg3_flag(tp, IS_NIC))
+		return;
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
+		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
+			    (GRC_LCLCTRL_GPIO_OE0 |
+			     GRC_LCLCTRL_GPIO_OE1 |
+			     GRC_LCLCTRL_GPIO_OE2 |
+			     GRC_LCLCTRL_GPIO_OUTPUT0 |
+			     GRC_LCLCTRL_GPIO_OUTPUT1),
+			    TG3_GRC_LCLCTL_PWRSW_DELAY);
+	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
+		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
+		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
+		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
+				     GRC_LCLCTRL_GPIO_OE1 |
+				     GRC_LCLCTRL_GPIO_OE2 |
+				     GRC_LCLCTRL_GPIO_OUTPUT0 |
+				     GRC_LCLCTRL_GPIO_OUTPUT1 |
+				     tp->grc_local_ctrl;
+		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
+			    TG3_GRC_LCLCTL_PWRSW_DELAY);
+
+		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
+		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
+			    TG3_GRC_LCLCTL_PWRSW_DELAY);
+
+		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
+		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
+			    TG3_GRC_LCLCTL_PWRSW_DELAY);
+	} else {
+		u32 no_gpio2;
+		u32 grc_local_ctrl = 0;
+
+		/* Workaround to prevent overdrawing Amps. */
+		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
+			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
+			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
+				    grc_local_ctrl,
+				    TG3_GRC_LCLCTL_PWRSW_DELAY);
+		}
+
+		/* On 5753 and variants, GPIO2 cannot be used. */
+		no_gpio2 = tp->nic_sram_data_cfg &
+			   NIC_SRAM_DATA_CFG_NO_GPIO2;
+
+		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
+				  GRC_LCLCTRL_GPIO_OE1 |
+				  GRC_LCLCTRL_GPIO_OE2 |
+				  GRC_LCLCTRL_GPIO_OUTPUT1 |
+				  GRC_LCLCTRL_GPIO_OUTPUT2;
+		if (no_gpio2) {
+			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
+					    GRC_LCLCTRL_GPIO_OUTPUT2);
+		}
+		tw32_wait_f(GRC_LOCAL_CTRL,
+			    tp->grc_local_ctrl | grc_local_ctrl,
+			    TG3_GRC_LCLCTL_PWRSW_DELAY);
+
+		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
+
+		tw32_wait_f(GRC_LOCAL_CTRL,
+			    tp->grc_local_ctrl | grc_local_ctrl,
+			    TG3_GRC_LCLCTL_PWRSW_DELAY);
+
+		if (!no_gpio2) {
+			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
+			tw32_wait_f(GRC_LOCAL_CTRL,
+				    tp->grc_local_ctrl | grc_local_ctrl,
+				    TG3_GRC_LCLCTL_PWRSW_DELAY);
+		}
+	}
+}
+
 static void tg3_frob_aux_power(struct tg3 *tp)
 {
 	bool need_vaux = false;
@@ -2200,86 +2314,10 @@
 	if (tg3_flag(tp, WOL_ENABLE) || tg3_flag(tp, ENABLE_ASF))
 		need_vaux = true;
 
-	if (need_vaux) {
-		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
-		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
-			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
-				    (GRC_LCLCTRL_GPIO_OE0 |
-				     GRC_LCLCTRL_GPIO_OE1 |
-				     GRC_LCLCTRL_GPIO_OE2 |
-				     GRC_LCLCTRL_GPIO_OUTPUT0 |
-				     GRC_LCLCTRL_GPIO_OUTPUT1),
-				    100);
-		} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
-			   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
-			/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
-			u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
-					     GRC_LCLCTRL_GPIO_OE1 |
-					     GRC_LCLCTRL_GPIO_OE2 |
-					     GRC_LCLCTRL_GPIO_OUTPUT0 |
-					     GRC_LCLCTRL_GPIO_OUTPUT1 |
-					     tp->grc_local_ctrl;
-			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
-
-			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
-			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
-
-			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
-			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
-		} else {
-			u32 no_gpio2;
-			u32 grc_local_ctrl = 0;
-
-			/* Workaround to prevent overdrawing Amps. */
-			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
-			    ASIC_REV_5714) {
-				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
-				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
-					    grc_local_ctrl, 100);
-			}
-
-			/* On 5753 and variants, GPIO2 cannot be used. */
-			no_gpio2 = tp->nic_sram_data_cfg &
-				    NIC_SRAM_DATA_CFG_NO_GPIO2;
-
-			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
-					 GRC_LCLCTRL_GPIO_OE1 |
-					 GRC_LCLCTRL_GPIO_OE2 |
-					 GRC_LCLCTRL_GPIO_OUTPUT1 |
-					 GRC_LCLCTRL_GPIO_OUTPUT2;
-			if (no_gpio2) {
-				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
-						    GRC_LCLCTRL_GPIO_OUTPUT2);
-			}
-			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
-						    grc_local_ctrl, 100);
-
-			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
-
-			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
-						    grc_local_ctrl, 100);
-
-			if (!no_gpio2) {
-				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
-				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
-					    grc_local_ctrl, 100);
-			}
-		}
-	} else {
-		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
-		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
-			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
-				    (GRC_LCLCTRL_GPIO_OE1 |
-				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
-
-			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
-				    GRC_LCLCTRL_GPIO_OE1, 100);
-
-			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
-				    (GRC_LCLCTRL_GPIO_OE1 |
-				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
-		}
-	}
+	if (need_vaux)
+		tg3_pwrsrc_switch_to_vaux(tp);
+	else
+		tg3_pwrsrc_die_with_vmain(tp);
 }
 
 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
@@ -2624,8 +2662,7 @@
 	pci_set_power_state(tp->pdev, PCI_D0);
 
 	/* Switch out of Vaux if it is a NIC */
-	if (tg3_flag(tp, IS_NIC))
-		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
+	tg3_pwrsrc_switch_to_vmain(tp);
 
 	return 0;
 }
@@ -2957,16 +2994,15 @@
 
 	new_adv = 0;
 	if (advertise & ADVERTISED_1000baseT_Half)
-		new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
+		new_adv |= ADVERTISE_1000HALF;
 	if (advertise & ADVERTISED_1000baseT_Full)
-		new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
+		new_adv |= ADVERTISE_1000FULL;
 
 	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
 	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
-		new_adv |= (MII_TG3_CTRL_AS_MASTER |
-			    MII_TG3_CTRL_ENABLE_AS_MASTER);
+		new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
 
-	err = tg3_writephy(tp, MII_TG3_CTRL, new_adv);
+	err = tg3_writephy(tp, MII_CTRL1000, new_adv);
 	if (err)
 		goto done;
 
@@ -3075,7 +3111,7 @@
 			break;
 
 		case SPEED_1000:
-			bmcr |= TG3_BMCR_SPEED1000;
+			bmcr |= BMCR_SPEED1000;
 			break;
 		}
 
@@ -3152,7 +3188,7 @@
 		if (mask & ADVERTISED_1000baseT_Full)
 			all_mask |= ADVERTISE_1000FULL;
 
-		if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
+		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
 			return 0;
 
 		if ((tg3_ctrl & all_mask) != all_mask)
@@ -5821,8 +5857,7 @@
 		/* Make sure new skb does not cross any 4G boundaries.
 		 * Drop the packet if it does.
 		 */
-		} else if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
-			   tg3_4g_overflow_test(new_addr, new_skb->len)) {
+		} else if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
 			pci_unmap_single(tp->pdev, new_addr, new_skb->len,
 					 PCI_DMA_TODEVICE);
 			ret = -1;
@@ -6017,12 +6052,10 @@
 	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
 		would_hit_hwbug = 1;
 
-	if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
-	    tg3_4g_overflow_test(mapping, len))
+	if (tg3_4g_overflow_test(mapping, len))
 		would_hit_hwbug = 1;
 
-	if (tg3_flag(tp, 40BIT_DMA_LIMIT_BUG) &&
-	    tg3_40bit_overflow_test(tp, mapping, len))
+	if (tg3_40bit_overflow_test(tp, mapping, len))
 		would_hit_hwbug = 1;
 
 	if (tg3_flag(tp, 5701_DMA_BUG))
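
The two overflow tests above are now applied unconditionally. For reference,
the checks can be modelled in plain C as below; the helper names and
signatures are illustrative only, not the driver's own (the real
tg3_4g_overflow_test/tg3_40bit_overflow_test take driver types):

	#include <stdbool.h>
	#include <stdint.h>

	/* Sketch: a DMA buffer straddles a 4 GiB boundary iff bits 63:32
	 * of its first and last byte addresses differ. */
	static bool crosses_4g_boundary(uint64_t mapping, uint32_t len)
	{
		return (mapping >> 32) != ((mapping + len - 1) >> 32);
	}

	/* Sketch: a buffer violates a 40-bit DMA limit iff any byte of it
	 * lies at or above 2^40. */
	static bool exceeds_40bit_dma(uint64_t mapping, uint32_t len)
	{
		return ((mapping + len - 1) >> 40) != 0;
	}
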
@@ -6055,12 +6088,10 @@
 			    len <= 8)
 				would_hit_hwbug = 1;
 
-			if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
-			    tg3_4g_overflow_test(mapping, len))
+			if (tg3_4g_overflow_test(mapping, len))
 				would_hit_hwbug = 1;
 
-			if (tg3_flag(tp, 40BIT_DMA_LIMIT_BUG) &&
-			    tg3_40bit_overflow_test(tp, mapping, len))
+			if (tg3_40bit_overflow_test(tp, mapping, len))
 				would_hit_hwbug = 1;
 
 			if (tg3_flag(tp, HW_TSO_1) ||
@@ -6088,6 +6119,8 @@
 		entry = NEXT_TX(tnapi->tx_prod);
 	}
 
+	skb_tx_timestamp(skb);
+
 	/* Packets are ready, update Tx producer idx local and on card. */
 	tw32_tx_mbox(tnapi->prodmbox, entry);
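
skb_tx_timestamp() is the generic hook for software transmit timestamps; the
sketch below shows the conventional placement in a hypothetical driver (not
tg3's literal code): after the descriptors are written, but before the
doorbell, so the stamp is taken as close to the wire as the driver can manage.

	/* Hypothetical xmit routine illustrating the hook placement. */
	static netdev_tx_t example_start_xmit(struct sk_buff *skb,
					      struct net_device *dev)
	{
		/* ... map the skb and fill the TX descriptors ... */
		skb_tx_timestamp(skb);	/* software TX timestamp, if requested */
		/* ... advance the producer index / ring the doorbell ... */
		return NETDEV_TX_OK;
	}
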
 
@@ -7751,6 +7784,9 @@
 
 	/* Disable interrupts */
 	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
+	tp->napi[0].chk_msi_cnt = 0;
+	tp->napi[0].last_rx_cons = 0;
+	tp->napi[0].last_tx_cons = 0;
 
 	/* Zero mailbox registers. */
 	if (tg3_flag(tp, SUPPORT_MSIX)) {
@@ -7761,6 +7797,9 @@
 				tw32_mailbox(tp->napi[i].prodmbox, 0);
 			tw32_rx_mbox(tp->napi[i].consmbox, 0);
 			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
+			tp->napi[i].chk_msi_cnt = 0;
+			tp->napi[i].last_rx_cons = 0;
+			tp->napi[i].last_tx_cons = 0;
 		}
 		if (!tg3_flag(tp, ENABLE_TSS))
 			tw32_mailbox(tp->napi[0].prodmbox, 0);
@@ -8816,6 +8855,30 @@
 	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
 }
 
+static void tg3_chk_missed_msi(struct tg3 *tp)
+{
+	u32 i;
+
+	for (i = 0; i < tp->irq_cnt; i++) {
+		struct tg3_napi *tnapi = &tp->napi[i];
+
+		if (tg3_has_work(tnapi)) {
+			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
+			    tnapi->last_tx_cons == tnapi->tx_cons) {
+				if (tnapi->chk_msi_cnt < 1) {
+					tnapi->chk_msi_cnt++;
+					return;
+				}
+				tw32_mailbox(tnapi->int_mbox,
+					     tnapi->last_tag << 24);
+			}
+		}
+		tnapi->chk_msi_cnt = 0;
+		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
+		tnapi->last_tx_cons = tnapi->tx_cons;
+	}
+}
+
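
Stripped of the register accesses, the detector above behaves like the model
below (types and names are hypothetical): it replays the interrupt only after
the consumer indices have sat still, with work pending, for two consecutive
timer ticks.

	#include <stdbool.h>
	#include <stdint.h>

	struct msi_watch {
		uint32_t last_rx, last_tx;
		int stall_ticks;
	};

	/* Returns true when the caller should rewrite the interrupt
	 * mailbox to replay a presumably lost MSI. */
	static bool msi_tick(struct msi_watch *w, bool has_work,
			     uint32_t rx_cons, uint32_t tx_cons)
	{
		bool stalled = has_work && w->last_rx == rx_cons &&
			       w->last_tx == tx_cons;

		if (stalled && w->stall_ticks < 1) {
			w->stall_ticks++;	/* first stalled tick: wait */
			return false;
		}
		w->stall_ticks = 0;
		w->last_rx = rx_cons;
		w->last_tx = tx_cons;
		return stalled;			/* second stalled tick: replay */
	}
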
 static void tg3_timer(unsigned long __opaque)
 {
 	struct tg3 *tp = (struct tg3 *) __opaque;
@@ -8825,6 +8888,10 @@
 
 	spin_lock(&tp->lock);
 
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+		tg3_chk_missed_msi(tp);
+
 	if (!tg3_flag(tp, TAGGED_STATUS)) {
 		/* All of this garbage is because when using non-tagged
 		 * IRQ status the mailbox/status_block protocol the chip
@@ -9300,7 +9367,9 @@
 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 		tg3_free_rings(tp);
 	} else {
-		if (tg3_flag(tp, TAGGED_STATUS))
+		if (tg3_flag(tp, TAGGED_STATUS) &&
+			GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
+			GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765)
 			tp->timer_offset = HZ;
 		else
 			tp->timer_offset = HZ / 10;
@@ -9902,6 +9971,18 @@
 	}
 
 	cmd->advertising = tp->link_config.advertising;
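+	/* Fold the configured flow-control mode into the advertised pause
+	 * bits (IEEE 802.3 Annex 28B): RX+TX advertises Pause, RX only
+	 * advertises Pause | Asym_Pause, TX only advertises Asym_Pause. */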
+	if (tg3_flag(tp, PAUSE_AUTONEG)) {
+		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
+			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
+				cmd->advertising |= ADVERTISED_Pause;
+			} else {
+				cmd->advertising |= ADVERTISED_Pause |
+						    ADVERTISED_Asym_Pause;
+			}
+		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
+			cmd->advertising |= ADVERTISED_Asym_Pause;
+		}
+	}
 	if (netif_running(dev)) {
 		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
 		cmd->duplex = tp->link_config.active_duplex;
@@ -10436,6 +10517,9 @@
 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
+#define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
+#define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
+#define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x4c
 #define NVRAM_SELFBOOT_HW_SIZE 0x20
 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
 
@@ -10466,8 +10550,17 @@
 			case TG3_EEPROM_SB_REVISION_3:
 				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
 				break;
+			case TG3_EEPROM_SB_REVISION_4:
+				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
+				break;
+			case TG3_EEPROM_SB_REVISION_5:
+				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
+				break;
+			case TG3_EEPROM_SB_REVISION_6:
+				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
+				break;
 			default:
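+				/* Unrecognized self-boot format revision:
+				 * report an error rather than silently
+				 * skipping the NVRAM test. */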
-				return 0;
+				return -EIO;
 			}
 		} else
 			return 0;
@@ -13666,15 +13759,8 @@
 		}
 	}
 
-	/* All chips can get confused if TX buffers
-	 * straddle the 4GB address boundary.
-	 */
-	tg3_flag_set(tp, 4G_DMA_BNDRY_BUG);
-
 	if (tg3_flag(tp, 5755_PLUS))
 		tg3_flag_set(tp, SHORT_DMA_BUG);
-	else
-		tg3_flag_set(tp, 40BIT_DMA_LIMIT_BUG);
 
 	if (tg3_flag(tp, 5717_PLUS))
 		tg3_flag_set(tp, LRG_PROD_RING_CAP);
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 5b3d2f3..bedc3b4 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -1118,10 +1118,10 @@
 #define  TG3_CPMU_EEEMD_EEE_ENABLE	 0x00100000
 #define TG3_CPMU_EEE_DBTMR1		0x000036b4
 #define  TG3_CPMU_DBTMR1_PCIEXIT_2047US	 0x07ff0000
-#define  TG3_CPMU_DBTMR1_LNKIDLE_2047US	 0x000070ff
+#define  TG3_CPMU_DBTMR1_LNKIDLE_2047US	 0x000007ff
 #define TG3_CPMU_EEE_DBTMR2		0x000036b8
 #define  TG3_CPMU_DBTMR2_APE_TX_2047US	 0x07ff0000
-#define  TG3_CPMU_DBTMR2_TXIDXEQ_2047US	 0x000070ff
+#define  TG3_CPMU_DBTMR2_TXIDXEQ_2047US	 0x000007ff
 #define TG3_CPMU_EEE_LNKIDL_CTRL	0x000036bc
 #define  TG3_CPMU_EEE_LNKIDL_PCIE_NL0	 0x01000000
 #define  TG3_CPMU_EEE_LNKIDL_UART_IDL	 0x00000004
@@ -2152,14 +2152,6 @@
 
 
 /*** Tigon3 specific PHY MII registers. ***/
-#define  TG3_BMCR_SPEED1000		0x0040
-
-#define MII_TG3_CTRL			0x09 /* 1000-baseT control register */
-#define  MII_TG3_CTRL_ADV_1000_HALF	0x0100
-#define  MII_TG3_CTRL_ADV_1000_FULL	0x0200
-#define  MII_TG3_CTRL_AS_MASTER		0x0800
-#define  MII_TG3_CTRL_ENABLE_AS_MASTER	0x1000
-
 #define MII_TG3_MMD_CTRL		0x0d /* MMD Access Control register */
 #define MII_TG3_MMD_CTRL_DATA_NOINC	0x4000
 #define MII_TG3_MMD_ADDRESS		0x0e /* MMD Address Data register */
@@ -2800,6 +2792,7 @@
 	struct tg3			*tp;
 	struct tg3_hw_status		*hw_status;
 
+	u32				chk_msi_cnt;
 	u32				last_tag;
 	u32				last_irq_tag;
 	u32				int_mbox;
@@ -2807,6 +2800,7 @@
 
 	u32				consmbox ____cacheline_aligned;
 	u32				rx_rcb_ptr;
+	u32				last_rx_cons;
 	u16				*rx_rcb_prod_idx;
 	struct tg3_rx_prodring_set	prodring;
 	struct tg3_rx_buffer_desc	*rx_rcb;
@@ -2814,6 +2808,7 @@
 	u32				tx_prod	____cacheline_aligned;
 	u32				tx_cons;
 	u32				tx_pending;
+	u32				last_tx_cons;
 	u32				prodmbox;
 	struct tg3_tx_buffer_desc	*tx_ring;
 	struct ring_info		*tx_buffers;
@@ -2893,8 +2888,6 @@
 	TG3_FLAG_NO_NVRAM,
 	TG3_FLAG_ENABLE_RSS,
 	TG3_FLAG_ENABLE_TSS,
-	TG3_FLAG_4G_DMA_BNDRY_BUG,
-	TG3_FLAG_40BIT_DMA_LIMIT_BUG,
 	TG3_FLAG_SHORT_DMA_BUG,
 	TG3_FLAG_USE_JUMBO_BDFLAG,
 	TG3_FLAG_L1PLLPD_EN,
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index ace6404..145871b 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -29,8 +29,10 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/hardirq.h>
 #include <linux/module.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/ioport.h>
 #include <linux/eisa.h>
 #include <linux/pci.h>
diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c
index ff32bef..b6162fe 100644
--- a/drivers/net/tokenring/3c359.c
+++ b/drivers/net/tokenring/3c359.c
@@ -304,7 +304,7 @@
 
 	if ((i = pci_request_regions(pdev,"3c359"))) { 
 		return i ; 
-	} ; 
+	}
 
 	/* 
 	 * Allowing init_trdev to allocate the private data will align
@@ -1773,7 +1773,9 @@
 	if (readb(xl_mmio + MMIO_MACDATA) != 0) {  /* Misr not clear */
 		for (i=0; i<6; i++) { 
 			writel(MEM_BYTE_READ | 0xDFFE0 | i, xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-			while (readb(xl_mmio + MMIO_MACDATA) != 0 ) {} ; /* Empty Loop */
+			while (readb(xl_mmio + MMIO_MACDATA) != 0) {
+				;	/* Empty Loop */
+			}
 		} 
 	}
 
diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c
index 4786497..e257a00 100644
--- a/drivers/net/tokenring/ibmtr.c
+++ b/drivers/net/tokenring/ibmtr.c
@@ -123,6 +123,7 @@
 /* some 95 OS send many non UI frame; this allow removing the warning */
 #define TR_FILTERNONUI	1
 
+#include <linux/interrupt.h>
 #include <linux/ioport.h>
 #include <linux/netdevice.h>
 #include <linux/ip.h>
@@ -177,7 +178,7 @@
 	case 0xD: return "16/4 Adapter/A (short) | 16/4 ISA-16 Adapter";
 	case 0xC: return "Auto 16/4 Adapter";
 	default: return "adapter (unknown type)";
-	};
+	}
 };
 
 #define TRC_INIT 0x01		/*  Trace initialization & PROBEs */
diff --git a/drivers/net/tsi108_eth.c b/drivers/net/tsi108_eth.c
index 5c633a3..64cb9ac 100644
--- a/drivers/net/tsi108_eth.c
+++ b/drivers/net/tsi108_eth.c
@@ -33,6 +33,7 @@
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/net.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index e2f6923..ce90efc 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -38,6 +38,7 @@
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/pci.h>
 #include <linux/delay.h>
 #include <linux/ethtool.h>
diff --git a/drivers/net/tulip/pnic.c b/drivers/net/tulip/pnic.c
index aa4d9da..52d898b 100644
--- a/drivers/net/tulip/pnic.c
+++ b/drivers/net/tulip/pnic.c
@@ -13,6 +13,7 @@
 	Please submit bugs to http://bugzilla.kernel.org/ .
 */
 
+#include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/jiffies.h>
 #include "tulip.h"
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 82f8764..1246998 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -28,6 +28,7 @@
 #include <linux/slab.h>
 #include "tulip.h"
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/etherdevice.h>
 #include <linux/delay.h>
 #include <linux/mii.h>
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 5235f48..9a6b382 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -572,9 +572,9 @@
 
 /* prepad is the amount to reserve at front.  len is length after that.
  * linear is a hint as to how much to copy (usually headers). */
-static inline struct sk_buff *tun_alloc_skb(struct tun_struct *tun,
-					    size_t prepad, size_t len,
-					    size_t linear, int noblock)
+static struct sk_buff *tun_alloc_skb(struct tun_struct *tun,
+				     size_t prepad, size_t len,
+				     size_t linear, int noblock)
 {
 	struct sock *sk = tun->socket.sk;
 	struct sk_buff *skb;
@@ -600,13 +600,13 @@
 }
 
 /* Get packet from user space buffer */
-static __inline__ ssize_t tun_get_user(struct tun_struct *tun,
-				       const struct iovec *iv, size_t count,
-				       int noblock)
+static ssize_t tun_get_user(struct tun_struct *tun,
+			    const struct iovec *iv, size_t count,
+			    int noblock)
 {
 	struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
 	struct sk_buff *skb;
-	size_t len = count, align = 0;
+	size_t len = count, align = NET_SKB_PAD;
 	struct virtio_net_hdr gso = { 0 };
 	int offset = 0;
 
@@ -636,7 +636,7 @@
 	}
 
 	if ((tun->flags & TUN_TYPE_MASK) == TUN_TAP_DEV) {
-		align = NET_IP_ALIGN;
+		align += NET_IP_ALIGN;
 		if (unlikely(len < ETH_HLEN ||
 			     (gso.hdr_len && gso.hdr_len < ETH_HLEN)))
 			return -EINVAL;
@@ -688,7 +688,7 @@
 	case TUN_TAP_DEV:
 		skb->protocol = eth_type_trans(skb, tun->dev);
 		break;
-	};
+	}
 
 	if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
 		pr_debug("GSO!\n");
@@ -751,9 +751,9 @@
 }
 
 /* Put packet to the user space buffer */
-static __inline__ ssize_t tun_put_user(struct tun_struct *tun,
-				       struct sk_buff *skb,
-				       const struct iovec *iv, int len)
+static ssize_t tun_put_user(struct tun_struct *tun,
+			    struct sk_buff *skb,
+			    const struct iovec *iv, int len)
 {
 	struct tun_pi pi = { 0, skb->protocol };
 	ssize_t total = 0;
@@ -810,6 +810,8 @@
 			gso.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
 			gso.csum_start = skb_checksum_start_offset(skb);
 			gso.csum_offset = skb->csum_offset;
+		} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
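+			/* Checksum was already verified on receive;
+			 * advertise DATA_VALID so the peer can skip
+			 * re-checking it. */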
+			gso.flags = VIRTIO_NET_HDR_F_DATA_VALID;
 		} /* else everything is zero */
 
 		if (unlikely(memcpy_toiovecend(iv, (void *)&gso, total,
@@ -839,7 +841,8 @@
 
 	tun_debug(KERN_INFO, tun, "tun_chr_read\n");
 
-	add_wait_queue(&tun->wq.wait, &wait);
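+	/* Only blocking readers need to sit on the wait queue; O_NONBLOCK
+	 * callers bail out with -EAGAIN and never sleep. */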
+	if (unlikely(!noblock))
+		add_wait_queue(&tun->wq.wait, &wait);
 	while (len) {
 		current->state = TASK_INTERRUPTIBLE;
 
@@ -870,7 +873,8 @@
 	}
 
 	current->state = TASK_RUNNING;
-	remove_wait_queue(&tun->wq.wait, &wait);
+	if (unlikely(!noblock))
+		remove_wait_queue(&tun->wq.wait, &wait);
 
 	return ret;
 }
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index ef04105..3127700 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -3165,6 +3165,8 @@
 
 	ugeth->txBd[txQ] = bd;
 
+	skb_tx_timestamp(skb);
+
 	if (ugeth->p_scheduler) {
 		ugeth->cpucount[txQ]++;
 		/* Indicate to QE that there are more Tx bds ready for
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 81126ff..15772b1 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -409,12 +409,6 @@
 	usb_unlink_urb(dev->tx_urb);
 }
 
-static struct net_device_stats *ipheth_stats(struct net_device *net)
-{
-	struct ipheth_device *dev = netdev_priv(net);
-	return &dev->net->stats;
-}
-
 static u32 ipheth_ethtool_op_get_link(struct net_device *net)
 {
 	struct ipheth_device *dev = netdev_priv(net);
@@ -426,11 +420,10 @@
 };
 
 static const struct net_device_ops ipheth_netdev_ops = {
-	.ndo_open = &ipheth_open,
-	.ndo_stop = &ipheth_close,
-	.ndo_start_xmit = &ipheth_tx,
-	.ndo_tx_timeout = &ipheth_tx_timeout,
-	.ndo_get_stats = &ipheth_stats,
+	.ndo_open = ipheth_open,
+	.ndo_stop = ipheth_close,
+	.ndo_start_xmit = ipheth_tx,
+	.ndo_tx_timeout = ipheth_tx_timeout,
 };
 
 static int ipheth_probe(struct usb_interface *intf,
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 8461576..4b6db3b 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -12,6 +12,7 @@
 #include <linux/slab.h>
 #include <linux/ethtool.h>
 #include <linux/etherdevice.h>
+#include <linux/u64_stats_sync.h>
 
 #include <net/dst.h>
 #include <net/xfrm.h>
@@ -24,12 +25,13 @@
 #define MAX_MTU 65535		/* Max L3 MTU (arbitrary) */
 
 struct veth_net_stats {
-	unsigned long	rx_packets;
-	unsigned long	tx_packets;
-	unsigned long	rx_bytes;
-	unsigned long	tx_bytes;
-	unsigned long	tx_dropped;
-	unsigned long	rx_dropped;
+	u64			rx_packets;
+	u64			tx_packets;
+	u64			rx_bytes;
+	u64			tx_bytes;
+	u64			rx_dropped;
+	u64			tx_dropped;
+	struct u64_stats_sync	syncp;
 };
 
 struct veth_priv {
@@ -137,21 +139,29 @@
 	if (dev_forward_skb(rcv, skb) != NET_RX_SUCCESS)
 		goto rx_drop;
 
+	u64_stats_update_begin(&stats->syncp);
 	stats->tx_bytes += length;
 	stats->tx_packets++;
+	u64_stats_update_end(&stats->syncp);
 
+	u64_stats_update_begin(&rcv_stats->syncp);
 	rcv_stats->rx_bytes += length;
 	rcv_stats->rx_packets++;
+	u64_stats_update_end(&rcv_stats->syncp);
 
 	return NETDEV_TX_OK;
 
 tx_drop:
 	kfree_skb(skb);
+	u64_stats_update_begin(&stats->syncp);
 	stats->tx_dropped++;
+	u64_stats_update_end(&stats->syncp);
 	return NETDEV_TX_OK;
 
 rx_drop:
+	u64_stats_update_begin(&rcv_stats->syncp);
 	rcv_stats->rx_dropped++;
+	u64_stats_update_end(&rcv_stats->syncp);
 	return NETDEV_TX_OK;
 }
 
@@ -159,32 +169,36 @@
  * general routines
  */
 
-static struct net_device_stats *veth_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *veth_get_stats64(struct net_device *dev,
+						  struct rtnl_link_stats64 *tot)
 {
-	struct veth_priv *priv;
+	struct veth_priv *priv = netdev_priv(dev);
 	int cpu;
-	struct veth_net_stats *stats, total = {0};
-
-	priv = netdev_priv(dev);
 
 	for_each_possible_cpu(cpu) {
-		stats = per_cpu_ptr(priv->stats, cpu);
+		struct veth_net_stats *stats = per_cpu_ptr(priv->stats, cpu);
+		u64 rx_packets, rx_bytes, rx_dropped;
+		u64 tx_packets, tx_bytes, tx_dropped;
+		unsigned int start;
 
-		total.rx_packets += stats->rx_packets;
-		total.tx_packets += stats->tx_packets;
-		total.rx_bytes   += stats->rx_bytes;
-		total.tx_bytes   += stats->tx_bytes;
-		total.tx_dropped += stats->tx_dropped;
-		total.rx_dropped += stats->rx_dropped;
+		do {
+			start = u64_stats_fetch_begin_bh(&stats->syncp);
+			rx_packets = stats->rx_packets;
+			tx_packets = stats->tx_packets;
+			rx_bytes = stats->rx_bytes;
+			tx_bytes = stats->tx_bytes;
+			rx_dropped = stats->rx_dropped;
+			tx_dropped = stats->tx_dropped;
+		} while (u64_stats_fetch_retry_bh(&stats->syncp, start));
+		tot->rx_packets += rx_packets;
+		tot->tx_packets += tx_packets;
+		tot->rx_bytes   += rx_bytes;
+		tot->tx_bytes   += tx_bytes;
+		tot->rx_dropped += rx_dropped;
+		tot->tx_dropped += tx_dropped;
 	}
-	dev->stats.rx_packets = total.rx_packets;
-	dev->stats.tx_packets = total.tx_packets;
-	dev->stats.rx_bytes   = total.rx_bytes;
-	dev->stats.tx_bytes   = total.tx_bytes;
-	dev->stats.tx_dropped = total.tx_dropped;
-	dev->stats.rx_dropped = total.rx_dropped;
 
-	return &dev->stats;
+	return tot;
 }
 
 static int veth_open(struct net_device *dev)
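
The fetch/retry loop in veth_get_stats64() (and the update_begin/end pairs in
the xmit path) follow the seqcount discipline that u64_stats_sync provides for
64-bit counters on 32-bit hosts; on 64-bit kernels the helpers compile away.
A minimal userspace model of the idea, with the memory barriers a real
implementation needs omitted for brevity:

	#include <stdint.h>

	/* Toy seqcount: the writer makes the sequence odd while updating;
	 * the reader retries if it saw an odd value or if the sequence
	 * changed while it was copying. */
	struct snap_stats {
		volatile unsigned int seq;
		uint64_t packets, bytes;
	};

	static void stats_add(struct snap_stats *s, uint64_t pkts, uint64_t b)
	{
		s->seq++;		/* begin write: sequence goes odd */
		s->packets += pkts;
		s->bytes += b;
		s->seq++;		/* end write: sequence even again */
	}

	static void stats_read(const struct snap_stats *s,
			       uint64_t *pkts, uint64_t *b)
	{
		unsigned int start;

		do {
			start = s->seq;
			*pkts = s->packets;
			*b = s->bytes;
		} while ((start & 1) || start != s->seq);
	}
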
@@ -254,7 +268,7 @@
 	.ndo_stop            = veth_close,
 	.ndo_start_xmit      = veth_xmit,
 	.ndo_change_mtu      = veth_change_mtu,
-	.ndo_get_stats       = veth_get_stats,
+	.ndo_get_stats64     = veth_get_stats64,
 	.ndo_set_mac_address = eth_mac_addr,
 };
 
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index 06daa9d..f929242 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -1887,7 +1887,7 @@
 		else
 			netif_wake_queue(vptr->dev);
 
-	};
+	}
 	if (status & ISR_MIBFI)
 		velocity_update_hw_mibs(vptr);
 	if (status & ISR_LSTEI)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index f685324..be3686a 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -274,6 +274,8 @@
 					  hdr->hdr.csum_start,
 					  hdr->hdr.csum_offset))
 			goto frame_err;
+	} else if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) {
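+		/* Host says the checksum was already verified; mirror the
+		 * tx-side mapping back to CHECKSUM_UNNECESSARY. */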
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
 	}
 
 	skb->protocol = eth_type_trans(skb, dev);
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index fa6e2ac..33097ec 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -405,10 +405,8 @@
 
 	while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) {
 		struct vmxnet3_tx_buf_info *tbi;
-		union Vmxnet3_GenericDesc *gdesc;
 
 		tbi = tq->buf_info + tq->tx_ring.next2comp;
-		gdesc = tq->tx_ring.base + tq->tx_ring.next2comp;
 
 		vmxnet3_unmap_tx_buf(tbi, adapter->pdev);
 		if (tbi->skb) {
@@ -2864,7 +2862,7 @@
 		.ndo_set_mac_address = vmxnet3_set_mac_addr,
 		.ndo_change_mtu = vmxnet3_change_mtu,
 		.ndo_set_features = vmxnet3_set_features,
-		.ndo_get_stats = vmxnet3_get_stats,
+		.ndo_get_stats64 = vmxnet3_get_stats64,
 		.ndo_tx_timeout = vmxnet3_tx_timeout,
 		.ndo_set_multicast_list = vmxnet3_set_mc,
 		.ndo_vlan_rx_register = vmxnet3_vlan_rx_register,
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index dc959fe..bba7c15 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -113,15 +113,15 @@
 };
 
 
-struct net_device_stats *
-vmxnet3_get_stats(struct net_device *netdev)
+struct rtnl_link_stats64 *
+vmxnet3_get_stats64(struct net_device *netdev,
+		   struct rtnl_link_stats64 *stats)
 {
 	struct vmxnet3_adapter *adapter;
 	struct vmxnet3_tq_driver_stats *drvTxStats;
 	struct vmxnet3_rq_driver_stats *drvRxStats;
 	struct UPT1_TxStats *devTxStats;
 	struct UPT1_RxStats *devRxStats;
-	struct net_device_stats *net_stats = &netdev->stats;
 	unsigned long flags;
 	int i;
 
@@ -132,36 +132,36 @@
 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
 	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 
-	memset(net_stats, 0, sizeof(*net_stats));
 	for (i = 0; i < adapter->num_tx_queues; i++) {
 		devTxStats = &adapter->tqd_start[i].stats;
 		drvTxStats = &adapter->tx_queue[i].stats;
-		net_stats->tx_packets += devTxStats->ucastPktsTxOK +
-					devTxStats->mcastPktsTxOK +
-					devTxStats->bcastPktsTxOK;
-		net_stats->tx_bytes += devTxStats->ucastBytesTxOK +
-				      devTxStats->mcastBytesTxOK +
-				      devTxStats->bcastBytesTxOK;
-		net_stats->tx_errors += devTxStats->pktsTxError;
-		net_stats->tx_dropped += drvTxStats->drop_total;
+		stats->tx_packets += devTxStats->ucastPktsTxOK +
+				     devTxStats->mcastPktsTxOK +
+				     devTxStats->bcastPktsTxOK;
+		stats->tx_bytes += devTxStats->ucastBytesTxOK +
+				   devTxStats->mcastBytesTxOK +
+				   devTxStats->bcastBytesTxOK;
+		stats->tx_errors += devTxStats->pktsTxError;
+		stats->tx_dropped += drvTxStats->drop_total;
 	}
 
 	for (i = 0; i < adapter->num_rx_queues; i++) {
 		devRxStats = &adapter->rqd_start[i].stats;
 		drvRxStats = &adapter->rx_queue[i].stats;
-		net_stats->rx_packets += devRxStats->ucastPktsRxOK +
-					devRxStats->mcastPktsRxOK +
-					devRxStats->bcastPktsRxOK;
+		stats->rx_packets += devRxStats->ucastPktsRxOK +
+				     devRxStats->mcastPktsRxOK +
+				     devRxStats->bcastPktsRxOK;
 
-		net_stats->rx_bytes += devRxStats->ucastBytesRxOK +
-				      devRxStats->mcastBytesRxOK +
-				      devRxStats->bcastBytesRxOK;
+		stats->rx_bytes += devRxStats->ucastBytesRxOK +
+				   devRxStats->mcastBytesRxOK +
+				   devRxStats->bcastBytesRxOK;
 
-		net_stats->rx_errors += devRxStats->pktsRxError;
-		net_stats->rx_dropped += drvRxStats->drop_total;
-		net_stats->multicast +=  devRxStats->mcastPktsRxOK;
+		stats->rx_errors += devRxStats->pktsRxError;
+		stats->rx_dropped += drvRxStats->drop_total;
+		stats->multicast +=  devRxStats->mcastPktsRxOK;
 	}
-	return net_stats;
+
+	return stats;
 }
 
 static int
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index f50d36f..0e567c24 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -323,7 +323,6 @@
 	struct Vmxnet3_TxQueueDesc	*tqd_start;     /* all tx queue desc */
 	struct Vmxnet3_RxQueueDesc	*rqd_start;	/* all rx queue desc */
 	struct net_device		*netdev;
-	struct net_device_stats		net_stats;
 	struct pci_dev			*pdev;
 
 	u8			__iomem *hw_addr0; /* for BAR 0 */
@@ -407,7 +406,9 @@
 		      u32 tx_ring_size, u32 rx_ring_size, u32 rx_ring2_size);
 
 extern void vmxnet3_set_ethtool_ops(struct net_device *netdev);
-extern struct net_device_stats *vmxnet3_get_stats(struct net_device *netdev);
+
+extern struct rtnl_link_stats64 *
+vmxnet3_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats);
 
 extern char vmxnet3_driver_name[];
 #endif
diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
index 359b9b9..6219006 100644
--- a/drivers/net/vxge/vxge-config.h
+++ b/drivers/net/vxge/vxge-config.h
@@ -13,6 +13,7 @@
  ******************************************************************************/
 #ifndef VXGE_CONFIG_H
 #define VXGE_CONFIG_H
+#include <linux/hardirq.h>
 #include <linux/list.h>
 #include <linux/slab.h>
 
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index 8ab870a..e658edd 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -44,6 +44,7 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/if_vlan.h>
+#include <linux/interrupt.h>
 #include <linux/pci.h>
 #include <linux/slab.h>
 #include <linux/tcp.h>
diff --git a/drivers/net/wan/cycx_main.c b/drivers/net/wan/cycx_main.c
index 859dba9..a0976d1 100644
--- a/drivers/net/wan/cycx_main.c
+++ b/drivers/net/wan/cycx_main.c
@@ -50,6 +50,7 @@
 #include <linux/wanrouter.h>	/* WAN router definitions */
 #include <linux/cyclomx.h>	/* cyclomx common user API definitions */
 #include <linux/init.h>         /* __init (when not using as a module) */
+#include <linux/interrupt.h>
 
 unsigned int cycx_debug;
 
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index acb9ea8..3590d58 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -99,6 +99,7 @@
 #include <asm/irq.h>
 
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/string.h>
 
 #include <linux/if_arp.h>
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 777d1a4..0f27f4c 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -25,6 +25,7 @@
 #include <linux/slab.h>
 #include <linux/ioport.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/if.h>
 #include <linux/hdlc.h>
 #include <asm/io.h>
diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c
index db73a7b..4ea89fe 100644
--- a/drivers/net/wan/wanxl.c
+++ b/drivers/net/wan/wanxl.c
@@ -22,6 +22,7 @@
 #include <linux/string.h>
 #include <linux/errno.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/ioport.h>
 #include <linux/netdevice.h>
 #include <linux/hdlc.h>
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index afe2cbc..43ebc44 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -16,6 +16,7 @@
  */
 
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/if.h>
 #include <linux/skbuff.h>
 #include <linux/slab.h>
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index 7cf4317..17c4b56 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -161,6 +161,7 @@
 	const struct ath_bus_ops *bus_ops;
 
 	bool btcoex_enabled;
+	bool disable_ani;
 };
 
 struct sk_buff *ath_rxbuf_alloc(struct ath_common *common,
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
index 077e8a6..45b262f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
@@ -28,11 +28,6 @@
 	((struct ath_desc*) ds)->ds_link = ds_link;
 }
 
-static void ar9002_hw_get_desc_link(void *ds, u32 **ds_link)
-{
-	*ds_link = &((struct ath_desc *)ds)->ds_link;
-}
-
 static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
 {
 	u32 isr = 0;
@@ -437,7 +432,6 @@
 
 	ops->rx_enable = ar9002_hw_rx_enable;
 	ops->set_desc_link = ar9002_hw_set_desc_link;
-	ops->get_desc_link = ar9002_hw_get_desc_link;
 	ops->get_isr = ar9002_hw_get_isr;
 	ops->fill_txdesc = ar9002_hw_fill_txdesc;
 	ops->proc_txdesc = ar9002_hw_proc_txdesc;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index 10d71f7..04e6be0 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -43,13 +43,6 @@
 	ads->ctl10 |= ar9003_calc_ptr_chksum(ads);
 }
 
-static void ar9003_hw_get_desc_link(void *ds, u32 **ds_link)
-{
-	struct ar9003_txc *ads = ds;
-
-	*ds_link = &ads->link;
-}
-
 static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
 {
 	u32 isr = 0;
@@ -498,7 +491,6 @@
 
 	ops->rx_enable = ar9003_hw_rx_enable;
 	ops->set_desc_link = ar9003_hw_set_desc_link;
-	ops->get_desc_link = ar9003_hw_get_desc_link;
 	ops->get_isr = ar9003_hw_get_isr;
 	ops->fill_txdesc = ar9003_hw_fill_txdesc;
 	ops->proc_txdesc = ar9003_hw_proc_txdesc;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
index e4d6a87..2e7f0f2 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
@@ -46,11 +46,10 @@
 
 static int ar9003_get_training_power_2g(struct ath_hw *ah)
 {
-	struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
-	struct ar9300_modal_eep_header *hdr = &eep->modalHeader2G;
+	struct ath9k_channel *chan = ah->curchan;
 	unsigned int power, scale, delta;
 
-	scale = MS(le32_to_cpu(hdr->papdRateMaskHt20), AR9300_PAPRD_SCALE_1);
+	scale = ar9003_get_paprd_scale_factor(ah, chan);
 	power = REG_READ_FIELD(ah, AR_PHY_POWERTX_RATE5,
 			       AR_PHY_POWERTX_RATE5_POWERTXHT20_0);
 
@@ -67,20 +66,10 @@
 static int ar9003_get_training_power_5g(struct ath_hw *ah)
 {
 	struct ath_common *common = ath9k_hw_common(ah);
-	struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
-	struct ar9300_modal_eep_header *hdr = &eep->modalHeader5G;
 	struct ath9k_channel *chan = ah->curchan;
 	unsigned int power, scale, delta;
 
-	if (chan->channel >= 5700)
-		scale = MS(le32_to_cpu(hdr->papdRateMaskHt20),
-			   AR9300_PAPRD_SCALE_1);
-	else if (chan->channel >= 5400)
-		scale = MS(le32_to_cpu(hdr->papdRateMaskHt40),
-			   AR9300_PAPRD_SCALE_2);
-	else
-		scale = MS(le32_to_cpu(hdr->papdRateMaskHt40),
-			   AR9300_PAPRD_SCALE_1);
+	scale = ar9003_get_paprd_scale_factor(ah, chan);
 
 	if (IS_CHAN_HT40(chan))
 		power = REG_READ_FIELD(ah, AR_PHY_POWERTX_RATE8,
@@ -119,15 +108,16 @@
 	else
 		training_power = ar9003_get_training_power_5g(ah);
 
+	ath_dbg(common, ATH_DBG_CALIBRATE,
+		"Training power: %d, Target power: %d\n",
+		training_power, ah->paprd_target_power);
+
 	if (training_power < 0) {
 		ath_dbg(common, ATH_DBG_CALIBRATE,
 			"PAPRD target power delta out of range");
 		return -ERANGE;
 	}
 	ah->paprd_training_power = training_power;
-	ath_dbg(common, ATH_DBG_CALIBRATE,
-		"Training power: %d, Target power: %d\n",
-		ah->paprd_training_power, ah->paprd_target_power);
 
 	REG_RMW_FIELD(ah, AR_PHY_PAPRD_AM2AM, AR_PHY_PAPRD_AM2AM_MASK,
 		      ah->paprd_ratemask);
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index f75068b..57933db 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -19,6 +19,7 @@
 
 #include <linux/etherdevice.h>
 #include <linux/device.h>
+#include <linux/interrupt.h>
 #include <linux/leds.h>
 #include <linux/completion.h>
 
@@ -179,7 +180,7 @@
 struct ath_txq {
 	int mac80211_qnum; /* mac80211 queue number, -1 means not mac80211 Q */
 	u32 axq_qnum; /* ath9k hardware queue number */
-	u32 *axq_link;
+	void *axq_link;
 	struct list_head axq_q;
 	spinlock_t axq_lock;
 	u32 axq_depth;
@@ -188,7 +189,6 @@
 	bool axq_tx_inprogress;
 	struct list_head axq_acq;
 	struct list_head txq_fifo[ATH_TXFIFO_DEPTH];
-	struct list_head txq_fifo_pending;
 	u8 txq_headidx;
 	u8 txq_tailidx;
 	int pending_frames;
@@ -428,6 +428,7 @@
 void ath_hw_pll_work(struct work_struct *work);
 void ath_paprd_calibrate(struct work_struct *work);
 void ath_ani_calibrate(unsigned long data);
+void ath_start_ani(struct ath_common *common);
 
 /**********/
 /* BTCOEX */
@@ -669,12 +670,8 @@
 		    const struct ath_bus_ops *bus_ops);
 void ath9k_deinit_device(struct ath_softc *sc);
 void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw);
-int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
-		    struct ath9k_channel *hchan);
 
-void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw);
 void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw);
-bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode);
 bool ath9k_uses_beacons(int type);
 
 #ifdef CONFIG_ATH9K_PCI
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index d4d8cec..0174cdb 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -496,7 +496,7 @@
 	u32 nexttbtt, intval;
 
 	/* NB: the beacon interval is kept internally in TU's */
-	intval = TU_TO_USEC(conf->beacon_interval & ATH9K_BEACON_PERIOD);
+	intval = TU_TO_USEC(conf->beacon_interval);
 	intval /= ATH_BCBUF;    /* for staggered beacons */
 	nexttbtt = intval;
 
@@ -543,7 +543,7 @@
 	}
 
 	memset(&bs, 0, sizeof(bs));
-	intval = conf->beacon_interval & ATH9K_BEACON_PERIOD;
+	intval = conf->beacon_interval;
 
 	/*
 	 * Setup dtim and cfp parameters according to
@@ -652,22 +652,13 @@
 {
 	struct ath_hw *ah = sc->sc_ah;
 	struct ath_common *common = ath9k_hw_common(ah);
-	u32 tsf, delta, intval, nexttbtt;
+	u32 tsf, intval, nexttbtt;
 
 	ath9k_reset_beacon_status(sc);
 
-	tsf = ath9k_hw_gettsf32(ah) + TU_TO_USEC(FUDGE);
-	intval = TU_TO_USEC(conf->beacon_interval & ATH9K_BEACON_PERIOD);
-
-	if (!sc->beacon.bc_tstamp)
-		nexttbtt = tsf + intval;
-	else {
-		if (tsf > sc->beacon.bc_tstamp)
-			delta = (tsf - sc->beacon.bc_tstamp);
-		else
-			delta = (tsf + 1 + (~0U - sc->beacon.bc_tstamp));
-		nexttbtt = tsf + intval - (delta % intval);
-	}
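+	/* Round the fudged TSF up to the next beacon-interval boundary
+	 * instead of deriving the offset from the last beacon timestamp. */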
+	intval = TU_TO_USEC(conf->beacon_interval);
+	tsf = roundup(ath9k_hw_gettsf32(ah) + TU_TO_USEC(FUDGE), intval);
+	nexttbtt = tsf + intval;
 
 	ath_dbg(common, ATH_DBG_BEACON,
 		"IBSS nexttbtt %u intval %u (%u)\n",
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index d55ffd7..22d3a26 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -176,6 +176,56 @@
 	.llseek = default_llseek,
 };
 
+static ssize_t read_file_disable_ani(struct file *file, char __user *user_buf,
+			     size_t count, loff_t *ppos)
+{
+	struct ath_softc *sc = file->private_data;
+	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+	char buf[32];
+	unsigned int len;
+
+	len = sprintf(buf, "%d\n", common->disable_ani);
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_disable_ani(struct file *file,
+				      const char __user *user_buf,
+				      size_t count, loff_t *ppos)
+{
+	struct ath_softc *sc = file->private_data;
+	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+	unsigned long disable_ani;
+	char buf[32];
+	ssize_t len;
+
+	len = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, len))
+		return -EFAULT;
+
+	buf[len] = '\0';
+	if (strict_strtoul(buf, 0, &disable_ani))
+		return -EINVAL;
+
+	common->disable_ani = !!disable_ani;
+
+	if (disable_ani) {
+		sc->sc_flags &= ~SC_OP_ANI_RUN;
+		del_timer_sync(&common->ani.timer);
+	} else {
+		sc->sc_flags |= SC_OP_ANI_RUN;
+		ath_start_ani(common);
+	}
+
+	return count;
+}
+
+static const struct file_operations fops_disable_ani = {
+	.read = read_file_disable_ani,
+	.write = write_file_disable_ani,
+	.open = ath9k_debugfs_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
 
 static ssize_t read_file_dma(struct file *file, char __user *user_buf,
 			     size_t count, loff_t *ppos)
@@ -550,6 +600,7 @@
 
 	PR("MPDUs Queued:    ", queued);
 	PR("MPDUs Completed: ", completed);
+	PR("MPDUs XRetried:  ", xretries);
 	PR("Aggregates:      ", a_aggr);
 	PR("AMPDUs Queued HW:", a_queued_hw);
 	PR("AMPDUs Queued SW:", a_queued_sw);
@@ -587,7 +638,6 @@
 
 	PRQLE("axq_q empty:       ", axq_q);
 	PRQLE("axq_acq empty:     ", axq_acq);
-	PRQLE("txq_fifo_pending:  ", txq_fifo_pending);
 	for (i = 0; i < ATH_TXFIFO_DEPTH; i++) {
 		snprintf(tmp, sizeof(tmp) - 1, "txq_fifo[%i] empty: ", i);
 		PRQLE(tmp, txq_fifo[i]);
@@ -807,7 +857,10 @@
 		else
 			TX_STAT_INC(qnum, a_completed);
 	} else {
-		TX_STAT_INC(qnum, completed);
+		if (bf_isxretried(bf))
+			TX_STAT_INC(qnum, xretries);
+		else
+			TX_STAT_INC(qnum, completed);
 	}
 
 	if (ts->ts_status & ATH9K_TXERR_FIFO)
@@ -1160,6 +1213,8 @@
 			    sc->debug.debugfs_phy, sc, &fops_rx_chainmask);
 	debugfs_create_file("tx_chainmask", S_IRUSR | S_IWUSR,
 			    sc->debug.debugfs_phy, sc, &fops_tx_chainmask);
+	debugfs_create_file("disable_ani", S_IRUSR | S_IWUSR,
+			    sc->debug.debugfs_phy, sc, &fops_disable_ani);
 	debugfs_create_file("regidx", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
 			    sc, &fops_regidx);
 	debugfs_create_file("regval", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 8ce6ad8..4a04510 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -116,6 +116,7 @@
 	u32 tx_bytes_all;
 	u32 queued;
 	u32 completed;
+	u32 xretries;
 	u32 a_aggr;
 	u32 a_queued_hw;
 	u32 a_queued_sw;
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
index aa6a731..57fe22b 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
@@ -79,7 +79,7 @@
 
 	memset(&bs, 0, sizeof(bs));
 
-	intval = bss_conf->beacon_interval & ATH9K_BEACON_PERIOD;
+	intval = bss_conf->beacon_interval;
 	bmiss_timeout = (ATH_DEFAULT_BMISS_LIMIT * bss_conf->beacon_interval);
 
 	/*
@@ -194,7 +194,7 @@
 	u8 cmd_rsp;
 	u64 tsf;
 
-	intval = bss_conf->beacon_interval & ATH9K_BEACON_PERIOD;
+	intval = bss_conf->beacon_interval;
 	intval /= ATH9K_HTC_MAX_BCN_VIF;
 	nexttbtt = intval;
 
@@ -250,7 +250,7 @@
 	u8 cmd_rsp;
 	u64 tsf;
 
-	intval = bss_conf->beacon_interval & ATH9K_BEACON_PERIOD;
+	intval = bss_conf->beacon_interval;
 	nexttbtt = intval;
 
 	/*
@@ -427,7 +427,7 @@
 	u16 intval;
 	int slot;
 
-	intval = priv->cur_beacon_conf.beacon_interval & ATH9K_BEACON_PERIOD;
+	intval = priv->cur_beacon_conf.beacon_interval;
 
 	tsf = be64_to_cpu(swba->tsf);
 	tsftu = TSF_TO_TU(tsf >> 32, tsf);
diff --git a/drivers/net/wireless/ath/ath9k/hw-ops.h b/drivers/net/wireless/ath/ath9k/hw-ops.h
index 2f3e072..cb29e88 100644
--- a/drivers/net/wireless/ath/ath9k/hw-ops.h
+++ b/drivers/net/wireless/ath/ath9k/hw-ops.h
@@ -39,11 +39,6 @@
 	ath9k_hw_ops(ah)->set_desc_link(ds, link);
 }
 
-static inline void ath9k_hw_get_desc_link(struct ath_hw *ah, void *ds,
-					  u32 **link)
-{
-	ath9k_hw_ops(ah)->get_desc_link(ds, link);
-}
 static inline bool ath9k_hw_calibrate(struct ath_hw *ah,
 				      struct ath9k_channel *chan,
 				      u8 rxchainmask,
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 1be7c8b..6de2655 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1785,16 +1785,16 @@
 	REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(bs->bs_nexttbtt));
 
 	REG_WRITE(ah, AR_BEACON_PERIOD,
-		  TU_TO_USEC(bs->bs_intval & ATH9K_BEACON_PERIOD));
+		  TU_TO_USEC(bs->bs_intval));
 	REG_WRITE(ah, AR_DMA_BEACON_PERIOD,
-		  TU_TO_USEC(bs->bs_intval & ATH9K_BEACON_PERIOD));
+		  TU_TO_USEC(bs->bs_intval));
 
 	REGWRITE_BUFFER_FLUSH(ah);
 
 	REG_RMW_FIELD(ah, AR_RSSI_THR,
 		      AR_RSSI_THR_BM_THR, bs->bs_bmissthreshold);
 
-	beaconintval = bs->bs_intval & ATH9K_BEACON_PERIOD;
+	beaconintval = bs->bs_intval;
 
 	if (bs->bs_sleepduration > beaconintval)
 		beaconintval = bs->bs_sleepduration;
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 4b157c5..6a6fb54 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -403,7 +403,6 @@
 	u32 bs_nexttbtt;
 	u32 bs_nextdtim;
 	u32 bs_intval;
-#define ATH9K_BEACON_PERIOD       0x0000ffff
 #define ATH9K_TSFOOR_THRESHOLD    0x00004240 /* 16k us */
 	u32 bs_dtimperiod;
 	u16 bs_cfpperiod;
@@ -603,7 +602,6 @@
 				     int power_off);
 	void (*rx_enable)(struct ath_hw *ah);
 	void (*set_desc_link)(void *ds, u32 link);
-	void (*get_desc_link)(void *ds, u32 **link);
 	bool (*calibrate)(struct ath_hw *ah,
 			  struct ath9k_channel *chan,
 			  u8 rxchainmask,
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 45c585a..d4b166c 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -519,7 +519,6 @@
 {
 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 	int i = 0;
-
 	setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);
 
 	sc->config.txpowlimit = ATH_TXPOWER_MAX;
@@ -585,6 +584,7 @@
 	common->priv = sc;
 	common->debug_mask = ath9k_debug;
 	common->btcoex_enabled = ath9k_btcoex_enable == 1;
+	common->disable_ani = false;
 	spin_lock_init(&common->cc_lock);
 
 	spin_lock_init(&sc->sc_serial_rw);
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 2ca351f..7f94533 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -62,14 +62,12 @@
 
 	if (txq->axq_depth || !list_empty(&txq->axq_acq))
 		pending = true;
-	else if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
-		pending = !list_empty(&txq->txq_fifo_pending);
 
 	spin_unlock_bh(&txq->axq_lock);
 	return pending;
 }
 
-bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode)
+static bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode)
 {
 	unsigned long flags;
 	bool ret;
@@ -136,7 +134,7 @@
 	spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
 }
 
-static void ath_start_ani(struct ath_common *common)
+void ath_start_ani(struct ath_common *common)
 {
 	struct ath_hw *ah = common->ah;
 	unsigned long timestamp = jiffies_to_msecs(jiffies);
@@ -219,7 +217,7 @@
  * by resetting the chip.  To accomplish this we must first clean up any pending
  * DMA, then restart stuff.
 */
-int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
+static int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
 		    struct ath9k_channel *hchan)
 {
 	struct ath_hw *ah = sc->sc_ah;
@@ -302,7 +300,8 @@
 			ath_set_beacon(sc);
 		ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
 		ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, HZ/2);
-		ath_start_ani(common);
+		if (!common->disable_ani)
+			ath_start_ani(common);
 	}
 
  ps_restore:
@@ -394,12 +393,14 @@
 	if (!caldata)
 		return;
 
+	ath9k_ps_wakeup(sc);
+
 	if (ar9003_paprd_init_table(ah) < 0)
-		return;
+		goto fail_paprd;
 
 	skb = alloc_skb(len, GFP_KERNEL);
 	if (!skb)
-		return;
+		goto fail_paprd;
 
 	skb_put(skb, len);
 	memset(skb->data, 0, len);
@@ -411,7 +412,6 @@
 	memcpy(hdr->addr2, hw->wiphy->perm_addr, ETH_ALEN);
 	memcpy(hdr->addr3, hw->wiphy->perm_addr, ETH_ALEN);
 
-	ath9k_ps_wakeup(sc);
 	for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
 		if (!(common->tx_chainmask & BIT(chain)))
 			continue;
@@ -515,24 +515,19 @@
 		common->ani.checkani_timer = timestamp;
 	}
 
-	/* Skip all processing if there's nothing to do. */
-	if (longcal || shortcal || aniflag) {
-		/* Call ANI routine if necessary */
-		if (aniflag) {
-			spin_lock_irqsave(&common->cc_lock, flags);
-			ath9k_hw_ani_monitor(ah, ah->curchan);
-			ath_update_survey_stats(sc);
-			spin_unlock_irqrestore(&common->cc_lock, flags);
-		}
+	/* Call ANI routine if necessary */
+	if (aniflag) {
+		spin_lock_irqsave(&common->cc_lock, flags);
+		ath9k_hw_ani_monitor(ah, ah->curchan);
+		ath_update_survey_stats(sc);
+		spin_unlock_irqrestore(&common->cc_lock, flags);
+	}
 
-		/* Perform calibration if necessary */
-		if (longcal || shortcal) {
-			common->ani.caldone =
-				ath9k_hw_calibrate(ah,
-						   ah->curchan,
-						   common->rx_chainmask,
-						   longcal);
-		}
+	/* Perform calibration if necessary */
+	if (longcal || shortcal) {
+		common->ani.caldone =
+			ath9k_hw_calibrate(ah, ah->curchan,
+						common->rx_chainmask, longcal);
 	}
 
 	ath9k_ps_restore(sc);
@@ -868,7 +863,7 @@
 #undef SCHED_INTR
 }
 
-void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
+static void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
 {
 	struct ath_hw *ah = sc->sc_ah;
 	struct ath_common *common = ath9k_hw_common(ah);
@@ -974,6 +969,7 @@
 	sc->hw_busy_count = 0;
 
 	/* Stop ANI */
+
 	del_timer_sync(&common->ani.timer);
 
 	ath9k_ps_wakeup(sc);
@@ -1023,7 +1019,9 @@
 	spin_unlock_bh(&sc->sc_pcu_lock);
 
 	/* Start ANI */
-	ath_start_ani(common);
+	if (!common->disable_ani)
+		ath_start_ani(common);
+
 	ath9k_ps_restore(sc);
 
 	return r;
@@ -1412,10 +1410,14 @@
 	ath9k_hw_set_interrupts(ah, ah->imask);
 
 	/* Set up ANI */
-	if ((iter_data.naps + iter_data.nadhocs) > 0) {
+	if (iter_data.naps > 0) {
 		sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;
-		sc->sc_flags |= SC_OP_ANI_RUN;
-		ath_start_ani(common);
+
+		if (!common->disable_ani) {
+			sc->sc_flags |= SC_OP_ANI_RUN;
+			ath_start_ani(common);
+		}
+
 	} else {
 		sc->sc_flags &= ~SC_OP_ANI_RUN;
 		del_timer_sync(&common->ani.timer);
@@ -1952,50 +1954,38 @@
 	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
 	struct ath_vif *avp = (void *)vif->drv_priv;
 
-	switch (sc->sc_ah->opmode) {
-	case NL80211_IFTYPE_ADHOC:
-		/* There can be only one vif available */
+	/*
+	 * Skip iteration if primary station vif's bss info
+	 * was not changed
+	 */
+	if (sc->sc_flags & SC_OP_PRIM_STA_VIF)
+		return;
+
+	if (bss_conf->assoc) {
+		sc->sc_flags |= SC_OP_PRIM_STA_VIF;
+		avp->primary_sta_vif = true;
 		memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
 		common->curaid = bss_conf->aid;
 		ath9k_hw_write_associd(sc->sc_ah);
-		/* configure beacon */
-		if (bss_conf->enable_beacon)
-			ath_beacon_config(sc, vif);
-		break;
-	case NL80211_IFTYPE_STATION:
-		/*
-		 * Skip iteration if primary station vif's bss info
-		 * was not changed
-		 */
-		if (sc->sc_flags & SC_OP_PRIM_STA_VIF)
-			break;
-
-		if (bss_conf->assoc) {
-			sc->sc_flags |= SC_OP_PRIM_STA_VIF;
-			avp->primary_sta_vif = true;
-			memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
-			common->curaid = bss_conf->aid;
-			ath9k_hw_write_associd(sc->sc_ah);
-			ath_dbg(common, ATH_DBG_CONFIG,
+		ath_dbg(common, ATH_DBG_CONFIG,
 				"Bss Info ASSOC %d, bssid: %pM\n",
 				bss_conf->aid, common->curbssid);
-			ath_beacon_config(sc, vif);
-			/*
-			 * Request a re-configuration of Beacon related timers
-			 * on the receipt of the first Beacon frame (i.e.,
-			 * after time sync with the AP).
-			 */
-			sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON;
-			/* Reset rssi stats */
-			sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
-			sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;
+		ath_beacon_config(sc, vif);
+		/*
+		 * Request a re-configuration of Beacon related timers
+		 * on the receipt of the first Beacon frame (i.e.,
+		 * after time sync with the AP).
+		 */
+		sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON;
+		/* Reset rssi stats */
+		sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
+		sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;
 
+		if (!common->disable_ani) {
 			sc->sc_flags |= SC_OP_ANI_RUN;
 			ath_start_ani(common);
 		}
-		break;
-	default:
-		break;
+
 	}
 }
 
@@ -2005,6 +1995,9 @@
 	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
 	struct ath_vif *avp = (void *)vif->drv_priv;
 
+	if (sc->sc_ah->opmode != NL80211_IFTYPE_STATION)
+		return;
+
 	/* Reconfigure bss info */
 	if (avp->primary_sta_vif && !bss_conf->assoc) {
 		ath_dbg(common, ATH_DBG_CONFIG,
@@ -2023,8 +2016,7 @@
 	 * None of station vifs are associated.
 	 * Clear bssid & aid
 	 */
-	if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
-	    !(sc->sc_flags & SC_OP_PRIM_STA_VIF)) {
+	if (!(sc->sc_flags & SC_OP_PRIM_STA_VIF)) {
 		ath9k_hw_write_associd(sc->sc_ah);
 		/* Stop ANI */
 		sc->sc_flags &= ~SC_OP_ANI_RUN;
@@ -2054,6 +2046,26 @@
 			common->curbssid, common->curaid);
 	}
 
+	if (changed & BSS_CHANGED_IBSS) {
+		/* There can be only one vif available */
+		memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
+		common->curaid = bss_conf->aid;
+		ath9k_hw_write_associd(sc->sc_ah);
+
+		if (bss_conf->ibss_joined) {
+			sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;
+
+			if (!common->disable_ani) {
+				sc->sc_flags |= SC_OP_ANI_RUN;
+				ath_start_ani(common);
+			}
+
+		} else {
+			sc->sc_flags &= ~SC_OP_ANI_RUN;
+			del_timer_sync(&common->ani.timer);
+		}
+	}
+
 	/* Enable transmission of beacons (AP, IBSS, MESH) */
 	if ((changed & BSS_CHANGED_BEACON) ||
 	    ((changed & BSS_CHANGED_BEACON_ENABLED) && bss_conf->enable_beacon)) {
@@ -2334,7 +2346,7 @@
 	return false;
 }
 
-int ath9k_tx_last_beacon(struct ieee80211_hw *hw)
+static int ath9k_tx_last_beacon(struct ieee80211_hw *hw)
 {
 	struct ath_softc *sc = hw->priv;
 	struct ath_hw *ah = sc->sc_ah;
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 3779b89..ec012b4 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -53,7 +53,7 @@
 				struct ath_txq *txq, struct list_head *bf_q,
 				struct ath_tx_status *ts, int txok, int sendbar);
 static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
-			     struct list_head *head);
+			     struct list_head *head, bool internal);
 static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len);
 static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
 			     struct ath_tx_status *ts, int nframes, int nbad,
@@ -377,8 +377,7 @@
 			bf_next = bf->bf_next;
 
 			bf->bf_state.bf_type |= BUF_XRETRY;
-			if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
-			    !bf->bf_stale || bf_next != NULL)
+			if (!bf->bf_stale || bf_next != NULL)
 				list_move_tail(&bf->list, &bf_head);
 
 			ath_tx_rc_status(sc, bf, ts, 1, 1, 0, false);
@@ -463,20 +462,14 @@
 			}
 		}
 
-		if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
-		    bf_next == NULL) {
-			/*
-			 * Make sure the last desc is reclaimed if it
-			 * not a holding desc.
-			 */
-			if (!bf_last->bf_stale)
-				list_move_tail(&bf->list, &bf_head);
-			else
-				INIT_LIST_HEAD(&bf_head);
-		} else {
-			BUG_ON(list_empty(bf_q));
+		/*
+		 * Make sure the last desc is reclaimed if it is
+		 * not a holding desc.
+		 */
+		if (!bf_last->bf_stale || bf_next != NULL)
 			list_move_tail(&bf->list, &bf_head);
-		}
+		else
+			INIT_LIST_HEAD(&bf_head);
 
 		if (!txpending || (tid->state & AGGR_CLEANUP)) {
 			/*
@@ -837,7 +830,7 @@
 			bf->bf_state.bf_type &= ~BUF_AGGR;
 			ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
 			ath_buf_set_rate(sc, bf, fi->framelen);
-			ath_tx_txqaddbuf(sc, txq, &bf_q);
+			ath_tx_txqaddbuf(sc, txq, &bf_q, false);
 			continue;
 		}
 
@@ -849,7 +842,7 @@
 		/* anchor last desc of aggregate */
 		ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);
 
-		ath_tx_txqaddbuf(sc, txq, &bf_q);
+		ath_tx_txqaddbuf(sc, txq, &bf_q, false);
 		TX_STAT_INC(txq->axq_qnum, a_aggr);
 
 	} while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
@@ -1085,7 +1078,6 @@
 		txq->txq_headidx = txq->txq_tailidx = 0;
 		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
 			INIT_LIST_HEAD(&txq->txq_fifo[i]);
-		INIT_LIST_HEAD(&txq->txq_fifo_pending);
 	}
 	return &sc->tx.txq[axq_qnum];
 }
@@ -1155,13 +1147,8 @@
 	return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
 }
 
-/*
- * Drain a given TX queue (could be Beacon or Data)
- *
- * This assumes output has been stopped and
- * we do not need to block ath_tx_tasklet.
- */
-void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
+static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
+			       struct list_head *list, bool retry_tx)
 {
 	struct ath_buf *bf, *lastbf;
 	struct list_head bf_head;
@@ -1170,93 +1157,63 @@
 	memset(&ts, 0, sizeof(ts));
 	INIT_LIST_HEAD(&bf_head);
 
-	for (;;) {
-		spin_lock_bh(&txq->axq_lock);
+	while (!list_empty(list)) {
+		bf = list_first_entry(list, struct ath_buf, list);
 
-		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
-			if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
-				txq->txq_headidx = txq->txq_tailidx = 0;
-				spin_unlock_bh(&txq->axq_lock);
-				break;
-			} else {
-				bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
-						      struct ath_buf, list);
-			}
-		} else {
-			if (list_empty(&txq->axq_q)) {
-				txq->axq_link = NULL;
-				spin_unlock_bh(&txq->axq_lock);
-				break;
-			}
-			bf = list_first_entry(&txq->axq_q, struct ath_buf,
-					      list);
+		if (bf->bf_stale) {
+			list_del(&bf->list);
 
-			if (bf->bf_stale) {
-				list_del(&bf->list);
-				spin_unlock_bh(&txq->axq_lock);
-
-				ath_tx_return_buffer(sc, bf);
-				continue;
-			}
+			ath_tx_return_buffer(sc, bf);
+			continue;
 		}
 
 		lastbf = bf->bf_lastbf;
-
-		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
-			list_cut_position(&bf_head,
-					  &txq->txq_fifo[txq->txq_tailidx],
-					  &lastbf->list);
-			INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
-		} else {
-			/* remove ath_buf's of the same mpdu from txq */
-			list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
-		}
+		list_cut_position(&bf_head, list, &lastbf->list);
 
 		txq->axq_depth--;
 		if (bf_is_ampdu_not_probing(bf))
 			txq->axq_ampdu_depth--;
-		spin_unlock_bh(&txq->axq_lock);
 
+		spin_unlock_bh(&txq->axq_lock);
 		if (bf_isampdu(bf))
 			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
 					     retry_tx);
 		else
 			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
-	}
-
-	spin_lock_bh(&txq->axq_lock);
-	txq->axq_tx_inprogress = false;
-	spin_unlock_bh(&txq->axq_lock);
-
-	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
 		spin_lock_bh(&txq->axq_lock);
-		while (!list_empty(&txq->txq_fifo_pending)) {
-			bf = list_first_entry(&txq->txq_fifo_pending,
-					      struct ath_buf, list);
-			list_cut_position(&bf_head,
-					  &txq->txq_fifo_pending,
-					  &bf->bf_lastbf->list);
-			spin_unlock_bh(&txq->axq_lock);
-
-			if (bf_isampdu(bf))
-				ath_tx_complete_aggr(sc, txq, bf, &bf_head,
-						     &ts, 0, retry_tx);
-			else
-				ath_tx_complete_buf(sc, bf, txq, &bf_head,
-						    &ts, 0, 0);
-			spin_lock_bh(&txq->axq_lock);
-		}
-		spin_unlock_bh(&txq->axq_lock);
 	}
+}
+
+/*
+ * Drain a given TX queue (could be Beacon or Data)
+ *
+ * This assumes output has been stopped and
+ * we do not need to block ath_tx_tasklet.
+ */
+void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
+{
+	spin_lock_bh(&txq->axq_lock);
+	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+		int idx = txq->txq_tailidx;
+
+		while (!list_empty(&txq->txq_fifo[idx])) {
+			ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx],
+					   retry_tx);
+
+			INCR(idx, ATH_TXFIFO_DEPTH);
+		}
+		txq->txq_tailidx = idx;
+	}
+
+	txq->axq_link = NULL;
+	txq->axq_tx_inprogress = false;
+	ath_drain_txq_list(sc, txq, &txq->axq_q, retry_tx);
 
 	/* flush any pending frames if aggregation is enabled */
-	if (sc->sc_flags & SC_OP_TXAGGR) {
-		if (!retry_tx) {
-			spin_lock_bh(&txq->axq_lock);
-			ath_txq_drain_pending_buffers(sc, txq);
-			spin_unlock_bh(&txq->axq_lock);
-		}
-	}
+	if ((sc->sc_flags & SC_OP_TXAGGR) && !retry_tx)
+		ath_txq_drain_pending_buffers(sc, txq);
+
+	spin_unlock_bh(&txq->axq_lock);
 }
 
 bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
@@ -1370,11 +1327,13 @@
  * assume the descriptors are already chained together by caller.
  */
 static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
-			     struct list_head *head)
+			     struct list_head *head, bool internal)
 {
 	struct ath_hw *ah = sc->sc_ah;
 	struct ath_common *common = ath9k_hw_common(ah);
-	struct ath_buf *bf;
+	struct ath_buf *bf, *bf_last;
+	bool puttxbuf = false;
+	bool edma;
 
 	/*
 	 * Insert the frame on the outbound list and
@@ -1384,51 +1343,49 @@
 	if (list_empty(head))
 		return;
 
+	edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
 	bf = list_first_entry(head, struct ath_buf, list);
+	bf_last = list_entry(head->prev, struct ath_buf, list);
 
 	ath_dbg(common, ATH_DBG_QUEUE,
 		"qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
 
-	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
-		if (txq->axq_depth >= ATH_TXFIFO_DEPTH) {
-			list_splice_tail_init(head, &txq->txq_fifo_pending);
-			return;
-		}
-		if (!list_empty(&txq->txq_fifo[txq->txq_headidx]))
-			ath_dbg(common, ATH_DBG_XMIT,
-				"Initializing tx fifo %d which is non-empty\n",
-				txq->txq_headidx);
-		INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
-		list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
+	if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) {
+		list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]);
 		INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
-		TX_STAT_INC(txq->axq_qnum, puttxbuf);
-		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
-		ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
-			txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
+		puttxbuf = true;
 	} else {
 		list_splice_tail_init(head, &txq->axq_q);
 
-		if (txq->axq_link == NULL) {
-			TX_STAT_INC(txq->axq_qnum, puttxbuf);
-			ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
-			ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
-				txq->axq_qnum, ito64(bf->bf_daddr),
-				bf->bf_desc);
-		} else {
-			*txq->axq_link = bf->bf_daddr;
+		if (txq->axq_link) {
+			ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr);
 			ath_dbg(common, ATH_DBG_XMIT,
 				"link[%u] (%p)=%llx (%p)\n",
 				txq->axq_qnum, txq->axq_link,
 				ito64(bf->bf_daddr), bf->bf_desc);
-		}
-		ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
-				       &txq->axq_link);
+		} else if (!edma)
+			puttxbuf = true;
+
+		txq->axq_link = bf_last->bf_desc;
+	}
+
+	if (puttxbuf) {
+		TX_STAT_INC(txq->axq_qnum, puttxbuf);
+		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
+		ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
+			txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
+	}
+
+	if (!edma) {
 		TX_STAT_INC(txq->axq_qnum, txstart);
 		ath9k_hw_txstart(ah, txq->axq_qnum);
 	}
-	txq->axq_depth++;
-	if (bf_is_ampdu_not_probing(bf))
-		txq->axq_ampdu_depth++;
+
+	if (!internal) {
+		txq->axq_depth++;
+		if (bf_is_ampdu_not_probing(bf))
+			txq->axq_ampdu_depth++;
+	}
 }
 
 static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
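
The rewritten ath_tx_txqaddbuf() collapses the EDMA and legacy paths into one queueing decision: an idle queue gets the first descriptor's address written to the TXDP register (the puttxbuf case), while a busy queue is spliced in by patching the previous chain's link field. A reduced model of that decision; the types and the fake TXDP register are hypothetical stand-ins for ath9k_hw_puttxbuf() and ath9k_hw_set_desc_link():

#include <stdio.h>

struct desc {
	unsigned long daddr;	/* DMA address of this descriptor */
	unsigned long link;	/* DMA address of the next chain, 0 if none */
};

static unsigned long txdp;	/* models the hardware TXDP register */

static void queue_chain(struct desc **axq_link, struct desc *first,
			struct desc *last)
{
	if (*axq_link) {
		/* hardware already has work: splice in via the link field */
		(*axq_link)->link = first->daddr;
	} else {
		/* queue idle: point TXDP at the new chain */
		txdp = first->daddr;
	}
	/* the next chain must be linked behind this one's last descriptor */
	*axq_link = last;
}

int main(void)
{
	struct desc a = { 0x1000, 0 }, b = { 0x2000, 0 };
	struct desc *axq_link = NULL;

	queue_chain(&axq_link, &a, &a);	/* idle: writes TXDP */
	queue_chain(&axq_link, &b, &b);	/* busy: patches a.link */
	printf("TXDP=%#lx a.link=%#lx\n", txdp, a.link);
	return 0;
}
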
@@ -1470,7 +1427,7 @@
 	TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
 	bf->bf_lastbf = bf;
 	ath_buf_set_rate(sc, bf, fi->framelen);
-	ath_tx_txqaddbuf(sc, txctl->txq, &bf_head);
+	ath_tx_txqaddbuf(sc, txctl->txq, &bf_head, false);
 }
 
 static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
@@ -1490,7 +1447,7 @@
 	bf->bf_lastbf = bf;
 	fi = get_frame_info(bf->bf_mpdu);
 	ath_buf_set_rate(sc, bf, fi->framelen);
-	ath_tx_txqaddbuf(sc, txq, bf_head);
+	ath_tx_txqaddbuf(sc, txq, bf_head, false);
 	TX_STAT_INC(txq->axq_qnum, queued);
 }
 
@@ -2077,6 +2034,38 @@
 	tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
 }
 
+static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
+				  struct ath_tx_status *ts, struct ath_buf *bf,
+				  struct list_head *bf_head)
+{
+	int txok;
+
+	txq->axq_depth--;
+	txok = !(ts->ts_status & ATH9K_TXERR_MASK);
+	txq->axq_tx_inprogress = false;
+	if (bf_is_ampdu_not_probing(bf))
+		txq->axq_ampdu_depth--;
+
+	spin_unlock_bh(&txq->axq_lock);
+
+	if (!bf_isampdu(bf)) {
+		/*
+		 * This frame is sent out as a single frame.
+		 * Use hardware retry status for this frame.
+		 */
+		if (ts->ts_status & ATH9K_TXERR_XRETRY)
+			bf->bf_state.bf_type |= BUF_XRETRY;
+		ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok, true);
+		ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok, 0);
+	} else {
+		ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true);
+	}
+
+	spin_lock_bh(&txq->axq_lock);
+
+	if (sc->sc_flags & SC_OP_TXAGGR)
+		ath_txq_schedule(sc, txq);
+}
+
 static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
 {
 	struct ath_hw *ah = sc->sc_ah;
@@ -2085,20 +2074,18 @@
 	struct list_head bf_head;
 	struct ath_desc *ds;
 	struct ath_tx_status ts;
-	int txok;
 	int status;
 
 	ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
 		txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
 		txq->axq_link);
 
+	spin_lock_bh(&txq->axq_lock);
 	for (;;) {
-		spin_lock_bh(&txq->axq_lock);
 		if (list_empty(&txq->axq_q)) {
 			txq->axq_link = NULL;
 			if (sc->sc_flags & SC_OP_TXAGGR)
 				ath_txq_schedule(sc, txq);
-			spin_unlock_bh(&txq->axq_lock);
 			break;
 		}
 		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
@@ -2114,13 +2101,11 @@
 		bf_held = NULL;
 		if (bf->bf_stale) {
 			bf_held = bf;
-			if (list_is_last(&bf_held->list, &txq->axq_q)) {
-				spin_unlock_bh(&txq->axq_lock);
+			if (list_is_last(&bf_held->list, &txq->axq_q))
 				break;
-			} else {
-				bf = list_entry(bf_held->list.next,
-						struct ath_buf, list);
-			}
+
+			bf = list_entry(bf_held->list.next, struct ath_buf,
+					list);
 		}
 
 		lastbf = bf->bf_lastbf;
@@ -2128,10 +2113,9 @@
 
 		memset(&ts, 0, sizeof(ts));
 		status = ath9k_hw_txprocdesc(ah, ds, &ts);
-		if (status == -EINPROGRESS) {
-			spin_unlock_bh(&txq->axq_lock);
+		if (status == -EINPROGRESS)
 			break;
-		}
+
 		TX_STAT_INC(txq->axq_qnum, txprocdesc);
 
 		/*
@@ -2145,42 +2129,14 @@
 			list_cut_position(&bf_head,
 				&txq->axq_q, lastbf->list.prev);
 
-		txq->axq_depth--;
-		txok = !(ts.ts_status & ATH9K_TXERR_MASK);
-		txq->axq_tx_inprogress = false;
-		if (bf_held)
+		if (bf_held) {
 			list_del(&bf_held->list);
-
-		if (bf_is_ampdu_not_probing(bf))
-			txq->axq_ampdu_depth--;
-
-		spin_unlock_bh(&txq->axq_lock);
-
-		if (bf_held)
 			ath_tx_return_buffer(sc, bf_held);
-
-		if (!bf_isampdu(bf)) {
-			/*
-			 * This frame is sent out as a single frame.
-			 * Use hardware retry status for this frame.
-			 */
-			if (ts.ts_status & ATH9K_TXERR_XRETRY)
-				bf->bf_state.bf_type |= BUF_XRETRY;
-			ath_tx_rc_status(sc, bf, &ts, 1, txok ? 0 : 1, txok, true);
 		}
 
-		if (bf_isampdu(bf))
-			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok,
-					     true);
-		else
-			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);
-
-		spin_lock_bh(&txq->axq_lock);
-
-		if (sc->sc_flags & SC_OP_TXAGGR)
-			ath_txq_schedule(sc, txq);
-		spin_unlock_bh(&txq->axq_lock);
+		ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
 	}
+	spin_unlock_bh(&txq->axq_lock);
 }
 
 static void ath_tx_complete_poll_work(struct work_struct *work)
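
The reworked ath_tx_processq() now takes axq_lock once around the whole reap loop, and ath_tx_process_buffer() drops it only for the completion call before re-acquiring it. A userspace analogy of that locking shape, assuming a pthread mutex in place of the txq spinlock and a bare counter in place of the descriptor queue:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t axq_lock = PTHREAD_MUTEX_INITIALIZER;
static int pending = 3;			/* frames awaiting completion */

static void process_buffer(void)	/* shaped like ath_tx_process_buffer() */
{
	pending--;			/* queue accounting stays under the lock */

	pthread_mutex_unlock(&axq_lock);
	puts("completion callback runs unlocked");	/* ath_tx_complete_buf/_aggr */
	pthread_mutex_lock(&axq_lock);	/* re-acquire before returning */
}

int main(void)
{
	pthread_mutex_lock(&axq_lock);	/* one lock/unlock pair, not one per frame */
	while (pending)
		process_buffer();
	pthread_mutex_unlock(&axq_lock);
	return 0;
}
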
@@ -2237,17 +2193,16 @@
 
 void ath_tx_edma_tasklet(struct ath_softc *sc)
 {
-	struct ath_tx_status txs;
+	struct ath_tx_status ts;
 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 	struct ath_hw *ah = sc->sc_ah;
 	struct ath_txq *txq;
 	struct ath_buf *bf, *lastbf;
 	struct list_head bf_head;
 	int status;
-	int txok;
 
 	for (;;) {
-		status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
+		status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
 		if (status == -EINPROGRESS)
 			break;
 		if (status == -EIO) {
@@ -2257,12 +2212,13 @@
 		}
 
 		/* Skip beacon completions */
-		if (txs.qid == sc->beacon.beaconq)
+		if (ts.qid == sc->beacon.beaconq)
 			continue;
 
-		txq = &sc->tx.txq[txs.qid];
+		txq = &sc->tx.txq[ts.qid];
 
 		spin_lock_bh(&txq->axq_lock);
+
 		if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
 			spin_unlock_bh(&txq->axq_lock);
 			return;
@@ -2275,41 +2231,21 @@
 		INIT_LIST_HEAD(&bf_head);
 		list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
 				  &lastbf->list);
-		INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
-		txq->axq_depth--;
-		txq->axq_tx_inprogress = false;
-		if (bf_is_ampdu_not_probing(bf))
-			txq->axq_ampdu_depth--;
-		spin_unlock_bh(&txq->axq_lock);
 
-		txok = !(txs.ts_status & ATH9K_TXERR_MASK);
+		if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
+			INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
 
-		if (!bf_isampdu(bf)) {
-			if (txs.ts_status & ATH9K_TXERR_XRETRY)
-				bf->bf_state.bf_type |= BUF_XRETRY;
-			ath_tx_rc_status(sc, bf, &txs, 1, txok ? 0 : 1, txok, true);
+			if (!list_empty(&txq->axq_q)) {
+				struct list_head bf_q;
+
+				INIT_LIST_HEAD(&bf_q);
+				txq->axq_link = NULL;
+				list_splice_tail_init(&txq->axq_q, &bf_q);
+				ath_tx_txqaddbuf(sc, txq, &bf_q, true);
+			}
 		}
 
-		if (bf_isampdu(bf))
-			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs,
-					     txok, true);
-		else
-			ath_tx_complete_buf(sc, bf, txq, &bf_head,
-					    &txs, txok, 0);
-
-		spin_lock_bh(&txq->axq_lock);
-
-		if (!list_empty(&txq->txq_fifo_pending)) {
-			INIT_LIST_HEAD(&bf_head);
-			bf = list_first_entry(&txq->txq_fifo_pending,
-					      struct ath_buf, list);
-			list_cut_position(&bf_head,
-					  &txq->txq_fifo_pending,
-					  &bf->bf_lastbf->list);
-			ath_tx_txqaddbuf(sc, txq, &bf_head);
-		} else if (sc->sc_flags & SC_OP_TXAGGR)
-			ath_txq_schedule(sc, txq);
-
+		ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
 		spin_unlock_bh(&txq->axq_lock);
 	}
 }
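
The internal argument earns its keep in the tasklet above: frames moved from axq_q into a freed FIFO slot were already counted in axq_depth when first submitted, so the refill must not bump the counters a second time. The same invariant in miniature, with ath_txq reduced to a hypothetical one-field struct:

#include <stdio.h>
#include <stdbool.h>

struct txq { int axq_depth; };

static void txq_addbuf(struct txq *q, int nframes, bool internal)
{
	/* ... hand nframes descriptors to the hardware here ... */
	if (!internal)
		q->axq_depth += nframes;	/* count first-time submissions only */
}

int main(void)
{
	struct txq q = { 0 };

	txq_addbuf(&q, 2, false);	/* new frames from mac80211 */
	txq_addbuf(&q, 2, true);	/* same frames, refilled into the FIFO */
	printf("depth=%d (still 2)\n", q.axq_depth);
	return 0;
}
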
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index 39a11e8..7e45ca2 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -40,6 +40,7 @@
 ******************************************************************************/
 
 #include <linux/init.h>
+#include <linux/interrupt.h>
 
 #include <linux/kernel.h>
 #include <linux/ptrace.h>
diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
index 480595f..fe26bf4 100644
--- a/drivers/net/wireless/b43/Kconfig
+++ b/drivers/net/wireless/b43/Kconfig
@@ -26,6 +26,11 @@
 	  This driver can be built as a module (recommended) that will be called "b43".
 	  If unsure, say M.
 
+config B43_BCMA
+	bool "Support for BCMA bus"
+	depends on B43 && BCMA && BROKEN
+	default y
+
 # Auto-select SSB PCI-HOST support, if possible
 config B43_PCI_AUTOSELECT
 	bool
diff --git a/drivers/net/wireless/b43/Makefile b/drivers/net/wireless/b43/Makefile
index cef334a..95f7c00 100644
--- a/drivers/net/wireless/b43/Makefile
+++ b/drivers/net/wireless/b43/Makefile
@@ -1,4 +1,5 @@
 b43-y				+= main.o
+b43-y				+= bus.o
 b43-y				+= tables.o
 b43-$(CONFIG_B43_PHY_N)		+= tables_nphy.o
 b43-$(CONFIG_B43_PHY_N)		+= radio_2055.o
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index 25a78cf..1cb2dde 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -5,12 +5,14 @@
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
 #include <linux/hw_random.h>
+#include <linux/bcma/bcma.h>
 #include <linux/ssb/ssb.h>
 #include <net/mac80211.h>
 
 #include "debugfs.h"
 #include "leds.h"
 #include "rfkill.h"
+#include "bus.h"
 #include "lo.h"
 #include "phy_common.h"
 
@@ -414,6 +416,17 @@
 #define B43_MACCMD_CCA			0x00000008	/* Clear channel assessment */
 #define B43_MACCMD_BGNOISE		0x00000010	/* Background noise */
 
+/* BCMA 802.11 core specific IO Control (BCMA_IOCTL) flags */
+#define B43_BCMA_IOCTL_PHY_CLKEN	0x00000004	/* PHY Clock Enable */
+#define B43_BCMA_IOCTL_PHY_RESET	0x00000008	/* PHY Reset */
+#define B43_BCMA_IOCTL_MACPHYCLKEN	0x00000010	/* MAC PHY Clock Control Enable */
+#define B43_BCMA_IOCTL_PLLREFSEL	0x00000020	/* PLL Frequency Reference Select */
+#define B43_BCMA_IOCTL_PHY_BW		0x000000C0	/* PHY bandwidth and clock speed mask (N-PHY+ only?) */
+#define  B43_BCMA_IOCTL_PHY_BW_10MHZ	0x00000000	/* 10 MHz bandwidth, 40 MHz PHY */
+#define  B43_BCMA_IOCTL_PHY_BW_20MHZ	0x00000040	/* 20 MHz bandwidth, 80 MHz PHY */
+#define  B43_BCMA_IOCTL_PHY_BW_40MHZ	0x00000080	/* 40 MHz bandwidth, 160 MHz PHY */
+#define B43_BCMA_IOCTL_GMODE		0x00002000	/* G Mode Enable */
+
 /* 802.11 core specific TM State Low (SSB_TMSLOW) flags */
 #define B43_TMSLOW_GMODE		0x20000000	/* G Mode Enable */
 #define B43_TMSLOW_PHY_BANDWIDTH	0x00C00000	/* PHY bandwidth and clock speed mask (N-PHY only) */
@@ -707,7 +720,8 @@
 
 /* Data structure for one wireless device (802.11 core) */
 struct b43_wldev {
-	struct ssb_device *sdev;
+	struct ssb_device *sdev; /* TODO: remove when b43_bus_dev is ready */
+	struct b43_bus_dev *dev;
 	struct b43_wl *wl;
 
 	/* The device initialization status.
@@ -879,36 +893,59 @@
 	return wl->hw->conf.channel->band;
 }
 
+static inline int b43_bus_may_powerdown(struct b43_wldev *wldev)
+{
+	return wldev->dev->bus_may_powerdown(wldev->dev);
+}
+static inline int b43_bus_powerup(struct b43_wldev *wldev, bool dynamic_pctl)
+{
+	return wldev->dev->bus_powerup(wldev->dev, dynamic_pctl);
+}
+static inline int b43_device_is_enabled(struct b43_wldev *wldev)
+{
+	return wldev->dev->device_is_enabled(wldev->dev);
+}
+static inline void b43_device_enable(struct b43_wldev *wldev,
+				     u32 core_specific_flags)
+{
+	wldev->dev->device_enable(wldev->dev, core_specific_flags);
+}
+static inline void b43_device_disable(struct b43_wldev *wldev,
+				      u32 core_specific_flags)
+{
+	wldev->dev->device_disable(wldev->dev, core_specific_flags);
+}
+
 static inline u16 b43_read16(struct b43_wldev *dev, u16 offset)
 {
-	return ssb_read16(dev->sdev, offset);
+	return dev->dev->read16(dev->dev, offset);
 }
 
 static inline void b43_write16(struct b43_wldev *dev, u16 offset, u16 value)
 {
-	ssb_write16(dev->sdev, offset, value);
+	dev->dev->write16(dev->dev, offset, value);
 }
 
 static inline u32 b43_read32(struct b43_wldev *dev, u16 offset)
 {
-	return ssb_read32(dev->sdev, offset);
+	return dev->dev->read32(dev->dev, offset);
 }
 
 static inline void b43_write32(struct b43_wldev *dev, u16 offset, u32 value)
 {
-	ssb_write32(dev->sdev, offset, value);
+	dev->dev->write32(dev->dev, offset, value);
 }
 
 static inline void b43_block_read(struct b43_wldev *dev, void *buffer,
 				 size_t count, u16 offset, u8 reg_width)
 {
-	ssb_block_read(dev->sdev, buffer, count, offset, reg_width);
+	dev->dev->block_read(dev->dev, buffer, count, offset, reg_width);
 }
 
 static inline void b43_block_write(struct b43_wldev *dev, const void *buffer,
 				   size_t count, u16 offset, u8 reg_width)
 {
-	ssb_block_write(dev->sdev, buffer, count, offset, reg_width);
+	dev->dev->block_write(dev->dev, buffer, count, offset, reg_width);
 }
 
 static inline bool b43_using_pio_transfers(struct b43_wldev *dev)
diff --git a/drivers/net/wireless/b43/bus.c b/drivers/net/wireless/b43/bus.c
new file mode 100644
index 0000000..6c63aec
--- /dev/null
+++ b/drivers/net/wireless/b43/bus.c
@@ -0,0 +1,122 @@
+/*
+
+  Broadcom B43 wireless driver
+  Bus abstraction layer
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 2 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; see the file COPYING.  If not, write to
+  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+  Boston, MA 02110-1301, USA.
+
+*/
+
+#include "b43.h"
+#include "bus.h"
+
+
+/* SSB */
+
+static inline int b43_bus_ssb_bus_may_powerdown(struct b43_bus_dev *dev)
+{
+	return ssb_bus_may_powerdown(dev->sdev->bus);
+}
+static inline int b43_bus_ssb_bus_powerup(struct b43_bus_dev *dev,
+					  bool dynamic_pctl)
+{
+	return ssb_bus_powerup(dev->sdev->bus, dynamic_pctl);
+}
+static inline int b43_bus_ssb_device_is_enabled(struct b43_bus_dev *dev)
+{
+	return ssb_device_is_enabled(dev->sdev);
+}
+static inline void b43_bus_ssb_device_enable(struct b43_bus_dev *dev,
+					     u32 core_specific_flags)
+{
+	ssb_device_enable(dev->sdev, core_specific_flags);
+}
+static inline void b43_bus_ssb_device_disable(struct b43_bus_dev *dev,
+					      u32 core_specific_flags)
+{
+	ssb_device_disable(dev->sdev, core_specific_flags);
+}
+
+static inline u16 b43_bus_ssb_read16(struct b43_bus_dev *dev, u16 offset)
+{
+	return ssb_read16(dev->sdev, offset);
+}
+static inline u32 b43_bus_ssb_read32(struct b43_bus_dev *dev, u16 offset)
+{
+	return ssb_read32(dev->sdev, offset);
+}
+static inline
+void b43_bus_ssb_write16(struct b43_bus_dev *dev, u16 offset, u16 value)
+{
+	ssb_write16(dev->sdev, offset, value);
+}
+static inline
+void b43_bus_ssb_write32(struct b43_bus_dev *dev, u16 offset, u32 value)
+{
+	ssb_write32(dev->sdev, offset, value);
+}
+static inline
+void b43_bus_ssb_block_read(struct b43_bus_dev *dev, void *buffer,
+			    size_t count, u16 offset, u8 reg_width)
+{
+	ssb_block_read(dev->sdev, buffer, count, offset, reg_width);
+}
+static inline
+void b43_bus_ssb_block_write(struct b43_bus_dev *dev, const void *buffer,
+			     size_t count, u16 offset, u8 reg_width)
+{
+	ssb_block_write(dev->sdev, buffer, count, offset, reg_width);
+}
+
+struct b43_bus_dev *b43_bus_dev_ssb_init(struct ssb_device *sdev)
+{
+	struct b43_bus_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+
+	if (!dev)
+		return NULL;
+
+	dev->bus_type = B43_BUS_SSB;
+	dev->sdev = sdev;
+
+	dev->bus_may_powerdown = b43_bus_ssb_bus_may_powerdown;
+	dev->bus_powerup = b43_bus_ssb_bus_powerup;
+	dev->device_is_enabled = b43_bus_ssb_device_is_enabled;
+	dev->device_enable = b43_bus_ssb_device_enable;
+	dev->device_disable = b43_bus_ssb_device_disable;
+
+	dev->read16 = b43_bus_ssb_read16;
+	dev->read32 = b43_bus_ssb_read32;
+	dev->write16 = b43_bus_ssb_write16;
+	dev->write32 = b43_bus_ssb_write32;
+	dev->block_read = b43_bus_ssb_block_read;
+	dev->block_write = b43_bus_ssb_block_write;
+
+	dev->dev = sdev->dev;
+	dev->dma_dev = sdev->dma_dev;
+	dev->irq = sdev->irq;
+
+	dev->board_vendor = sdev->bus->boardinfo.vendor;
+	dev->board_type = sdev->bus->boardinfo.type;
+	dev->board_rev = sdev->bus->boardinfo.rev;
+
+	dev->chip_id = sdev->bus->chip_id;
+	dev->chip_rev = sdev->bus->chip_rev;
+	dev->chip_pkg = sdev->bus->chip_package;
+
+	dev->bus_sprom = &sdev->bus->sprom;
+
+	dev->core_id = sdev->id.coreid;
+	dev->core_rev = sdev->id.revision;
+
+	return dev;
+}
diff --git a/drivers/net/wireless/b43/bus.h b/drivers/net/wireless/b43/bus.h
new file mode 100644
index 0000000..79a5ab4
--- /dev/null
+++ b/drivers/net/wireless/b43/bus.h
@@ -0,0 +1,62 @@
+#ifndef B43_BUS_H_
+#define B43_BUS_H_
+
+enum b43_bus_type {
+	B43_BUS_SSB,
+};
+
+struct b43_bus_dev {
+	enum b43_bus_type bus_type;
+	union {
+		struct ssb_device *sdev;
+	};
+
+	int (*bus_may_powerdown)(struct b43_bus_dev *dev);
+	int (*bus_powerup)(struct b43_bus_dev *dev, bool dynamic_pctl);
+	int (*device_is_enabled)(struct b43_bus_dev *dev);
+	void (*device_enable)(struct b43_bus_dev *dev,
+			      u32 core_specific_flags);
+	void (*device_disable)(struct b43_bus_dev *dev,
+			       u32 core_specific_flags);
+
+	u16 (*read16)(struct b43_bus_dev *dev, u16 offset);
+	u32 (*read32)(struct b43_bus_dev *dev, u16 offset);
+	void (*write16)(struct b43_bus_dev *dev, u16 offset, u16 value);
+	void (*write32)(struct b43_bus_dev *dev, u16 offset, u32 value);
+	void (*block_read)(struct b43_bus_dev *dev, void *buffer,
+			   size_t count, u16 offset, u8 reg_width);
+	void (*block_write)(struct b43_bus_dev *dev, const void *buffer,
+			    size_t count, u16 offset, u8 reg_width);
+
+	struct device *dev;
+	struct device *dma_dev;
+	unsigned int irq;
+
+	u16 board_vendor;
+	u16 board_type;
+	u16 board_rev;
+
+	u16 chip_id;
+	u8 chip_rev;
+	u8 chip_pkg;
+
+	struct ssb_sprom *bus_sprom;
+
+	u16 core_id;
+	u8 core_rev;
+};
+
+static inline bool b43_bus_host_is_pcmcia(struct b43_bus_dev *dev)
+{
+	return (dev->bus_type == B43_BUS_SSB &&
+		dev->sdev->bus->bustype == SSB_BUSTYPE_PCMCIA);
+}
+static inline bool b43_bus_host_is_sdio(struct b43_bus_dev *dev)
+{
+	return (dev->bus_type == B43_BUS_SSB &&
+		dev->sdev->bus->bustype == SSB_BUSTYPE_SDIO);
+}
+
+struct b43_bus_dev *b43_bus_dev_ssb_init(struct ssb_device *sdev);
+
+#endif /* B43_BUS_H_ */
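
bus.h and bus.c together form a small vtable: b43_bus_dev_ssb_init() points the ops at SSB helpers, callers go through inline wrappers such as b43_read16(), and a BCMA backend later only has to supply a second set of function pointers. A self-contained model of the pattern; the fake register backend and main() are purely illustrative:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

struct bus_dev {
	uint16_t (*read16)(struct bus_dev *dev, uint16_t offset);
};

/* one backend; a BCMA flavour would be a second function like this one */
static uint16_t ssb_backend_read16(struct bus_dev *dev, uint16_t offset)
{
	(void)dev;
	return offset ^ 0xFFFF;	/* fake register contents */
}

static struct bus_dev *bus_dev_ssb_init(void)
{
	struct bus_dev *dev = calloc(1, sizeof(*dev));

	if (!dev)
		return NULL;
	dev->read16 = ssb_backend_read16;
	return dev;
}

/* callers dispatch through the ops table, like the b43_read16() wrapper */
static uint16_t bus_read16(struct bus_dev *dev, uint16_t offset)
{
	return dev->read16(dev, offset);
}

int main(void)
{
	struct bus_dev *dev = bus_dev_ssb_init();

	if (!dev)
		return 1;
	printf("reg 0x2A = 0x%04X\n", (unsigned)bus_read16(dev, 0x2A));
	free(dev);
	return 0;
}

The single-member union holding sdev presumably leaves room for a bcma_device pointer to share the slot once the BCMA backend lands.
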
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 47d44bc..d02cf83 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -333,10 +333,10 @@
 	dma_addr_t dmaaddr;
 
 	if (tx) {
-		dmaaddr = dma_map_single(ring->dev->sdev->dma_dev,
+		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
 					 buf, len, DMA_TO_DEVICE);
 	} else {
-		dmaaddr = dma_map_single(ring->dev->sdev->dma_dev,
+		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
 					 buf, len, DMA_FROM_DEVICE);
 	}
 
@@ -348,10 +348,10 @@
 			  dma_addr_t addr, size_t len, int tx)
 {
 	if (tx) {
-		dma_unmap_single(ring->dev->sdev->dma_dev,
+		dma_unmap_single(ring->dev->dev->dma_dev,
 				 addr, len, DMA_TO_DEVICE);
 	} else {
-		dma_unmap_single(ring->dev->sdev->dma_dev,
+		dma_unmap_single(ring->dev->dev->dma_dev,
 				 addr, len, DMA_FROM_DEVICE);
 	}
 }
@@ -361,7 +361,7 @@
 				 dma_addr_t addr, size_t len)
 {
 	B43_WARN_ON(ring->tx);
-	dma_sync_single_for_cpu(ring->dev->sdev->dma_dev,
+	dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
 				    addr, len, DMA_FROM_DEVICE);
 }
 
@@ -370,7 +370,7 @@
 				    dma_addr_t addr, size_t len)
 {
 	B43_WARN_ON(ring->tx);
-	dma_sync_single_for_device(ring->dev->sdev->dma_dev,
+	dma_sync_single_for_device(ring->dev->dev->dma_dev,
 				   addr, len, DMA_FROM_DEVICE);
 }
 
@@ -401,7 +401,7 @@
 	 */
 	if (ring->type == B43_DMA_64BIT)
 		flags |= GFP_DMA;
-	ring->descbase = dma_alloc_coherent(ring->dev->sdev->dma_dev,
+	ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
 					    B43_DMA_RINGMEMSIZE,
 					    &(ring->dmabase), flags);
 	if (!ring->descbase) {
@@ -415,7 +415,7 @@
 
 static void free_ringmemory(struct b43_dmaring *ring)
 {
-	dma_free_coherent(ring->dev->sdev->dma_dev, B43_DMA_RINGMEMSIZE,
+	dma_free_coherent(ring->dev->dev->dma_dev, B43_DMA_RINGMEMSIZE,
 			  ring->descbase, ring->dmabase);
 }
 
@@ -523,7 +523,7 @@
 				  dma_addr_t addr,
 				  size_t buffersize, bool dma_to_device)
 {
-	if (unlikely(dma_mapping_error(ring->dev->sdev->dma_dev, addr)))
+	if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
 		return 1;
 
 	switch (ring->type) {
@@ -757,14 +757,14 @@
 
 static void free_all_descbuffers(struct b43_dmaring *ring)
 {
-	struct b43_dmadesc_generic *desc;
 	struct b43_dmadesc_meta *meta;
 	int i;
 
 	if (!ring->used_slots)
 		return;
 	for (i = 0; i < ring->nr_slots; i++) {
-		desc = ring->ops->idx2desc(ring, i, &meta);
+		/* get meta - ignore returned value */
+		ring->ops->idx2desc(ring, i, &meta);
 
 		if (!meta->skb || b43_dma_ptr_is_poisoned(meta->skb)) {
 			B43_WARN_ON(!ring->tx);
@@ -869,7 +869,7 @@
 			goto err_kfree_meta;
 
 		/* test for ability to dma to txhdr_cache */
-		dma_test = dma_map_single(dev->sdev->dma_dev,
+		dma_test = dma_map_single(dev->dev->dma_dev,
 					  ring->txhdr_cache,
 					  b43_txhdr_size(dev),
 					  DMA_TO_DEVICE);
@@ -884,7 +884,7 @@
 			if (!ring->txhdr_cache)
 				goto err_kfree_meta;
 
-			dma_test = dma_map_single(dev->sdev->dma_dev,
+			dma_test = dma_map_single(dev->dev->dma_dev,
 						  ring->txhdr_cache,
 						  b43_txhdr_size(dev),
 						  DMA_TO_DEVICE);
@@ -898,7 +898,7 @@
 			}
 		}
 
-		dma_unmap_single(dev->sdev->dma_dev,
+		dma_unmap_single(dev->dev->dma_dev,
 				 dma_test, b43_txhdr_size(dev),
 				 DMA_TO_DEVICE);
 	}
@@ -1013,9 +1013,9 @@
 	/* Try to set the DMA mask. If it fails, try falling back to a
 	 * lower mask, as we can always also support a lower one. */
 	while (1) {
-		err = dma_set_mask(dev->sdev->dma_dev, mask);
+		err = dma_set_mask(dev->dev->dma_dev, mask);
 		if (!err) {
-			err = dma_set_coherent_mask(dev->sdev->dma_dev, mask);
+			err = dma_set_coherent_mask(dev->dev->dma_dev, mask);
 			if (!err)
 				break;
 		}
@@ -1085,7 +1085,7 @@
 		goto err_destroy_mcast;
 
 	/* No support for the TX status DMA ring. */
-	B43_WARN_ON(dev->sdev->id.revision < 5);
+	B43_WARN_ON(dev->dev->core_rev < 5);
 
 	b43dbg(dev->wl, "%u-bit DMA initialized\n",
 	       (unsigned int)type);
@@ -1388,7 +1388,6 @@
 {
 	const struct b43_dma_ops *ops;
 	struct b43_dmaring *ring;
-	struct b43_dmadesc_generic *desc;
 	struct b43_dmadesc_meta *meta;
 	int slot, firstused;
 	bool frame_succeed;
@@ -1416,7 +1415,8 @@
 	ops = ring->ops;
 	while (1) {
 		B43_WARN_ON(slot < 0 || slot >= ring->nr_slots);
-		desc = ops->idx2desc(ring, slot, &meta);
+		/* get meta - ignore returned value */
+		ops->idx2desc(ring, slot, &meta);
 
 		if (b43_dma_ptr_is_poisoned(meta->skb)) {
 			b43dbg(dev->wl, "Poisoned TX slot %d (first=%d) "
diff --git a/drivers/net/wireless/b43/leds.c b/drivers/net/wireless/b43/leds.c
index 0cafafe..b56ed41 100644
--- a/drivers/net/wireless/b43/leds.c
+++ b/drivers/net/wireless/b43/leds.c
@@ -138,7 +138,7 @@
 	led->led_dev.default_trigger = default_trigger;
 	led->led_dev.brightness_set = b43_led_brightness_set;
 
-	err = led_classdev_register(dev->sdev->dev, &led->led_dev);
+	err = led_classdev_register(dev->dev->dev, &led->led_dev);
 	if (err) {
 		b43warn(dev->wl, "LEDs: Failed to register %s\n", name);
 		led->wl = NULL;
@@ -215,13 +215,12 @@
 				  enum b43_led_behaviour *behaviour,
 				  bool *activelow)
 {
-	struct ssb_bus *bus = dev->sdev->bus;
 	u8 sprom[4];
 
-	sprom[0] = bus->sprom.gpio0;
-	sprom[1] = bus->sprom.gpio1;
-	sprom[2] = bus->sprom.gpio2;
-	sprom[3] = bus->sprom.gpio3;
+	sprom[0] = dev->dev->bus_sprom->gpio0;
+	sprom[1] = dev->dev->bus_sprom->gpio1;
+	sprom[2] = dev->dev->bus_sprom->gpio2;
+	sprom[3] = dev->dev->bus_sprom->gpio3;
 
 	if (sprom[led_index] == 0xFF) {
 		/* There is no LED information in the SPROM
@@ -231,12 +230,12 @@
 		case 0:
 			*behaviour = B43_LED_ACTIVITY;
 			*activelow = 1;
-			if (bus->boardinfo.vendor == PCI_VENDOR_ID_COMPAQ)
+			if (dev->dev->board_vendor == PCI_VENDOR_ID_COMPAQ)
 				*behaviour = B43_LED_RADIO_ALL;
 			break;
 		case 1:
 			*behaviour = B43_LED_RADIO_B;
-			if (bus->boardinfo.vendor == PCI_VENDOR_ID_ASUSTEK)
+			if (dev->dev->board_vendor == PCI_VENDOR_ID_ASUSTEK)
 				*behaviour = B43_LED_ASSOC;
 			break;
 		case 2:
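
b43_led_get_sprominfo() reads one SPROM byte per LED and treats 0xFF as "not programmed", falling back to per-index defaults like the cases above. The lookup pattern in isolation, with illustrative behaviour values:

#include <stdio.h>
#include <stdint.h>

enum led_behaviour { LED_ACTIVITY, LED_RADIO_B, LED_ASSOC, LED_OFF };

static enum led_behaviour led_behaviour(const uint8_t sprom[4], int idx)
{
	if (sprom[idx] != 0xFF)
		return (enum led_behaviour)(sprom[idx] & 0x7F);

	/* no LED information in the SPROM: use a per-index default */
	switch (idx) {
	case 0:
		return LED_ACTIVITY;
	case 1:
		return LED_RADIO_B;
	default:
		return LED_OFF;
	}
}

int main(void)
{
	const uint8_t sprom[4] = { 0xFF, 0x02, 0xFF, 0xFF };

	printf("led0=%d led1=%d\n",
	       led_behaviour(sprom, 0), led_behaviour(sprom, 1));
	return 0;
}
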
diff --git a/drivers/net/wireless/b43/lo.c b/drivers/net/wireless/b43/lo.c
index 2ef7d4b..a3dc8bb 100644
--- a/drivers/net/wireless/b43/lo.c
+++ b/drivers/net/wireless/b43/lo.c
@@ -98,7 +98,7 @@
 		rfover |= pga;
 		rfover |= lna;
 		rfover |= trsw_rx;
-		if ((dev->sdev->bus->sprom.boardflags_lo & B43_BFL_EXTLNA)
+		if ((dev->dev->bus_sprom->boardflags_lo & B43_BFL_EXTLNA)
 		    && phy->rev > 6)
 			rfover |= B43_PHY_RFOVERVAL_EXTLNA;
 
@@ -301,14 +301,12 @@
 		max_rx_gain = 0;
 
 	if (has_loopback_gain(phy)) {
-		int trsw_rx = 0;
 		int trsw_rx_gain;
 
 		if (use_trsw_rx) {
 			trsw_rx_gain = gphy->trsw_rx_gain / 2;
 			if (max_rx_gain >= trsw_rx_gain) {
 				trsw_rx_gain = max_rx_gain - trsw_rx_gain;
-				trsw_rx = 0x20;
 			}
 		} else
 			trsw_rx_gain = max_rx_gain;
@@ -387,7 +385,7 @@
 static void lo_measure_setup(struct b43_wldev *dev,
 			     struct lo_g_saved_values *sav)
 {
-	struct ssb_sprom *sprom = &dev->sdev->bus->sprom;
+	struct ssb_sprom *sprom = dev->dev->bus_sprom;
 	struct b43_phy *phy = &dev->phy;
 	struct b43_phy_g *gphy = phy->g;
 	struct b43_txpower_lo_control *lo = gphy->lo_control;
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index eb41596..cae3146 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -113,6 +113,16 @@
 module_param_named(pio, b43_modparam_pio, int, 0644);
 MODULE_PARM_DESC(pio, "Use PIO accesses by default: 0=DMA, 1=PIO");
 
+#ifdef CONFIG_B43_BCMA
+static const struct bcma_device_id b43_bcma_tbl[] = {
+	BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x17, BCMA_ANY_CLASS),
+	BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x18, BCMA_ANY_CLASS),
+	BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x1D, BCMA_ANY_CLASS),
+	BCMA_CORETABLE_END
+};
+MODULE_DEVICE_TABLE(bcma, b43_bcma_tbl);
+#endif
+
 static const struct ssb_device_id b43_ssb_tbl[] = {
 	SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 5),
 	SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 6),
@@ -548,7 +558,7 @@
 {
 	u32 low, high;
 
-	B43_WARN_ON(dev->sdev->id.revision < 3);
+	B43_WARN_ON(dev->dev->core_rev < 3);
 
 	/* The hardware guarantees us an atomic read, if we
 	 * read the low register first. */
@@ -586,7 +596,7 @@
 {
 	u32 low, high;
 
-	B43_WARN_ON(dev->sdev->id.revision < 3);
+	B43_WARN_ON(dev->dev->core_rev < 3);
 
 	low = tsf;
 	high = (tsf >> 32);
@@ -714,7 +724,7 @@
 		b43_ram_write(dev, i * 4, buffer[i]);
 
 	b43_write16(dev, 0x0568, 0x0000);
-	if (dev->sdev->id.revision < 11)
+	if (dev->dev->core_rev < 11)
 		b43_write16(dev, 0x07C0, 0x0000);
 	else
 		b43_write16(dev, 0x07C0, 0x0100);
@@ -1132,7 +1142,7 @@
 	b43_write32(dev, B43_MMIO_MACCTL, macctl);
 	/* Commit write */
 	b43_read32(dev, B43_MMIO_MACCTL);
-	if (awake && dev->sdev->id.revision >= 5) {
+	if (awake && dev->dev->core_rev >= 5) {
 		/* Wait for the microcode to wake up. */
 		for (i = 0; i < 100; i++) {
 			ucstat = b43_shm_read16(dev, B43_SHM_SHARED,
@@ -1144,35 +1154,39 @@
 	}
 }
 
-static void b43_ssb_wireless_core_reset(struct b43_wldev *dev, u32 flags)
+static void b43_ssb_wireless_core_reset(struct b43_wldev *dev, bool gmode)
 {
+	struct ssb_device *sdev = dev->dev->sdev;
 	u32 tmslow;
+	u32 flags = 0;
 
+	if (gmode)
+		flags |= B43_TMSLOW_GMODE;
 	flags |= B43_TMSLOW_PHYCLKEN;
 	flags |= B43_TMSLOW_PHYRESET;
 	if (dev->phy.type == B43_PHYTYPE_N)
 		flags |= B43_TMSLOW_PHY_BANDWIDTH_20MHZ; /* Make 20 MHz def */
-	ssb_device_enable(dev->sdev, flags);
+	b43_device_enable(dev, flags);
 	msleep(2);		/* Wait for the PLL to turn on. */
 
 	/* Now take the PHY out of Reset again */
-	tmslow = ssb_read32(dev->sdev, SSB_TMSLOW);
+	tmslow = ssb_read32(sdev, SSB_TMSLOW);
 	tmslow |= SSB_TMSLOW_FGC;
 	tmslow &= ~B43_TMSLOW_PHYRESET;
-	ssb_write32(dev->sdev, SSB_TMSLOW, tmslow);
-	ssb_read32(dev->sdev, SSB_TMSLOW);	/* flush */
+	ssb_write32(sdev, SSB_TMSLOW, tmslow);
+	ssb_read32(sdev, SSB_TMSLOW);	/* flush */
 	msleep(1);
 	tmslow &= ~SSB_TMSLOW_FGC;
-	ssb_write32(dev->sdev, SSB_TMSLOW, tmslow);
-	ssb_read32(dev->sdev, SSB_TMSLOW);	/* flush */
+	ssb_write32(sdev, SSB_TMSLOW, tmslow);
+	ssb_read32(sdev, SSB_TMSLOW);	/* flush */
 	msleep(1);
 }
 
-void b43_wireless_core_reset(struct b43_wldev *dev, u32 flags)
+void b43_wireless_core_reset(struct b43_wldev *dev, bool gmode)
 {
 	u32 macctl;
 
-	b43_ssb_wireless_core_reset(dev, flags);
+	b43_ssb_wireless_core_reset(dev, gmode);
 
 	/* Turn Analog ON, but only if we already know the PHY-type.
 	 * This protects against very early setup where we don't know the
@@ -1183,7 +1197,7 @@
 
 	macctl = b43_read32(dev, B43_MMIO_MACCTL);
 	macctl &= ~B43_MACCTL_GMODE;
-	if (flags & B43_TMSLOW_GMODE)
+	if (gmode)
 		macctl |= B43_MACCTL_GMODE;
 	macctl |= B43_MACCTL_IHR_ENABLED;
 	b43_write32(dev, B43_MMIO_MACCTL, macctl);
@@ -1221,7 +1235,7 @@
 {
 	u32 dummy;
 
-	if (dev->sdev->id.revision < 5)
+	if (dev->dev->core_rev < 5)
 		return;
 	/* Read all entries from the microcode TXstatus FIFO
 	 * and throw them away.
@@ -1427,9 +1441,9 @@
 
 	/* Get the mask of available antennas. */
 	if (dev->phy.gmode)
-		antenna_mask = dev->sdev->bus->sprom.ant_available_bg;
+		antenna_mask = dev->dev->bus_sprom->ant_available_bg;
 	else
-		antenna_mask = dev->sdev->bus->sprom.ant_available_a;
+		antenna_mask = dev->dev->bus_sprom->ant_available_a;
 
 	if (!(antenna_mask & (1 << (antenna_nr - 1)))) {
 		/* This antenna is not available. Fall back to default. */
@@ -1644,7 +1658,7 @@
 	mutex_lock(&wl->mutex);
 	dev = wl->current_dev;
 	if (likely(dev && (b43_status(dev) >= B43_STAT_INITIALIZED))) {
-		if (dev->sdev->bus->bustype == SSB_BUSTYPE_SDIO) {
+		if (b43_bus_host_is_sdio(dev->dev)) {
 			/* wl->mutex is enough. */
 			b43_do_beacon_update_trigger_work(dev);
 			mmiowb();
@@ -1689,7 +1703,7 @@
 static void b43_set_beacon_int(struct b43_wldev *dev, u16 beacon_int)
 {
 	b43_time_lock(dev);
-	if (dev->sdev->id.revision >= 3) {
+	if (dev->dev->core_rev >= 3) {
 		b43_write32(dev, B43_MMIO_TSF_CFP_REP, (beacon_int << 16));
 		b43_write32(dev, B43_MMIO_TSF_CFP_START, (beacon_int << 10));
 	} else {
@@ -2063,7 +2077,7 @@
 		B43_WARN_ON(1);
 		return -ENOSYS;
 	}
-	err = request_firmware(&blob, ctx->fwname, ctx->dev->sdev->dev);
+	err = request_firmware(&blob, ctx->fwname, ctx->dev->dev->dev);
 	if (err == -ENOENT) {
 		snprintf(ctx->errors[ctx->req_type],
 			 sizeof(ctx->errors[ctx->req_type]),
@@ -2113,7 +2127,7 @@
 {
 	struct b43_wldev *dev = ctx->dev;
 	struct b43_firmware *fw = &ctx->dev->fw;
-	const u8 rev = ctx->dev->sdev->id.revision;
+	const u8 rev = ctx->dev->dev->core_rev;
 	const char *filename;
 	u32 tmshigh;
 	int err;
@@ -2157,7 +2171,7 @@
 	switch (dev->phy.type) {
 	case B43_PHYTYPE_A:
 		if ((rev >= 5) && (rev <= 10)) {
-			tmshigh = ssb_read32(dev->sdev, SSB_TMSHIGH);
+			tmshigh = ssb_read32(dev->dev->sdev, SSB_TMSHIGH);
 			if (tmshigh & B43_TMSHIGH_HAVE_2GHZ_PHY)
 				filename = "a0g1initvals5";
 			else
@@ -2202,7 +2216,7 @@
 	switch (dev->phy.type) {
 	case B43_PHYTYPE_A:
 		if ((rev >= 5) && (rev <= 10)) {
-			tmshigh = ssb_read32(dev->sdev, SSB_TMSHIGH);
+			tmshigh = ssb_read32(dev->dev->sdev, SSB_TMSHIGH);
 			if (tmshigh & B43_TMSHIGH_HAVE_2GHZ_PHY)
 				filename = "a0g1bsinitvals5";
 			else
@@ -2448,7 +2462,7 @@
 
 	snprintf(wiphy->fw_version, sizeof(wiphy->fw_version), "%u.%u",
 			dev->fw.rev, dev->fw.patch);
-	wiphy->hw_version = dev->sdev->id.coreid;
+	wiphy->hw_version = dev->dev->core_id;
 
 	if (b43_is_old_txhdr_format(dev)) {
 		/* We're over the deadline, but we keep support for old fw
@@ -2566,7 +2580,7 @@
  */
 static struct ssb_device *b43_ssb_gpio_dev(struct b43_wldev *dev)
 {
-	struct ssb_bus *bus = dev->sdev->bus;
+	struct ssb_bus *bus = dev->dev->sdev->bus;
 
 #ifdef CONFIG_SSB_DRIVER_PCICORE
 	return (bus->chipco.dev ? bus->chipco.dev : bus->pcicore.dev);
@@ -2588,7 +2602,7 @@
 
 	mask = 0x0000001F;
 	set = 0x0000000F;
-	if (dev->sdev->bus->chip_id == 0x4301) {
+	if (dev->dev->chip_id == 0x4301) {
 		mask |= 0x0060;
 		set |= 0x0060;
 	}
@@ -2599,14 +2613,14 @@
 		mask |= 0x0180;
 		set |= 0x0180;
 	}
-	if (dev->sdev->bus->sprom.boardflags_lo & B43_BFL_PACTRL) {
+	if (dev->dev->bus_sprom->boardflags_lo & B43_BFL_PACTRL) {
 		b43_write16(dev, B43_MMIO_GPIO_MASK,
 			    b43_read16(dev, B43_MMIO_GPIO_MASK)
 			    | 0x0200);
 		mask |= 0x0200;
 		set |= 0x0200;
 	}
-	if (dev->sdev->id.revision >= 2)
+	if (dev->dev->core_rev >= 2)
 		mask |= 0x0010;	/* FIXME: This is redundant. */
 
 	gpiodev = b43_ssb_gpio_dev(dev);
@@ -2741,15 +2755,15 @@
 	/* Workaround: On old hardware the HW-MAC-address-filter
 	 * doesn't work properly, so always run promisc and filter
 	 * it in software. */
-	if (dev->sdev->id.revision <= 4)
+	if (dev->dev->core_rev <= 4)
 		ctl |= B43_MACCTL_PROMISC;
 
 	b43_write32(dev, B43_MMIO_MACCTL, ctl);
 
 	cfp_pretbtt = 2;
 	if ((ctl & B43_MACCTL_INFRA) && !(ctl & B43_MACCTL_AP)) {
-		if (dev->sdev->bus->chip_id == 0x4306 &&
-		    dev->sdev->bus->chip_rev == 3)
+		if (dev->dev->chip_id == 0x4306 &&
+		    dev->dev->chip_rev == 3)
 			cfp_pretbtt = 100;
 		else
 			cfp_pretbtt = 50;
@@ -2907,7 +2921,7 @@
 		b43_write16(dev, 0x005E, value16);
 	}
 	b43_write32(dev, 0x0100, 0x01000000);
-	if (dev->sdev->id.revision < 5)
+	if (dev->dev->core_rev < 5)
 		b43_write32(dev, 0x010C, 0x01000000);
 
 	b43_write32(dev, B43_MMIO_MACCTL, b43_read32(dev, B43_MMIO_MACCTL)
@@ -2922,7 +2936,7 @@
 	/* Initially set the wireless operation mode. */
 	b43_adjust_opmode(dev);
 
-	if (dev->sdev->id.revision < 3) {
+	if (dev->dev->core_rev < 3) {
 		b43_write16(dev, 0x060E, 0x0000);
 		b43_write16(dev, 0x0610, 0x8000);
 		b43_write16(dev, 0x0604, 0x0000);
@@ -3105,7 +3119,7 @@
 	b43_shm_write32(dev, B43_SHM_SHARED, 0, backup0);
 	b43_shm_write32(dev, B43_SHM_SHARED, 4, backup4);
 
-	if ((dev->sdev->id.revision >= 3) && (dev->sdev->id.revision <= 10)) {
+	if ((dev->dev->core_rev >= 3) && (dev->dev->core_rev <= 10)) {
 		/* The 32bit register shadows the two 16bit registers
 		 * with update sideeffects. Validate this. */
 		b43_write16(dev, B43_MMIO_TSF_CFP_START, 0xAAAA);
@@ -3954,7 +3968,7 @@
 
 	/* Disable interrupts on the device. */
 	b43_set_status(dev, B43_STAT_INITIALIZED);
-	if (dev->sdev->bus->bustype == SSB_BUSTYPE_SDIO) {
+	if (b43_bus_host_is_sdio(dev->dev)) {
 		/* wl->mutex is locked. That is enough. */
 		b43_write32(dev, B43_MMIO_GEN_IRQ_MASK, 0);
 		b43_read32(dev, B43_MMIO_GEN_IRQ_MASK);	/* Flush */
@@ -3967,11 +3981,11 @@
 	/* Synchronize and free the interrupt handlers. Unlock to avoid deadlocks. */
 	orig_dev = dev;
 	mutex_unlock(&wl->mutex);
-	if (dev->sdev->bus->bustype == SSB_BUSTYPE_SDIO) {
+	if (b43_bus_host_is_sdio(dev->dev)) {
 		b43_sdio_free_irq(dev);
 	} else {
-		synchronize_irq(dev->sdev->irq);
-		free_irq(dev->sdev->irq, dev);
+		synchronize_irq(dev->dev->irq);
+		free_irq(dev->dev->irq, dev);
 	}
 	mutex_lock(&wl->mutex);
 	dev = wl->current_dev;
@@ -4004,19 +4018,19 @@
 	B43_WARN_ON(b43_status(dev) != B43_STAT_INITIALIZED);
 
 	drain_txstatus_queue(dev);
-	if (dev->sdev->bus->bustype == SSB_BUSTYPE_SDIO) {
+	if (b43_bus_host_is_sdio(dev->dev)) {
 		err = b43_sdio_request_irq(dev, b43_sdio_interrupt_handler);
 		if (err) {
 			b43err(dev->wl, "Cannot request SDIO IRQ\n");
 			goto out;
 		}
 	} else {
-		err = request_threaded_irq(dev->sdev->irq, b43_interrupt_handler,
+		err = request_threaded_irq(dev->dev->irq, b43_interrupt_handler,
 					   b43_interrupt_thread_handler,
 					   IRQF_SHARED, KBUILD_MODNAME, dev);
 		if (err) {
 			b43err(dev->wl, "Cannot request IRQ-%d\n",
-			       dev->sdev->irq);
+			       dev->dev->irq);
 			goto out;
 		}
 	}
@@ -4085,7 +4099,7 @@
 #endif
 	default:
 		unsupported = 1;
-	};
+	}
 	if (unsupported) {
 		b43err(dev->wl, "FOUND UNSUPPORTED PHY "
 		       "(Analog %u, Type %u, Revision %u)\n",
@@ -4096,10 +4110,10 @@
 	       analog_type, phy_type, phy_rev);
 
 	/* Get RADIO versioning */
-	if (dev->sdev->bus->chip_id == 0x4317) {
-		if (dev->sdev->bus->chip_rev == 0)
+	if (dev->dev->chip_id == 0x4317) {
+		if (dev->dev->chip_rev == 0)
 			tmp = 0x3205017F;
-		else if (dev->sdev->bus->chip_rev == 1)
+		else if (dev->dev->chip_rev == 1)
 			tmp = 0x4205017F;
 		else
 			tmp = 0x5205017F;
@@ -4204,7 +4218,7 @@
 
 static void b43_bluetooth_coext_enable(struct b43_wldev *dev)
 {
-	struct ssb_sprom *sprom = &dev->sdev->bus->sprom;
+	struct ssb_sprom *sprom = dev->dev->bus_sprom;
 	u64 hf;
 
 	if (!modparam_btcoex)
@@ -4231,16 +4245,21 @@
 
 static void b43_imcfglo_timeouts_workaround(struct b43_wldev *dev)
 {
-	struct ssb_bus *bus = dev->sdev->bus;
+	struct ssb_bus *bus;
 	u32 tmp;
 
+	if (dev->dev->bus_type != B43_BUS_SSB)
+		return;
+
+	bus = dev->dev->sdev->bus;
+
 	if ((bus->chip_id == 0x4311 && bus->chip_rev == 2) ||
 	    (bus->chip_id == 0x4312)) {
-		tmp = ssb_read32(dev->sdev, SSB_IMCFGLO);
+		tmp = ssb_read32(dev->dev->sdev, SSB_IMCFGLO);
 		tmp &= ~SSB_IMCFGLO_REQTO;
 		tmp &= ~SSB_IMCFGLO_SERTO;
 		tmp |= 0x3;
-		ssb_write32(dev->sdev, SSB_IMCFGLO, tmp);
+		ssb_write32(dev->dev->sdev, SSB_IMCFGLO, tmp);
 		ssb_commit_settings(bus);
 	}
 }
@@ -4310,29 +4329,26 @@
 		dev->wl->current_beacon = NULL;
 	}
 
-	ssb_device_disable(dev->sdev, 0);
-	ssb_bus_may_powerdown(dev->sdev->bus);
+	b43_device_disable(dev, 0);
+	b43_bus_may_powerdown(dev);
 }
 
 /* Initialize a wireless core */
 static int b43_wireless_core_init(struct b43_wldev *dev)
 {
 	struct ssb_bus *bus = dev->sdev->bus;
-	struct ssb_sprom *sprom = &bus->sprom;
+	struct ssb_sprom *sprom = dev->dev->bus_sprom;
 	struct b43_phy *phy = &dev->phy;
 	int err;
 	u64 hf;
-	u32 tmp;
 
 	B43_WARN_ON(b43_status(dev) != B43_STAT_UNINIT);
 
-	err = ssb_bus_powerup(bus, 0);
+	err = b43_bus_powerup(dev, 0);
 	if (err)
 		goto out;
-	if (!ssb_device_is_enabled(dev->sdev)) {
-		tmp = phy->gmode ? B43_TMSLOW_GMODE : 0;
-		b43_wireless_core_reset(dev, tmp);
-	}
+	if (!b43_device_is_enabled(dev))
+		b43_wireless_core_reset(dev, phy->gmode);
 
 	/* Reset all data structures. */
 	setup_struct_wldev_for_init(dev);
@@ -4352,7 +4368,7 @@
 	if (err)
 		goto err_busdown;
 	b43_shm_write16(dev, B43_SHM_SHARED,
-			B43_SHM_SH_WLCOREREV, dev->sdev->id.revision);
+			B43_SHM_SH_WLCOREREV, dev->dev->core_rev);
 	hf = b43_hf_read(dev);
 	if (phy->type == B43_PHYTYPE_G) {
 		hf |= B43_HF_SYMW;
@@ -4399,8 +4415,8 @@
 	/* Maximum Contention Window */
 	b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_MAXCONT, 0x3FF);
 
-	if ((dev->sdev->bus->bustype == SSB_BUSTYPE_PCMCIA) ||
-	    (dev->sdev->bus->bustype == SSB_BUSTYPE_SDIO) ||
+	if (b43_bus_host_is_pcmcia(dev->dev) ||
+	    b43_bus_host_is_sdio(dev->dev) ||
 	    dev->use_pio) {
 		dev->__using_pio_transfers = 1;
 		err = b43_pio_init(dev);
@@ -4414,7 +4430,7 @@
 	b43_set_synth_pu_delay(dev, 1);
 	b43_bluetooth_coext_enable(dev);
 
-	ssb_bus_powerup(bus, !(sprom->boardflags_lo & B43_BFL_XTAL_NOSLOW));
+	b43_bus_powerup(dev, !(sprom->boardflags_lo & B43_BFL_XTAL_NOSLOW));
 	b43_upload_card_macaddress(dev);
 	b43_security_init(dev);
 
@@ -4431,7 +4447,7 @@
 err_chip_exit:
 	b43_chip_exit(dev);
 err_busdown:
-	ssb_bus_may_powerdown(bus);
+	b43_bus_may_powerdown(dev);
 	B43_WARN_ON(b43_status(dev) != B43_STAT_UNINIT);
 	return err;
 }
@@ -4741,7 +4757,6 @@
 	struct pci_dev *pdev = (bus->bustype == SSB_BUSTYPE_PCI) ? bus->host_pci : NULL;
 	int err;
 	bool have_2ghz_phy = 0, have_5ghz_phy = 0;
-	u32 tmp;
 
 	/* Do NOT do any device initialization here.
 	 * Do it in wireless_core_init() instead.
@@ -4750,13 +4765,13 @@
 	 * that in core_init(), too.
 	 */
 
-	err = ssb_bus_powerup(bus, 0);
+	err = b43_bus_powerup(dev, 0);
 	if (err) {
 		b43err(wl, "Bus powerup failed\n");
 		goto out;
 	}
 	/* Get the PHY type. */
-	if (dev->sdev->id.revision >= 5) {
+	if (dev->dev->core_rev >= 5) {
 		u32 tmshigh;
 
 		tmshigh = ssb_read32(dev->sdev, SSB_TMSHIGH);
@@ -4767,8 +4782,7 @@
 
 	dev->phy.gmode = have_2ghz_phy;
 	dev->phy.radio_on = 1;
-	tmp = dev->phy.gmode ? B43_TMSLOW_GMODE : 0;
-	b43_wireless_core_reset(dev, tmp);
+	b43_wireless_core_reset(dev, dev->phy.gmode);
 
 	err = b43_phy_versioning(dev);
 	if (err)
@@ -4816,8 +4830,7 @@
 		goto err_powerdown;
 
 	dev->phy.gmode = have_2ghz_phy;
-	tmp = dev->phy.gmode ? B43_TMSLOW_GMODE : 0;
-	b43_wireless_core_reset(dev, tmp);
+	b43_wireless_core_reset(dev, dev->phy.gmode);
 
 	err = b43_validate_chipaccess(dev);
 	if (err)
@@ -4832,8 +4845,8 @@
 	INIT_WORK(&dev->restart_work, b43_chip_reset);
 
 	dev->phy.ops->switch_analog(dev, 0);
-	ssb_device_disable(dev->sdev, 0);
-	ssb_bus_may_powerdown(bus);
+	b43_device_disable(dev, 0);
+	b43_bus_may_powerdown(dev);
 
 out:
 	return err;
@@ -4841,11 +4854,11 @@
 err_phy_free:
 	b43_phy_free(dev);
 err_powerdown:
-	ssb_bus_may_powerdown(bus);
+	b43_bus_may_powerdown(dev);
 	return err;
 }
 
-static void b43_one_core_detach(struct ssb_device *dev)
+static void b43_one_core_detach(struct b43_bus_dev *dev)
 {
 	struct b43_wldev *wldev;
 	struct b43_wl *wl;
@@ -4853,17 +4866,17 @@
 	/* Do not cancel ieee80211-workqueue based work here.
 	 * See comment in b43_remove(). */
 
-	wldev = ssb_get_drvdata(dev);
+	wldev = ssb_get_drvdata(dev->sdev);
 	wl = wldev->wl;
 	b43_debugfs_remove_device(wldev);
 	b43_wireless_core_detach(wldev);
 	list_del(&wldev->list);
 	wl->nr_devs--;
-	ssb_set_drvdata(dev, NULL);
+	ssb_set_drvdata(dev->sdev, NULL);
 	kfree(wldev);
 }
 
-static int b43_one_core_attach(struct ssb_device *dev, struct b43_wl *wl)
+static int b43_one_core_attach(struct b43_bus_dev *dev, struct b43_wl *wl)
 {
 	struct b43_wldev *wldev;
 	int err = -ENOMEM;
@@ -4873,7 +4886,8 @@
 		goto out;
 
 	wldev->use_pio = b43_modparam_pio;
-	wldev->sdev = dev;
+	wldev->dev = dev;
+	wldev->sdev = dev->sdev; /* TODO: Remove when not needed */
 	wldev->wl = wl;
 	b43_set_status(wldev, B43_STAT_UNINIT);
 	wldev->bad_frames_preempt = modparam_bad_frames_preempt;
@@ -4885,7 +4899,7 @@
 
 	list_add(&wldev->list, &wl->devlist);
 	wl->nr_devs++;
-	ssb_set_drvdata(dev, wldev);
+	ssb_set_drvdata(dev->sdev, wldev);
 	b43_debugfs_add_device(wldev);
 
       out:
@@ -4926,11 +4940,11 @@
 	}
 }
 
-static void b43_wireless_exit(struct ssb_device *dev, struct b43_wl *wl)
+static void b43_wireless_exit(struct b43_bus_dev *dev, struct b43_wl *wl)
 {
 	struct ieee80211_hw *hw = wl->hw;
 
-	ssb_set_devtypedata(dev, NULL);
+	ssb_set_devtypedata(dev->sdev, NULL);
 	ieee80211_free_hw(hw);
 }
 
@@ -4982,24 +4996,48 @@
 	return wl;
 }
 
-static int b43_ssb_probe(struct ssb_device *dev, const struct ssb_device_id *id)
+#ifdef CONFIG_B43_BCMA
+static int b43_bcma_probe(struct bcma_device *core)
 {
+	b43err(NULL, "BCMA is not supported yet!");
+	return -EOPNOTSUPP;
+}
+
+static void b43_bcma_remove(struct bcma_device *core)
+{
+	/* TODO */
+}
+
+static struct bcma_driver b43_bcma_driver = {
+	.name		= KBUILD_MODNAME,
+	.id_table	= b43_bcma_tbl,
+	.probe		= b43_bcma_probe,
+	.remove		= b43_bcma_remove,
+};
+#endif
+
+static
+int b43_ssb_probe(struct ssb_device *sdev, const struct ssb_device_id *id)
+{
+	struct b43_bus_dev *dev;
 	struct b43_wl *wl;
 	int err;
 	int first = 0;
 
-	wl = ssb_get_devtypedata(dev);
+	dev = b43_bus_dev_ssb_init(sdev);
+	if (!dev)
+		return -ENOMEM;
+
+	wl = ssb_get_devtypedata(sdev);
 	if (!wl) {
 		/* Probing the first core. Must setup common struct b43_wl */
 		first = 1;
-		b43_sprom_fixup(dev->bus);
-		wl = b43_wireless_init(dev);
+		b43_sprom_fixup(sdev->bus);
+		wl = b43_wireless_init(sdev);
 		if (IS_ERR(wl)) {
 			err = PTR_ERR(wl);
 			goto out;
 		}
-		ssb_set_devtypedata(dev, wl);
-		B43_WARN_ON(ssb_get_devtypedata(dev) != wl);
+		ssb_set_devtypedata(sdev, wl);
+		B43_WARN_ON(ssb_get_devtypedata(sdev) != wl);
 	}
 	err = b43_one_core_attach(dev, wl);
 	if (err)
@@ -5023,10 +5061,10 @@
 	return err;
 }
 
-static void b43_ssb_remove(struct ssb_device *dev)
+static void b43_ssb_remove(struct ssb_device *sdev)
 {
-	struct b43_wl *wl = ssb_get_devtypedata(dev);
-	struct b43_wldev *wldev = ssb_get_drvdata(dev);
+	struct b43_wl *wl = ssb_get_devtypedata(sdev);
+	struct b43_wldev *wldev = ssb_get_drvdata(sdev);
 
 	/* We must cancel any work here before unregistering from ieee80211,
 	 * as the ieee80211 unreg will destroy the workqueue. */
@@ -5042,14 +5080,14 @@
 		ieee80211_unregister_hw(wl->hw);
 	}
 
-	b43_one_core_detach(dev);
+	b43_one_core_detach(wldev->dev);
 
 	if (list_empty(&wl->devlist)) {
 		b43_leds_unregister(wl);
 		/* Last core on the chip unregistered.
 		 * We can destroy common struct b43_wl.
 		 */
-		b43_wireless_exit(dev, wl);
+		b43_wireless_exit(wldev->dev, wl);
 	}
 }
 
@@ -5108,14 +5146,23 @@
 	err = b43_sdio_init();
 	if (err)
 		goto err_pcmcia_exit;
-	err = ssb_driver_register(&b43_ssb_driver);
+#ifdef CONFIG_B43_BCMA
+	err = bcma_driver_register(&b43_bcma_driver);
 	if (err)
 		goto err_sdio_exit;
+#endif
+	err = ssb_driver_register(&b43_ssb_driver);
+	if (err)
+		goto err_bcma_driver_exit;
 	b43_print_driverinfo();
 
 	return err;
 
+err_bcma_driver_exit:
+#ifdef CONFIG_B43_BCMA
+	bcma_driver_unregister(&b43_bcma_driver);
 err_sdio_exit:
+#endif
 	b43_sdio_exit();
 err_pcmcia_exit:
 	b43_pcmcia_exit();
@@ -5127,6 +5174,9 @@
 static void __exit b43_exit(void)
 {
 	ssb_driver_unregister(&b43_ssb_driver);
+#ifdef CONFIG_B43_BCMA
+	bcma_driver_unregister(&b43_bcma_driver);
+#endif
 	b43_sdio_exit();
 	b43_pcmcia_exit();
 	b43_debugfs_exit();
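
b43_init() now registers the optional BCMA driver before the SSB one and unwinds in reverse on failure, with the #ifdef placed so the err_sdio_exit label only exists when the goto that targets it does. The same shape as a standalone model; the register functions are fakes and B43_BCMA mirrors CONFIG_B43_BCMA:

#include <stdio.h>

#define B43_BCMA 1

static int  bcma_register(void)   { puts("bcma registered");    return 0; }
static void bcma_unregister(void) { puts("bcma unregistered"); }
static int  ssb_register(void)    { puts("ssb register fails"); return -1; }

static int b43_init_model(void)
{
	int err;

#if B43_BCMA
	err = bcma_register();
	if (err)
		return err;
#endif
	err = ssb_register();
	if (err)
		goto err_bcma_unregister;
	return 0;

err_bcma_unregister:
#if B43_BCMA
	bcma_unregister();	/* undo only what succeeded, in reverse order */
#endif
	return err;
}

int main(void)
{
	return b43_init_model() ? 1 : 0;
}
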
diff --git a/drivers/net/wireless/b43/main.h b/drivers/net/wireless/b43/main.h
index a0d327f..e4ebce9 100644
--- a/drivers/net/wireless/b43/main.h
+++ b/drivers/net/wireless/b43/main.h
@@ -121,7 +121,7 @@
 
 void b43_dummy_transmission(struct b43_wldev *dev, bool ofdm, bool pa_on);
 
-void b43_wireless_core_reset(struct b43_wldev *dev, u32 flags);
+void b43_wireless_core_reset(struct b43_wldev *dev, bool gmode);
 
 void b43_controller_restart(struct b43_wldev *dev, const char *reason);
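
The prototype change from u32 flags to bool gmode moves the TMSLOW flag derivation out of every caller and into the reset path itself, as b43_ssb_wireless_core_reset() now shows. The before/after in miniature, with illustrative names:

#include <stdbool.h>
#include <stdio.h>

#define TMSLOW_GMODE	0x20000000u

static void core_reset(bool gmode)
{
	unsigned int flags = 0;

	if (gmode)
		flags |= TMSLOW_GMODE;	/* flag derivation lives here now */
	printf("reset with flags 0x%08X\n", flags);
}

int main(void)
{
	core_reset(true);	/* was: core_reset(gmode ? TMSLOW_GMODE : 0) */
	return 0;
}
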
 
diff --git a/drivers/net/wireless/b43/phy_a.c b/drivers/net/wireless/b43/phy_a.c
index b01c8ce..73ace55 100644
--- a/drivers/net/wireless/b43/phy_a.c
+++ b/drivers/net/wireless/b43/phy_a.c
@@ -265,7 +265,6 @@
 
 void b43_phy_inita(struct b43_wldev *dev)
 {
-	struct ssb_bus *bus = dev->sdev->bus;
 	struct b43_phy *phy = &dev->phy;
 
 	/* This lowlevel A-PHY init is also called from G-PHY init.
@@ -296,9 +295,9 @@
 
 		b43_radio_init2060(dev);
 
-		if ((bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM) &&
-		    ((bus->boardinfo.type == SSB_BOARD_BU4306) ||
-		     (bus->boardinfo.type == SSB_BOARD_BU4309))) {
+		if ((dev->dev->board_vendor == SSB_BOARDVENDOR_BCM) &&
+		    ((dev->dev->board_type == SSB_BOARD_BU4306) ||
+		     (dev->dev->board_type == SSB_BOARD_BU4309))) {
 			; //TODO: A PHY LO
 		}
 
@@ -311,7 +310,7 @@
 	}
 
 	if ((phy->type == B43_PHYTYPE_G) &&
-	    (dev->sdev->bus->sprom.boardflags_lo & B43_BFL_PACTRL)) {
+	    (dev->dev->bus_sprom->boardflags_lo & B43_BFL_PACTRL)) {
 		b43_phy_maskset(dev, B43_PHY_OFDM(0x6E), 0xE000, 0x3CF);
 	}
 }
@@ -323,17 +322,17 @@
 	struct b43_phy_a *aphy = phy->a;
 	s16 pab0, pab1, pab2;
 
-	pab0 = (s16) (dev->sdev->bus->sprom.pa1b0);
-	pab1 = (s16) (dev->sdev->bus->sprom.pa1b1);
-	pab2 = (s16) (dev->sdev->bus->sprom.pa1b2);
+	pab0 = (s16) (dev->dev->bus_sprom->pa1b0);
+	pab1 = (s16) (dev->dev->bus_sprom->pa1b1);
+	pab2 = (s16) (dev->dev->bus_sprom->pa1b2);
 
 	if (pab0 != 0 && pab1 != 0 && pab2 != 0 &&
 	    pab0 != -1 && pab1 != -1 && pab2 != -1) {
 		/* The pabX values are set in SPROM. Use them. */
-		if ((s8) dev->sdev->bus->sprom.itssi_a != 0 &&
-		    (s8) dev->sdev->bus->sprom.itssi_a != -1)
+		if ((s8) dev->dev->bus_sprom->itssi_a != 0 &&
+		    (s8) dev->dev->bus_sprom->itssi_a != -1)
 			aphy->tgt_idle_tssi =
-			    (s8) (dev->sdev->bus->sprom.itssi_a);
+			    (s8) (dev->dev->bus_sprom->itssi_a);
 		else
 			aphy->tgt_idle_tssi = 62;
 		aphy->tssi2dbm = b43_generate_dyn_tssi2dbm_tab(dev, pab0,
diff --git a/drivers/net/wireless/b43/phy_common.c b/drivers/net/wireless/b43/phy_common.c
index e46b2f4..425af28 100644
--- a/drivers/net/wireless/b43/phy_common.c
+++ b/drivers/net/wireless/b43/phy_common.c
@@ -168,7 +168,7 @@
 	B43_WARN_ON(dev->phy.phy_locked);
 	dev->phy.phy_locked = 1;
 #endif
-	B43_WARN_ON(dev->sdev->id.revision < 3);
+	B43_WARN_ON(dev->dev->core_rev < 3);
 
 	if (!b43_is_mode(dev->wl, NL80211_IFTYPE_AP))
 		b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
@@ -180,7 +180,7 @@
 	B43_WARN_ON(!dev->phy.phy_locked);
 	dev->phy.phy_locked = 0;
 #endif
-	B43_WARN_ON(dev->sdev->id.revision < 3);
+	B43_WARN_ON(dev->dev->core_rev < 3);
 
 	if (!b43_is_mode(dev->wl, NL80211_IFTYPE_AP))
 		b43_power_saving_ctl_bits(dev, 0);
@@ -368,8 +368,8 @@
 	/* The next check will be needed in two seconds, or later. */
 	phy->next_txpwr_check_time = round_jiffies(now + (HZ * 2));
 
-	if ((dev->sdev->bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM) &&
-	    (dev->sdev->bus->boardinfo.type == SSB_BOARD_BU4306))
+	if ((dev->dev->board_vendor == SSB_BOARDVENDOR_BCM) &&
+	    (dev->dev->board_type == SSB_BOARD_BU4306))
 		return; /* No software txpower adjustment needed */
 
 	result = phy->ops->recalc_txpower(dev, !!(flags & B43_TXPWR_IGNORE_TSSI));
diff --git a/drivers/net/wireless/b43/phy_g.c b/drivers/net/wireless/b43/phy_g.c
index 1758a28..83532d1 100644
--- a/drivers/net/wireless/b43/phy_g.c
+++ b/drivers/net/wireless/b43/phy_g.c
@@ -718,7 +718,7 @@
 	B43_WARN_ON(phy->type != B43_PHYTYPE_G);
 
 	if (!phy->gmode ||
-	    !(dev->sdev->bus->sprom.boardflags_lo & B43_BFL_RSSI)) {
+	    !(dev->dev->bus_sprom->boardflags_lo & B43_BFL_RSSI)) {
 		tmp16 = b43_nrssi_hw_read(dev, 0x20);
 		if (tmp16 >= 0x20)
 			tmp16 -= 0x40;
@@ -1114,7 +1114,7 @@
 {
 	struct b43_phy *phy = &dev->phy;
 	struct b43_phy_g *gphy = phy->g;
-	struct ssb_sprom *sprom = &(dev->sdev->bus->sprom);
+	struct ssb_sprom *sprom = dev->dev->bus_sprom;
 
 	if (!phy->gmode)
 		return 0;
@@ -1491,7 +1491,6 @@
 
 static void b43_phy_initb5(struct b43_wldev *dev)
 {
-	struct ssb_bus *bus = dev->sdev->bus;
 	struct b43_phy *phy = &dev->phy;
 	struct b43_phy_g *gphy = phy->g;
 	u16 offset, value;
@@ -1500,8 +1499,8 @@
 	if (phy->analog == 1) {
 		b43_radio_set(dev, 0x007A, 0x0050);
 	}
-	if ((bus->boardinfo.vendor != SSB_BOARDVENDOR_BCM) &&
-	    (bus->boardinfo.type != SSB_BOARD_BU4306)) {
+	if ((dev->dev->board_vendor != SSB_BOARDVENDOR_BCM) &&
+	    (dev->dev->board_type != SSB_BOARD_BU4306)) {
 		value = 0x2120;
 		for (offset = 0x00A8; offset < 0x00C7; offset++) {
 			b43_phy_write(dev, offset, value);
@@ -1620,7 +1619,7 @@
 		b43_radio_write16(dev, 0x5A, 0x88);
 		b43_radio_write16(dev, 0x5B, 0x6B);
 		b43_radio_write16(dev, 0x5C, 0x0F);
-		if (dev->sdev->bus->sprom.boardflags_lo & B43_BFL_ALTIQ) {
+		if (dev->dev->bus_sprom->boardflags_lo & B43_BFL_ALTIQ) {
 			b43_radio_write16(dev, 0x5D, 0xFA);
 			b43_radio_write16(dev, 0x5E, 0xD8);
 		} else {
@@ -1787,7 +1786,7 @@
 	b43_phy_set(dev, B43_PHY_RFOVER, 0x0100);
 	b43_phy_mask(dev, B43_PHY_RFOVERVAL, 0xCFFF);
 
-	if (dev->sdev->bus->sprom.boardflags_lo & B43_BFL_EXTLNA) {
+	if (dev->dev->bus_sprom->boardflags_lo & B43_BFL_EXTLNA) {
 		if (phy->rev >= 7) {
 			b43_phy_set(dev, B43_PHY_RFOVER, 0x0800);
 			b43_phy_set(dev, B43_PHY_RFOVERVAL, 0x8000);
@@ -1922,7 +1921,6 @@
 /* Initialize B/G PHY power control */
 static void b43_phy_init_pctl(struct b43_wldev *dev)
 {
-	struct ssb_bus *bus = dev->sdev->bus;
 	struct b43_phy *phy = &dev->phy;
 	struct b43_phy_g *gphy = phy->g;
 	struct b43_rfatt old_rfatt;
@@ -1931,8 +1929,8 @@
 
 	B43_WARN_ON(phy->type != B43_PHYTYPE_G);
 
-	if ((bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM) &&
-	    (bus->boardinfo.type == SSB_BOARD_BU4306))
+	if ((dev->dev->board_vendor == SSB_BOARDVENDOR_BCM) &&
+	    (dev->dev->board_type == SSB_BOARD_BU4306))
 		return;
 
 	b43_phy_write(dev, 0x0028, 0x8018);
@@ -2053,7 +2051,7 @@
 	if (phy->rev >= 6) {
 		b43_phy_maskset(dev, B43_PHY_CCK(0x36), 0x0FFF, (gphy->lo_control->tx_bias << 12));
 	}
-	if (dev->sdev->bus->sprom.boardflags_lo & B43_BFL_PACTRL)
+	if (dev->dev->bus_sprom->boardflags_lo & B43_BFL_PACTRL)
 		b43_phy_write(dev, B43_PHY_CCK(0x2E), 0x8075);
 	else
 		b43_phy_write(dev, B43_PHY_CCK(0x2E), 0x807F);
@@ -2066,7 +2064,7 @@
 		b43_phy_write(dev, B43_PHY_LO_MASK, 0x8078);
 	}
 
-	if (!(dev->sdev->bus->sprom.boardflags_lo & B43_BFL_RSSI)) {
+	if (!(dev->dev->bus_sprom->boardflags_lo & B43_BFL_RSSI)) {
 		/* The specs state to update the NRSSI LT with
 		 * the value 0x7FFFFFFF here. I think that is some weird
 		 * compiler optimization in the original driver.
@@ -2088,8 +2086,8 @@
 	/* FIXME: The spec says in the following if, the 0 should be replaced
 	   'if OFDM may not be used in the current locale'
 	   but OFDM is legal everywhere */
-	if ((dev->sdev->bus->chip_id == 0x4306
-	     && dev->sdev->bus->chip_package == 2) || 0) {
+	if ((dev->dev->chip_id == 0x4306
+	     && dev->dev->chip_pkg == 2) || 0) {
 		b43_phy_mask(dev, B43_PHY_CRS0, 0xBFFF);
 		b43_phy_mask(dev, B43_PHY_OFDM(0xC3), 0x7FFF);
 	}
@@ -2105,7 +2103,7 @@
 	b43_write16(dev, B43_MMIO_CHANNEL, channel2freq_bg(channel));
 
 	if (channel == 14) {
-		if (dev->sdev->bus->sprom.country_code ==
+		if (dev->dev->bus_sprom->country_code ==
 		    SSB_SPROM1CCODE_JAPAN)
 			b43_hf_write(dev,
 				     b43_hf_read(dev) & ~B43_HF_ACPR);
@@ -2136,17 +2134,17 @@
 static void default_radio_attenuation(struct b43_wldev *dev,
 				      struct b43_rfatt *rf)
 {
-	struct ssb_bus *bus = dev->sdev->bus;
+	struct b43_bus_dev *bdev = dev->dev;
 	struct b43_phy *phy = &dev->phy;
 
 	rf->with_padmix = 0;
 
-	if (bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM &&
-	    bus->boardinfo.type == SSB_BOARD_BCM4309G) {
-		if (bus->boardinfo.rev < 0x43) {
+	if (dev->dev->board_vendor == SSB_BOARDVENDOR_BCM &&
+	    dev->dev->board_type == SSB_BOARD_BCM4309G) {
+		if (dev->dev->board_rev < 0x43) {
 			rf->att = 2;
 			return;
-		} else if (bus->boardinfo.rev < 0x51) {
+		} else if (dev->dev->board_rev < 0x51) {
 			rf->att = 3;
 			return;
 		}
@@ -2172,21 +2170,21 @@
 			return;
 		case 1:
 			if (phy->type == B43_PHYTYPE_G) {
-				if (bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM
-				    && bus->boardinfo.type == SSB_BOARD_BCM4309G
-				    && bus->boardinfo.rev >= 30)
+				if (bdev->board_vendor == SSB_BOARDVENDOR_BCM
+				    && bdev->board_type == SSB_BOARD_BCM4309G
+				    && bdev->board_rev >= 30)
 					rf->att = 3;
-				else if (bus->boardinfo.vendor ==
+				else if (bdev->board_vendor ==
 					 SSB_BOARDVENDOR_BCM
-					 && bus->boardinfo.type ==
+					 && bdev->board_type ==
 					 SSB_BOARD_BU4306)
 					rf->att = 3;
 				else
 					rf->att = 1;
 			} else {
-				if (bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM
-				    && bus->boardinfo.type == SSB_BOARD_BCM4309G
-				    && bus->boardinfo.rev >= 30)
+				if (bdev->board_vendor == SSB_BOARDVENDOR_BCM
+				    && bdev->board_type == SSB_BOARD_BCM4309G
+				    && bdev->board_rev >= 30)
 					rf->att = 7;
 				else
 					rf->att = 6;
@@ -2194,16 +2192,16 @@
 			return;
 		case 2:
 			if (phy->type == B43_PHYTYPE_G) {
-				if (bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM
-				    && bus->boardinfo.type == SSB_BOARD_BCM4309G
-				    && bus->boardinfo.rev >= 30)
+				if (bdev->board_vendor == SSB_BOARDVENDOR_BCM
+				    && bdev->board_type == SSB_BOARD_BCM4309G
+				    && bdev->board_rev >= 30)
 					rf->att = 3;
-				else if (bus->boardinfo.vendor ==
+				else if (bdev->board_vendor ==
 					 SSB_BOARDVENDOR_BCM
-					 && bus->boardinfo.type ==
+					 && bdev->board_type ==
 					 SSB_BOARD_BU4306)
 					rf->att = 5;
-				else if (bus->chip_id == 0x4320)
+				else if (bdev->chip_id == 0x4320)
 					rf->att = 4;
 				else
 					rf->att = 3;
@@ -2384,11 +2382,11 @@
 	struct b43_phy_g *gphy = phy->g;
 	s16 pab0, pab1, pab2;
 
-	pab0 = (s16) (dev->sdev->bus->sprom.pa0b0);
-	pab1 = (s16) (dev->sdev->bus->sprom.pa0b1);
-	pab2 = (s16) (dev->sdev->bus->sprom.pa0b2);
+	pab0 = (s16) (dev->dev->bus_sprom->pa0b0);
+	pab1 = (s16) (dev->dev->bus_sprom->pa0b1);
+	pab2 = (s16) (dev->dev->bus_sprom->pa0b2);
 
-	B43_WARN_ON((dev->sdev->bus->chip_id == 0x4301) &&
+	B43_WARN_ON((dev->dev->chip_id == 0x4301) &&
 		    (phy->radio_ver != 0x2050)); /* Not supported anymore */
 
 	gphy->dyn_tssi_tbl = 0;
@@ -2396,10 +2394,10 @@
 	if (pab0 != 0 && pab1 != 0 && pab2 != 0 &&
 	    pab0 != -1 && pab1 != -1 && pab2 != -1) {
 		/* The pabX values are set in SPROM. Use them. */
-		if ((s8) dev->sdev->bus->sprom.itssi_bg != 0 &&
-		    (s8) dev->sdev->bus->sprom.itssi_bg != -1) {
+		if ((s8) dev->dev->bus_sprom->itssi_bg != 0 &&
+		    (s8) dev->dev->bus_sprom->itssi_bg != -1) {
 			gphy->tgt_idle_tssi =
-				(s8) (dev->sdev->bus->sprom.itssi_bg);
+				(s8) (dev->dev->bus_sprom->itssi_bg);
 		} else
 			gphy->tgt_idle_tssi = 62;
 		gphy->tssi2dbm = b43_generate_dyn_tssi2dbm_tab(dev, pab0,
@@ -2537,7 +2535,7 @@
 		b43_wireless_core_reset(dev, 0);
 		b43_phy_initg(dev);
 		phy->gmode = 1;
-		b43_wireless_core_reset(dev, B43_TMSLOW_GMODE);
+		b43_wireless_core_reset(dev, 1);
 	}
 
 	return 0;
@@ -2840,7 +2838,7 @@
 				    B43_TXCTL_TXMIX;
 				rfatt += 2;
 				bbatt += 2;
-			} else if (dev->sdev->bus->sprom.
+			} else if (dev->dev->bus_sprom->
 				   boardflags_lo &
 				   B43_BFL_PACTRL) {
 				bbatt += 4 * (rfatt - 2);
@@ -2914,14 +2912,14 @@
 	estimated_pwr = b43_gphy_estimate_power_out(dev, average_tssi);
 
 	B43_WARN_ON(phy->type != B43_PHYTYPE_G);
-	max_pwr = dev->sdev->bus->sprom.maxpwr_bg;
-	if (dev->sdev->bus->sprom.boardflags_lo & B43_BFL_PACTRL)
+	max_pwr = dev->dev->bus_sprom->maxpwr_bg;
+	if (dev->dev->bus_sprom->boardflags_lo & B43_BFL_PACTRL)
 		max_pwr -= 3; /* minus 0.75 */
 	if (unlikely(max_pwr >= INT_TO_Q52(30/*dBm*/))) {
 		b43warn(dev->wl,
 			"Invalid max-TX-power value in SPROM.\n");
 		max_pwr = INT_TO_Q52(20); /* fake it */
-		dev->sdev->bus->sprom.maxpwr_bg = max_pwr;
+		dev->dev->bus_sprom->maxpwr_bg = max_pwr;
 	}
 
 	/* Get desired power (in Q5.2) */
@@ -3014,7 +3012,7 @@
 {
 	struct b43_phy *phy = &dev->phy;
 
-	if (!(dev->sdev->bus->sprom.boardflags_lo & B43_BFL_RSSI))
+	if (!(dev->dev->bus_sprom->boardflags_lo & B43_BFL_RSSI))
 		return;
 
 	b43_mac_suspend(dev);
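
The hunks above and below replace direct dev->sdev->bus dereferences with
accessors on the new bus-abstraction handle (dev->dev, a struct b43_bus_dev),
so the PHY code no longer hard-codes an SSB back end; board identification,
chip identification and the SPROM are all reached through that handle. A
minimal sketch of what such a wrapper could look like — the field names match
the accesses in this patch, but the layout is an assumption, not the real
b43_bus_dev definition:

    /* Hedged sketch of a bus-neutral device wrapper; the fields are those
     * implied by the accesses in this patch (board_*, chip_*, core_rev,
     * bus_sprom), not the authoritative definition in b43's bus.h. */
    struct b43_bus_dev_sketch {
    	int bus_type;			/* e.g. B43_BUS_SSB, as tested below */
    	struct ssb_device *sdev;	/* valid only on an SSB bus */
    	struct ssb_sprom *bus_sprom;	/* SPROM, wherever the bus keeps it */
    	u16 board_vendor, board_type, board_rev;
    	u16 chip_id;
    	u8 chip_rev, chip_pkg;
    	u8 core_rev;
    	struct device *dev;		/* generic struct device */
    };

Code that genuinely needs SSB internals — the chipcommon PMU calls in
lpphy_baseband_rev0_1_init(), for instance — still reaches through
dev->dev->sdev->bus explicitly rather than pretending to be bus-neutral.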
diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c
index 012c8da..daec1d9 100644
--- a/drivers/net/wireless/b43/phy_lp.c
+++ b/drivers/net/wireless/b43/phy_lp.c
@@ -85,39 +85,39 @@
 /* http://bcm-v4.sipsolutions.net/802.11/PHY/LP/ReadBandSrom */
 static void lpphy_read_band_sprom(struct b43_wldev *dev)
 {
+	struct ssb_sprom *sprom = dev->dev->bus_sprom;
 	struct b43_phy_lp *lpphy = dev->phy.lp;
-	struct ssb_bus *bus = dev->sdev->bus;
 	u16 cckpo, maxpwr;
 	u32 ofdmpo;
 	int i;
 
 	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
-		lpphy->tx_isolation_med_band = bus->sprom.tri2g;
-		lpphy->bx_arch = bus->sprom.bxa2g;
-		lpphy->rx_pwr_offset = bus->sprom.rxpo2g;
-		lpphy->rssi_vf = bus->sprom.rssismf2g;
-		lpphy->rssi_vc = bus->sprom.rssismc2g;
-		lpphy->rssi_gs = bus->sprom.rssisav2g;
-		lpphy->txpa[0] = bus->sprom.pa0b0;
-		lpphy->txpa[1] = bus->sprom.pa0b1;
-		lpphy->txpa[2] = bus->sprom.pa0b2;
-		maxpwr = bus->sprom.maxpwr_bg;
+		lpphy->tx_isolation_med_band = sprom->tri2g;
+		lpphy->bx_arch = sprom->bxa2g;
+		lpphy->rx_pwr_offset = sprom->rxpo2g;
+		lpphy->rssi_vf = sprom->rssismf2g;
+		lpphy->rssi_vc = sprom->rssismc2g;
+		lpphy->rssi_gs = sprom->rssisav2g;
+		lpphy->txpa[0] = sprom->pa0b0;
+		lpphy->txpa[1] = sprom->pa0b1;
+		lpphy->txpa[2] = sprom->pa0b2;
+		maxpwr = sprom->maxpwr_bg;
 		lpphy->max_tx_pwr_med_band = maxpwr;
-		cckpo = bus->sprom.cck2gpo;
+		cckpo = sprom->cck2gpo;
 		/*
 		 * We don't read the SPROM's opo as the specs say. On rev8
 		 * SPROMs opo == ofdm2gpo, and we don't know of any SSB with
 		 * an LP-PHY and a SPROM revision below 8.
 		 */
-		B43_WARN_ON(bus->sprom.revision < 8);
-		ofdmpo = bus->sprom.ofdm2gpo;
+		B43_WARN_ON(sprom->revision < 8);
+		ofdmpo = sprom->ofdm2gpo;
 		if (cckpo) {
 			for (i = 0; i < 4; i++) {
 				lpphy->tx_max_rate[i] =
 					maxpwr - (ofdmpo & 0xF) * 2;
 				ofdmpo >>= 4;
 			}
-			ofdmpo = bus->sprom.ofdm2gpo;
+			ofdmpo = sprom->ofdm2gpo;
 			for (i = 4; i < 15; i++) {
 				lpphy->tx_max_rate[i] =
 					maxpwr - (ofdmpo & 0xF) * 2;
@@ -131,39 +131,39 @@
 				lpphy->tx_max_rate[i] = maxpwr - ofdmpo;
 		}
 	} else { /* 5GHz */
-		lpphy->tx_isolation_low_band = bus->sprom.tri5gl;
-		lpphy->tx_isolation_med_band = bus->sprom.tri5g;
-		lpphy->tx_isolation_hi_band = bus->sprom.tri5gh;
-		lpphy->bx_arch = bus->sprom.bxa5g;
-		lpphy->rx_pwr_offset = bus->sprom.rxpo5g;
-		lpphy->rssi_vf = bus->sprom.rssismf5g;
-		lpphy->rssi_vc = bus->sprom.rssismc5g;
-		lpphy->rssi_gs = bus->sprom.rssisav5g;
-		lpphy->txpa[0] = bus->sprom.pa1b0;
-		lpphy->txpa[1] = bus->sprom.pa1b1;
-		lpphy->txpa[2] = bus->sprom.pa1b2;
-		lpphy->txpal[0] = bus->sprom.pa1lob0;
-		lpphy->txpal[1] = bus->sprom.pa1lob1;
-		lpphy->txpal[2] = bus->sprom.pa1lob2;
-		lpphy->txpah[0] = bus->sprom.pa1hib0;
-		lpphy->txpah[1] = bus->sprom.pa1hib1;
-		lpphy->txpah[2] = bus->sprom.pa1hib2;
-		maxpwr = bus->sprom.maxpwr_al;
-		ofdmpo = bus->sprom.ofdm5glpo;
+		lpphy->tx_isolation_low_band = sprom->tri5gl;
+		lpphy->tx_isolation_med_band = sprom->tri5g;
+		lpphy->tx_isolation_hi_band = sprom->tri5gh;
+		lpphy->bx_arch = sprom->bxa5g;
+		lpphy->rx_pwr_offset = sprom->rxpo5g;
+		lpphy->rssi_vf = sprom->rssismf5g;
+		lpphy->rssi_vc = sprom->rssismc5g;
+		lpphy->rssi_gs = sprom->rssisav5g;
+		lpphy->txpa[0] = sprom->pa1b0;
+		lpphy->txpa[1] = sprom->pa1b1;
+		lpphy->txpa[2] = sprom->pa1b2;
+		lpphy->txpal[0] = sprom->pa1lob0;
+		lpphy->txpal[1] = sprom->pa1lob1;
+		lpphy->txpal[2] = sprom->pa1lob2;
+		lpphy->txpah[0] = sprom->pa1hib0;
+		lpphy->txpah[1] = sprom->pa1hib1;
+		lpphy->txpah[2] = sprom->pa1hib2;
+		maxpwr = sprom->maxpwr_al;
+		ofdmpo = sprom->ofdm5glpo;
 		lpphy->max_tx_pwr_low_band = maxpwr;
 		for (i = 4; i < 12; i++) {
 			lpphy->tx_max_ratel[i] = maxpwr - (ofdmpo & 0xF) * 2;
 			ofdmpo >>= 4;
 		}
-		maxpwr = bus->sprom.maxpwr_a;
-		ofdmpo = bus->sprom.ofdm5gpo;
+		maxpwr = sprom->maxpwr_a;
+		ofdmpo = sprom->ofdm5gpo;
 		lpphy->max_tx_pwr_med_band = maxpwr;
 		for (i = 4; i < 12; i++) {
 			lpphy->tx_max_rate[i] = maxpwr - (ofdmpo & 0xF) * 2;
 			ofdmpo >>= 4;
 		}
-		maxpwr = bus->sprom.maxpwr_ah;
-		ofdmpo = bus->sprom.ofdm5ghpo;
+		maxpwr = sprom->maxpwr_ah;
+		ofdmpo = sprom->ofdm5ghpo;
 		lpphy->max_tx_pwr_hi_band = maxpwr;
 		for (i = 4; i < 12; i++) {
 			lpphy->tx_max_rateh[i] = maxpwr - (ofdmpo & 0xF) * 2;
@@ -214,7 +214,8 @@
 
 static void lpphy_baseband_rev0_1_init(struct b43_wldev *dev)
 {
-	struct ssb_bus *bus = dev->sdev->bus;
+	struct ssb_bus *bus = dev->dev->sdev->bus;
+	struct ssb_sprom *sprom = dev->dev->bus_sprom;
 	struct b43_phy_lp *lpphy = dev->phy.lp;
 	u16 tmp, tmp2;
 
@@ -242,9 +243,9 @@
 	b43_phy_maskset(dev, B43_LPPHY_CRS_ED_THRESH, 0x00FF, 0xAD00);
 	b43_phy_maskset(dev, B43_LPPHY_INPUT_PWRDB,
 			0xFF00, lpphy->rx_pwr_offset);
-	if ((bus->sprom.boardflags_lo & B43_BFL_FEM) &&
+	if ((sprom->boardflags_lo & B43_BFL_FEM) &&
 	   ((b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ||
-	   (bus->sprom.boardflags_hi & B43_BFH_PAREF))) {
+	   (sprom->boardflags_hi & B43_BFH_PAREF))) {
 		ssb_pmu_set_ldo_voltage(&bus->chipco, LDO_PAREF, 0x28);
 		ssb_pmu_set_ldo_paref(&bus->chipco, true);
 		if (dev->phy.rev == 0) {
@@ -260,7 +261,7 @@
 	}
 	tmp = lpphy->rssi_vf | lpphy->rssi_vc << 4 | 0xA000;
 	b43_phy_write(dev, B43_LPPHY_AFE_RSSI_CTL_0, tmp);
-	if (bus->sprom.boardflags_hi & B43_BFH_RSSIINV)
+	if (sprom->boardflags_hi & B43_BFH_RSSIINV)
 		b43_phy_maskset(dev, B43_LPPHY_AFE_RSSI_CTL_1, 0xF000, 0x0AAA);
 	else
 		b43_phy_maskset(dev, B43_LPPHY_AFE_RSSI_CTL_1, 0xF000, 0x02AA);
@@ -268,7 +269,7 @@
 	b43_phy_maskset(dev, B43_LPPHY_RX_RADIO_CTL,
 			0xFFF9, (lpphy->bx_arch << 1));
 	if (dev->phy.rev == 1 &&
-	   (bus->sprom.boardflags_hi & B43_BFH_FEM_BT)) {
+	   (sprom->boardflags_hi & B43_BFH_FEM_BT)) {
 		b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_1, 0xFFC0, 0x000A);
 		b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_1, 0x3F00, 0x0900);
 		b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_2, 0xFFC0, 0x000A);
@@ -286,8 +287,8 @@
 		b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_8, 0xFFC0, 0x000A);
 		b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_8, 0xC0FF, 0x0B00);
 	} else if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ ||
-		  (bus->boardinfo.type == 0x048A) || ((dev->phy.rev == 0) &&
-		  (bus->sprom.boardflags_lo & B43_BFL_FEM))) {
+		  (dev->dev->board_type == 0x048A) || ((dev->phy.rev == 0) &&
+		  (sprom->boardflags_lo & B43_BFL_FEM))) {
 		b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_1, 0xFFC0, 0x0001);
 		b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_1, 0xC0FF, 0x0400);
 		b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_2, 0xFFC0, 0x0001);
@@ -297,7 +298,7 @@
 		b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_4, 0xFFC0, 0x0002);
 		b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_4, 0xC0FF, 0x0A00);
 	} else if (dev->phy.rev == 1 ||
-		  (bus->sprom.boardflags_lo & B43_BFL_FEM)) {
+		  (sprom->boardflags_lo & B43_BFL_FEM)) {
 		b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_1, 0xFFC0, 0x0004);
 		b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_1, 0xC0FF, 0x0800);
 		b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_2, 0xFFC0, 0x0004);
@@ -316,15 +317,15 @@
 		b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_4, 0xFFC0, 0x0006);
 		b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_4, 0xC0FF, 0x0700);
 	}
-	if (dev->phy.rev == 1 && (bus->sprom.boardflags_hi & B43_BFH_PAREF)) {
+	if (dev->phy.rev == 1 && (sprom->boardflags_hi & B43_BFH_PAREF)) {
 		b43_phy_copy(dev, B43_LPPHY_TR_LOOKUP_5, B43_LPPHY_TR_LOOKUP_1);
 		b43_phy_copy(dev, B43_LPPHY_TR_LOOKUP_6, B43_LPPHY_TR_LOOKUP_2);
 		b43_phy_copy(dev, B43_LPPHY_TR_LOOKUP_7, B43_LPPHY_TR_LOOKUP_3);
 		b43_phy_copy(dev, B43_LPPHY_TR_LOOKUP_8, B43_LPPHY_TR_LOOKUP_4);
 	}
-	if ((bus->sprom.boardflags_hi & B43_BFH_FEM_BT) &&
-	    (bus->chip_id == 0x5354) &&
-	    (bus->chip_package == SSB_CHIPPACK_BCM4712S)) {
+	if ((sprom->boardflags_hi & B43_BFH_FEM_BT) &&
+	    (dev->dev->chip_id == 0x5354) &&
+	    (dev->dev->chip_pkg == SSB_CHIPPACK_BCM4712S)) {
 		b43_phy_set(dev, B43_LPPHY_CRSGAIN_CTL, 0x0006);
 		b43_phy_write(dev, B43_LPPHY_GPIO_SELECT, 0x0005);
 		b43_phy_write(dev, B43_LPPHY_GPIO_OUTEN, 0xFFFF);
@@ -412,7 +413,6 @@
 
 static void lpphy_baseband_rev2plus_init(struct b43_wldev *dev)
 {
-	struct ssb_bus *bus = dev->sdev->bus;
 	struct b43_phy_lp *lpphy = dev->phy.lp;
 
 	b43_phy_write(dev, B43_LPPHY_AFE_DAC_CTL, 0x50);
@@ -432,7 +432,7 @@
 	b43_phy_mask(dev, B43_LPPHY_CRSGAIN_CTL, ~0x4000);
 	b43_phy_mask(dev, B43_LPPHY_CRSGAIN_CTL, ~0x2000);
 	b43_phy_set(dev, B43_PHY_OFDM(0x10A), 0x1);
-	if (bus->boardinfo.rev >= 0x18) {
+	if (dev->dev->board_rev >= 0x18) {
 		b43_lptab_write(dev, B43_LPTAB32(17, 65), 0xEC);
 		b43_phy_maskset(dev, B43_PHY_OFDM(0x10A), 0xFF01, 0x14);
 	} else {
@@ -449,7 +449,7 @@
 	b43_phy_maskset(dev, B43_LPPHY_CLIPCTRTHRESH, 0xFC1F, 0xA0);
 	b43_phy_maskset(dev, B43_LPPHY_GAINDIRECTMISMATCH, 0xE0FF, 0x300);
 	b43_phy_maskset(dev, B43_LPPHY_HIGAINDB, 0x00FF, 0x2A00);
-	if ((bus->chip_id == 0x4325) && (bus->chip_rev == 0)) {
+	if ((dev->dev->chip_id == 0x4325) && (dev->dev->chip_rev == 0)) {
 		b43_phy_maskset(dev, B43_LPPHY_LOWGAINDB, 0x00FF, 0x2100);
 		b43_phy_maskset(dev, B43_LPPHY_VERYLOWGAINDB, 0xFF00, 0xA);
 	} else {
@@ -467,7 +467,7 @@
 	b43_phy_maskset(dev, B43_LPPHY_CLIPCTRTHRESH, 0xFFE0, 0x12);
 	b43_phy_maskset(dev, B43_LPPHY_GAINMISMATCH, 0x0FFF, 0x9000);
 
-	if ((bus->chip_id == 0x4325) && (bus->chip_rev == 0)) {
+	if ((dev->dev->chip_id == 0x4325) && (dev->dev->chip_rev == 0)) {
 		b43_lptab_write(dev, B43_LPTAB16(0x08, 0x14), 0);
 		b43_lptab_write(dev, B43_LPTAB16(0x08, 0x12), 0x40);
 	}
@@ -492,7 +492,7 @@
 		      0x2000 | ((u16)lpphy->rssi_gs << 10) |
 		      ((u16)lpphy->rssi_vc << 4) | lpphy->rssi_vf);
 
-	if ((bus->chip_id == 0x4325) && (bus->chip_rev == 0)) {
+	if ((dev->dev->chip_id == 0x4325) && (dev->dev->chip_rev == 0)) {
 		b43_phy_set(dev, B43_LPPHY_AFE_ADC_CTL_0, 0x1C);
 		b43_phy_maskset(dev, B43_LPPHY_AFE_CTL, 0x00FF, 0x8800);
 		b43_phy_maskset(dev, B43_LPPHY_AFE_ADC_CTL_1, 0xFC3C, 0x0400);
@@ -519,7 +519,7 @@
 static void lpphy_2062_init(struct b43_wldev *dev)
 {
 	struct b43_phy_lp *lpphy = dev->phy.lp;
-	struct ssb_bus *bus = dev->sdev->bus;
+	struct ssb_bus *bus = dev->dev->sdev->bus;
 	u32 crystalfreq, tmp, ref;
 	unsigned int i;
 	const struct b2062_freqdata *fd = NULL;
@@ -697,7 +697,7 @@
 		lpphy_sync_stx(dev);
 		b43_phy_write(dev, B43_PHY_OFDM(0xF0), 0x5F80);
 		b43_phy_write(dev, B43_PHY_OFDM(0xF1), 0);
-		if (dev->sdev->bus->chip_id == 0x4325) {
+		if (dev->dev->chip_id == 0x4325) {
 			// TODO SSB PMU recalibration
 		}
 	}
@@ -1289,7 +1289,7 @@
 
 static void lpphy_rev2plus_rc_calib(struct b43_wldev *dev)
 {
-	struct ssb_bus *bus = dev->sdev->bus;
+	struct ssb_bus *bus = dev->dev->sdev->bus;
 	u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000;
 	u8 tmp = b43_radio_read(dev, B2063_RX_BB_SP8) & 0xFF;
 	int i;
@@ -1840,7 +1840,6 @@
 static void lpphy_papd_cal_txpwr(struct b43_wldev *dev)
 {
 	struct b43_phy_lp *lpphy = dev->phy.lp;
-	struct ssb_bus *bus = dev->sdev->bus;
 	struct lpphy_tx_gains gains, oldgains;
 	int old_txpctl, old_afe_ovr, old_rf, old_bbmult;
 
@@ -1854,7 +1853,7 @@
 
 	lpphy_set_tx_power_control(dev, B43_LPPHY_TXPCTL_OFF);
 
-	if (bus->chip_id == 0x4325 && bus->chip_rev == 0)
+	if (dev->dev->chip_id == 0x4325 && dev->dev->chip_rev == 0)
 		lpphy_papd_cal(dev, gains, 0, 1, 30);
 	else
 		lpphy_papd_cal(dev, gains, 0, 1, 65);
@@ -1870,7 +1869,6 @@
 			    bool rx, bool pa, struct lpphy_tx_gains *gains)
 {
 	struct b43_phy_lp *lpphy = dev->phy.lp;
-	struct ssb_bus *bus = dev->sdev->bus;
 	const struct lpphy_rx_iq_comp *iqcomp = NULL;
 	struct lpphy_tx_gains nogains, oldgains;
 	u16 tmp;
@@ -1879,7 +1877,7 @@
 	memset(&nogains, 0, sizeof(nogains));
 	memset(&oldgains, 0, sizeof(oldgains));
 
-	if (bus->chip_id == 0x5354) {
+	if (dev->dev->chip_id == 0x5354) {
 		for (i = 0; i < ARRAY_SIZE(lpphy_5354_iq_table); i++) {
 			if (lpphy_5354_iq_table[i].chan == lpphy->channel) {
 				iqcomp = &lpphy_5354_iq_table[i];
@@ -2408,11 +2406,9 @@
 
 static void lpphy_b2062_reset_pll_bias(struct b43_wldev *dev)
 {
-	struct ssb_bus *bus = dev->sdev->bus;
-
 	b43_radio_write(dev, B2062_S_RFPLL_CTL2, 0xFF);
 	udelay(20);
-	if (bus->chip_id == 0x5354) {
+	if (dev->dev->chip_id == 0x5354) {
 		b43_radio_write(dev, B2062_N_COMM1, 4);
 		b43_radio_write(dev, B2062_S_RFPLL_CTL2, 4);
 	} else {
@@ -2432,7 +2428,7 @@
 			    unsigned int channel)
 {
 	struct b43_phy_lp *lpphy = dev->phy.lp;
-	struct ssb_bus *bus = dev->sdev->bus;
+	struct ssb_bus *bus = dev->dev->sdev->bus;
 	const struct b206x_channel *chandata = NULL;
 	u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000;
 	u32 tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8, tmp9;
@@ -2522,7 +2518,7 @@
 static int lpphy_b2063_tune(struct b43_wldev *dev,
 			    unsigned int channel)
 {
-	struct ssb_bus *bus = dev->sdev->bus;
+	struct ssb_bus *bus = dev->dev->sdev->bus;
 
 	static const struct b206x_channel *chandata = NULL;
 	u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000;
@@ -2670,6 +2666,11 @@
 {
 	int err;
 
+	if (dev->dev->bus_type != B43_BUS_SSB) {
+		b43err(dev->wl, "LP-PHY is supported only on SSB!\n");
+		return -EOPNOTSUPP;
+	}
+
+	lpphy_read_band_sprom(dev); /* FIXME: should this be in prepare_structs? */
 	lpphy_baseband_init(dev);
 	lpphy_radio_init(dev);
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index 05960dd..ad14f3b 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -299,7 +299,7 @@
 static void b43_nphy_tx_power_fix(struct b43_wldev *dev)
 {
 	struct b43_phy_n *nphy = dev->phy.n;
-	struct ssb_sprom *sprom = &(dev->sdev->bus->sprom);
+	struct ssb_sprom *sprom = dev->dev->bus_sprom;
 
 	u8 txpi[2], bbmult, i;
 	u16 tmp, radio_gain, dac_gain;
@@ -423,16 +423,15 @@
 static void b43_radio_init2055_post(struct b43_wldev *dev)
 {
 	struct b43_phy_n *nphy = dev->phy.n;
-	struct ssb_sprom *sprom = &(dev->sdev->bus->sprom);
-	struct ssb_boardinfo *binfo = &(dev->sdev->bus->boardinfo);
+	struct ssb_sprom *sprom = dev->dev->bus_sprom;
 	int i;
 	u16 val;
 	bool workaround = false;
 
 	if (sprom->revision < 4)
-		workaround = (binfo->vendor != PCI_VENDOR_ID_BROADCOM &&
-				binfo->type == 0x46D &&
-				binfo->rev >= 0x41);
+		workaround = (dev->dev->board_vendor != PCI_VENDOR_ID_BROADCOM
+			      && dev->dev->board_type == 0x46D
+			      && dev->dev->board_rev >= 0x41);
 	else
 		workaround =
 			!(sprom->boardflags2_lo & B43_BFL2_RXBB_INT_REG_DIS);
@@ -983,7 +982,7 @@
 {
 	u16 tmp;
 
-	if (dev->sdev->id.revision == 16)
+	if (dev->dev->core_rev == 16)
 		b43_mac_suspend(dev);
 
 	tmp = b43_phy_read(dev, B43_NPHY_CLASSCTL);
@@ -993,7 +992,7 @@
 	tmp |= (val & mask);
 	b43_phy_maskset(dev, B43_NPHY_CLASSCTL, 0xFFF8, tmp);
 
-	if (dev->sdev->id.revision == 16)
+	if (dev->dev->core_rev == 16)
 		b43_mac_enable(dev);
 
 	return tmp;
@@ -1168,7 +1167,7 @@
 static void b43_nphy_gain_ctrl_workarounds(struct b43_wldev *dev)
 {
 	struct b43_phy_n *nphy = dev->phy.n;
-	struct ssb_sprom *sprom = &(dev->sdev->bus->sprom);
+	struct ssb_sprom *sprom = dev->dev->bus_sprom;
 
 	/* PHY rev 0, 1, 2 */
 	u8 i, j;
@@ -1373,7 +1372,7 @@
 /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/Workarounds */
 static void b43_nphy_workarounds(struct b43_wldev *dev)
 {
-	struct ssb_bus *bus = dev->sdev->bus;
+	struct ssb_sprom *sprom = dev->dev->bus_sprom;
 	struct b43_phy *phy = &dev->phy;
 	struct b43_phy_n *nphy = phy->n;
 
@@ -1443,9 +1442,9 @@
 
 		/* N PHY WAR TX Chain Update with hw_phytxchain as argument */
 
-		if ((bus->sprom.boardflags2_lo & B43_BFL2_APLL_WAR &&
+		if ((sprom->boardflags2_lo & B43_BFL2_APLL_WAR &&
 		    b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ||
-		    (bus->sprom.boardflags2_lo & B43_BFL2_GPLL_WAR &&
+		    (sprom->boardflags2_lo & B43_BFL2_GPLL_WAR &&
 		    b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ))
 			tmp32 = 0x00088888;
 		else
@@ -1503,8 +1502,8 @@
 		b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO2, 0x2D8);
 		b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0x301);
 
-		if (bus->sprom.boardflags2_lo & 0x100 &&
-		    bus->boardinfo.type == 0x8B) {
+		if (sprom->boardflags2_lo & 0x100 &&
+		    dev->dev->board_type == 0x8B) {
 			delays1[0] = 0x1;
 			delays1[5] = 0x14;
 		}
@@ -3586,7 +3585,7 @@
  */
 int b43_phy_initn(struct b43_wldev *dev)
 {
-	struct ssb_bus *bus = dev->sdev->bus;
+	struct ssb_sprom *sprom = dev->dev->bus_sprom;
 	struct b43_phy *phy = &dev->phy;
 	struct b43_phy_n *nphy = phy->n;
 	u8 tx_pwr_state;
@@ -3599,7 +3598,7 @@
 	bool do_cal = false;
 
 	if ((dev->phy.rev >= 3) &&
-	   (bus->sprom.boardflags_lo & B43_BFL_EXTLNA) &&
+	   (sprom->boardflags_lo & B43_BFL_EXTLNA) &&
 	   (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)) {
 		chipco_set32(&dev->sdev->bus->chipco, SSB_CHIPCO_CHIPCTL, 0x40);
 	}
@@ -3639,9 +3638,9 @@
 	b43_phy_write(dev, B43_NPHY_AFESEQ_TX2RX_PUD_20M, 0x20);
 	b43_phy_write(dev, B43_NPHY_AFESEQ_TX2RX_PUD_40M, 0x20);
 
-	if (bus->sprom.boardflags2_lo & 0x100 ||
-	    (bus->boardinfo.vendor == PCI_VENDOR_ID_APPLE &&
-	     bus->boardinfo.type == 0x8B))
+	if (sprom->boardflags2_lo & 0x100 ||
+	    (dev->dev->board_vendor == PCI_VENDOR_ID_APPLE &&
+	     dev->dev->board_type == 0x8B))
 		b43_phy_write(dev, B43_NPHY_TXREALFD, 0xA0);
 	else
 		b43_phy_write(dev, B43_NPHY_TXREALFD, 0xB8);
diff --git a/drivers/net/wireless/b43/pio.c b/drivers/net/wireless/b43/pio.c
index 72ab94d..44da620 100644
--- a/drivers/net/wireless/b43/pio.c
+++ b/drivers/net/wireless/b43/pio.c
@@ -111,7 +111,7 @@
 		B43_MMIO_PIO11_BASE5,
 	};
 
-	if (dev->sdev->id.revision >= 11) {
+	if (dev->dev->core_rev >= 11) {
 		B43_WARN_ON(index >= ARRAY_SIZE(bases_rev11));
 		return bases_rev11[index];
 	}
@@ -121,14 +121,14 @@
 
 static u16 pio_txqueue_offset(struct b43_wldev *dev)
 {
-	if (dev->sdev->id.revision >= 11)
+	if (dev->dev->core_rev >= 11)
 		return 0x18;
 	return 0;
 }
 
 static u16 pio_rxqueue_offset(struct b43_wldev *dev)
 {
-	if (dev->sdev->id.revision >= 11)
+	if (dev->dev->core_rev >= 11)
 		return 0x38;
 	return 8;
 }
@@ -144,7 +144,7 @@
 	if (!q)
 		return NULL;
 	q->dev = dev;
-	q->rev = dev->sdev->id.revision;
+	q->rev = dev->dev->core_rev;
 	q->mmio_base = index_to_pioqueue_base(dev, index) +
 		       pio_txqueue_offset(dev);
 	q->index = index;
@@ -178,7 +178,7 @@
 	if (!q)
 		return NULL;
 	q->dev = dev;
-	q->rev = dev->sdev->id.revision;
+	q->rev = dev->dev->core_rev;
 	q->mmio_base = index_to_pioqueue_base(dev, index) +
 		       pio_rxqueue_offset(dev);
 
diff --git a/drivers/net/wireless/b43/rfkill.c b/drivers/net/wireless/b43/rfkill.c
index a617efe..59c3afe 100644
--- a/drivers/net/wireless/b43/rfkill.c
+++ b/drivers/net/wireless/b43/rfkill.c
@@ -37,17 +37,16 @@
 {
 	struct b43_wl *wl = hw_to_b43_wl(hw);
 	struct b43_wldev *dev = wl->current_dev;
-	struct ssb_bus *bus = dev->sdev->bus;
 	bool enabled;
 	bool brought_up = false;
 
 	mutex_lock(&wl->mutex);
 	if (unlikely(b43_status(dev) < B43_STAT_INITIALIZED)) {
-		if (ssb_bus_powerup(bus, 0)) {
+		if (b43_bus_powerup(dev, 0)) {
 			mutex_unlock(&wl->mutex);
 			return;
 		}
-		ssb_device_enable(dev->sdev, 0);
+		b43_device_enable(dev, 0);
 		brought_up = true;
 	}
 
@@ -63,8 +62,8 @@
 	}
 
 	if (brought_up) {
-		ssb_device_disable(dev->sdev, 0);
-		ssb_bus_may_powerdown(bus);
+		b43_device_disable(dev, 0);
+		b43_bus_may_powerdown(dev);
 	}
 
 	mutex_unlock(&wl->mutex);
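
The rfkill path above now goes through bus-neutral helpers
(b43_bus_powerup(), b43_device_enable(), and their shutdown counterparts)
instead of calling ssb_* directly. A hedged sketch of how such a wrapper can
dispatch on the bus type — the SSB arm mirrors the ssb_bus_powerup() call
this hunk removes; returning an error for other buses is my assumption:

    /* Sketch only: route a power-up request to the underlying bus. */
    static inline int b43_bus_powerup_sketch(struct b43_wldev *dev,
    					     bool dynamic_pctl)
    {
    	switch (dev->dev->bus_type) {
    	case B43_BUS_SSB:
    		return ssb_bus_powerup(dev->dev->sdev->bus, dynamic_pctl);
    	default:
    		return -EOPNOTSUPP;	/* assumption: no other bus handled */
    	}
    }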
diff --git a/drivers/net/wireless/b43/sdio.c b/drivers/net/wireless/b43/sdio.c
index 808e25b..e6c733d 100644
--- a/drivers/net/wireless/b43/sdio.c
+++ b/drivers/net/wireless/b43/sdio.c
@@ -66,7 +66,7 @@
 int b43_sdio_request_irq(struct b43_wldev *dev,
 			 void (*handler)(struct b43_wldev *dev))
 {
-	struct ssb_bus *bus = dev->sdev->bus;
+	struct ssb_bus *bus = dev->dev->sdev->bus;
 	struct sdio_func *func = bus->host_sdio;
 	struct b43_sdio *sdio = sdio_get_drvdata(func);
 	int err;
@@ -82,7 +82,7 @@
 
 void b43_sdio_free_irq(struct b43_wldev *dev)
 {
-	struct ssb_bus *bus = dev->sdev->bus;
+	struct ssb_bus *bus = dev->dev->sdev->bus;
 	struct sdio_func *func = bus->host_sdio;
 	struct b43_sdio *sdio = sdio_get_drvdata(func);
 
diff --git a/drivers/net/wireless/b43/sysfs.c b/drivers/net/wireless/b43/sysfs.c
index 57af619..f1ae4e0 100644
--- a/drivers/net/wireless/b43/sysfs.c
+++ b/drivers/net/wireless/b43/sysfs.c
@@ -140,7 +140,7 @@
 
 int b43_sysfs_register(struct b43_wldev *wldev)
 {
-	struct device *dev = wldev->sdev->dev;
+	struct device *dev = wldev->dev->dev;
 
 	B43_WARN_ON(b43_status(wldev) != B43_STAT_INITIALIZED);
 
@@ -149,7 +149,7 @@
 
 void b43_sysfs_unregister(struct b43_wldev *wldev)
 {
-	struct device *dev = wldev->sdev->dev;
+	struct device *dev = wldev->dev->dev;
 
 	device_remove_file(dev, &dev_attr_interference);
 }
diff --git a/drivers/net/wireless/b43/tables_lpphy.c b/drivers/net/wireless/b43/tables_lpphy.c
index 59df3c6..6748c5a 100644
--- a/drivers/net/wireless/b43/tables_lpphy.c
+++ b/drivers/net/wireless/b43/tables_lpphy.c
@@ -2304,7 +2304,6 @@
 
 void lpphy_rev2plus_table_init(struct b43_wldev *dev)
 {
-	struct ssb_bus *bus = dev->sdev->bus;
 	int i;
 
 	B43_WARN_ON(dev->phy.rev < 2);
@@ -2341,7 +2340,7 @@
 	b43_lptab_write_bulk(dev, B43_LPTAB32(10, 0),
 		ARRAY_SIZE(lpphy_papd_mult_table), lpphy_papd_mult_table);
 
-	if ((bus->chip_id == 0x4325) && (bus->chip_rev == 0)) {
+	if ((dev->dev->chip_id == 0x4325) && (dev->dev->chip_rev == 0)) {
 		b43_lptab_write_bulk(dev, B43_LPTAB32(13, 0),
 			ARRAY_SIZE(lpphy_a0_gain_idx_table), lpphy_a0_gain_idx_table);
 		b43_lptab_write_bulk(dev, B43_LPTAB16(14, 0),
@@ -2416,12 +2415,12 @@
 
 void lpphy_init_tx_gain_table(struct b43_wldev *dev)
 {
-	struct ssb_bus *bus = dev->sdev->bus;
+	struct ssb_sprom *sprom = dev->dev->bus_sprom;
 
 	switch (dev->phy.rev) {
 	case 0:
-		if ((bus->sprom.boardflags_hi & B43_BFH_NOPA) ||
-		    (bus->sprom.boardflags_lo & B43_BFL_HGPA))
+		if ((sprom->boardflags_hi & B43_BFH_NOPA) ||
+		    (sprom->boardflags_lo & B43_BFL_HGPA))
 			lpphy_write_gain_table_bulk(dev, 0, 128,
 					lpphy_rev0_nopa_tx_gain_table);
 		else if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
@@ -2432,8 +2431,8 @@
 					lpphy_rev0_5ghz_tx_gain_table);
 		break;
 	case 1:
-		if ((bus->sprom.boardflags_hi & B43_BFH_NOPA) ||
-		    (bus->sprom.boardflags_lo & B43_BFL_HGPA))
+		if ((sprom->boardflags_hi & B43_BFH_NOPA) ||
+		    (sprom->boardflags_lo & B43_BFL_HGPA))
 			lpphy_write_gain_table_bulk(dev, 0, 128,
 					lpphy_rev1_nopa_tx_gain_table);
 		else if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
@@ -2444,7 +2443,7 @@
 					lpphy_rev1_5ghz_tx_gain_table);
 		break;
 	default:
-		if (bus->sprom.boardflags_hi & B43_BFH_NOPA)
+		if (sprom->boardflags_hi & B43_BFH_NOPA)
 			lpphy_write_gain_table_bulk(dev, 0, 128,
 					lpphy_rev2_nopa_tx_gain_table);
 		else if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
diff --git a/drivers/net/wireless/b43/wa.c b/drivers/net/wireless/b43/wa.c
index 8f4db44..5d00d0e 100644
--- a/drivers/net/wireless/b43/wa.c
+++ b/drivers/net/wireless/b43/wa.c
@@ -458,17 +458,15 @@
 
 static void b43_wa_boards_a(struct b43_wldev *dev)
 {
-	struct ssb_bus *bus = dev->sdev->bus;
-
-	if (bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM &&
-	    bus->boardinfo.type == SSB_BOARD_BU4306 &&
-	    bus->boardinfo.rev < 0x30) {
+	if (dev->dev->board_vendor == SSB_BOARDVENDOR_BCM &&
+	    dev->dev->board_type == SSB_BOARD_BU4306 &&
+	    dev->dev->board_rev < 0x30) {
 		b43_phy_write(dev, 0x0010, 0xE000);
 		b43_phy_write(dev, 0x0013, 0x0140);
 		b43_phy_write(dev, 0x0014, 0x0280);
 	} else {
-		if (bus->boardinfo.type == SSB_BOARD_MP4318 &&
-		    bus->boardinfo.rev < 0x20) {
+		if (dev->dev->board_type == SSB_BOARD_MP4318 &&
+		    dev->dev->board_rev < 0x20) {
 			b43_phy_write(dev, 0x0013, 0x0210);
 			b43_phy_write(dev, 0x0014, 0x0840);
 		} else {
@@ -486,19 +484,19 @@
 
 static void b43_wa_boards_g(struct b43_wldev *dev)
 {
-	struct ssb_bus *bus = dev->sdev->bus;
+	struct ssb_sprom *sprom = dev->dev->bus_sprom;
 	struct b43_phy *phy = &dev->phy;
 
-	if (bus->boardinfo.vendor != SSB_BOARDVENDOR_BCM ||
-	    bus->boardinfo.type != SSB_BOARD_BU4306 ||
-	    bus->boardinfo.rev != 0x17) {
+	if (dev->dev->board_vendor != SSB_BOARDVENDOR_BCM ||
+	    dev->dev->board_type != SSB_BOARD_BU4306 ||
+	    dev->dev->board_rev != 0x17) {
 		if (phy->rev < 2) {
 			b43_ofdmtab_write16(dev, B43_OFDMTAB_GAINX_R1, 1, 0x0002);
 			b43_ofdmtab_write16(dev, B43_OFDMTAB_GAINX_R1, 2, 0x0001);
 		} else {
 			b43_ofdmtab_write16(dev, B43_OFDMTAB_GAINX, 1, 0x0002);
 			b43_ofdmtab_write16(dev, B43_OFDMTAB_GAINX, 2, 0x0001);
-			if ((bus->sprom.boardflags_lo & B43_BFL_EXTLNA) &&
+			if ((sprom->boardflags_lo & B43_BFL_EXTLNA) &&
 			    (phy->rev >= 7)) {
 				b43_phy_mask(dev, B43_PHY_EXTG(0x11), 0xF7FF);
 				b43_ofdmtab_write16(dev, B43_OFDMTAB_GAINX, 0x0020, 0x0001);
@@ -510,7 +508,7 @@
 			}
 		}
 	}
-	if (bus->sprom.boardflags_lo & B43_BFL_FEM) {
+	if (sprom->boardflags_lo & B43_BFL_FEM) {
 		b43_phy_write(dev, B43_PHY_GTABCTL, 0x3120);
 		b43_phy_write(dev, B43_PHY_GTABDATA, 0xC480);
 	}
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index c8f99ae..488b898 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -547,7 +547,7 @@
 			else
 				tmp -= 3;
 		} else {
-			if (dev->sdev->bus->sprom.
+			if (dev->dev->bus_sprom->
 			    boardflags_lo & B43_BFL_RSSI) {
 				if (in_rssi > 63)
 					in_rssi = 63;
diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c
index e03e01d..c33934a 100644
--- a/drivers/net/wireless/b43legacy/dma.c
+++ b/drivers/net/wireless/b43legacy/dma.c
@@ -817,14 +817,13 @@
 
 static void free_all_descbuffers(struct b43legacy_dmaring *ring)
 {
-	struct b43legacy_dmadesc_generic *desc;
 	struct b43legacy_dmadesc_meta *meta;
 	int i;
 
 	if (!ring->used_slots)
 		return;
 	for (i = 0; i < ring->nr_slots; i++) {
-		desc = ring->ops->idx2desc(ring, i, &meta);
+		ring->ops->idx2desc(ring, i, &meta);
 
 		if (!meta->skb) {
 			B43legacy_WARN_ON(!ring->tx);
@@ -1371,10 +1370,8 @@
 		     struct sk_buff *skb)
 {
 	struct b43legacy_dmaring *ring;
-	struct ieee80211_hdr *hdr;
 	int err = 0;
 	unsigned long flags;
-	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 
 	ring = priority_to_txring(dev, skb_get_queue_mapping(skb));
 	spin_lock_irqsave(&ring->lock, flags);
@@ -1401,8 +1398,6 @@
 
 	/* dma_tx_fragment might reallocate the skb, so invalidate pointers pointing
 	 * into the skb data or cb now. */
-	hdr = NULL;
-	info = NULL;
 	err = dma_tx_fragment(ring, &skb);
 	if (unlikely(err == -ENOKEY)) {
 		/* Drop this packet, as we don't have the encryption key
@@ -1435,7 +1430,6 @@
 {
 	const struct b43legacy_dma_ops *ops;
 	struct b43legacy_dmaring *ring;
-	struct b43legacy_dmadesc_generic *desc;
 	struct b43legacy_dmadesc_meta *meta;
 	int retry_limit;
 	int slot;
@@ -1450,7 +1444,7 @@
 	ops = ring->ops;
 	while (1) {
 		B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
-		desc = ops->idx2desc(ring, slot, &meta);
+		ops->idx2desc(ring, slot, &meta);
 
 		if (meta->skb)
 			unmap_descbuffer(ring, meta->dmaaddr,
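
The b43legacy hunks in this file are dead-store cleanups: idx2desc() is still
called, but only for its side effect of filling the meta out-parameter; the
returned descriptor pointer was never used. The pattern in isolation, with
illustrative names rather than the b43legacy ops table:

    /* A lookup that both returns a value and fills an out-parameter. When
     * only the out-parameter is needed, ignore the return value outright
     * instead of parking it in a write-only local. */
    struct ring;
    struct meta;
    struct desc;
    struct desc *idx2desc(struct ring *ring, int slot, struct meta **meta);

    static void visit_slot(struct ring *ring, int slot)
    {
    	struct meta *meta;

    	idx2desc(ring, slot, &meta);	/* return value intentionally unused */
    	/* ... operate on *meta ... */
    }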
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 1ab8861..d6db6c1 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -1564,10 +1564,10 @@
 	struct b43legacy_firmware *fw = &dev->fw;
 	const u8 rev = dev->dev->id.revision;
 	const char *filename;
-	u32 tmshigh;
 	int err;
 
-	tmshigh = ssb_read32(dev->dev, SSB_TMSHIGH);
+	/* Do a dummy read; the value itself is unused. */
+	ssb_read32(dev->dev, SSB_TMSHIGH);
 	if (!fw->ucode) {
 		if (rev == 2)
 			filename = "ucode2";
@@ -2634,11 +2634,9 @@
 	unsigned long flags;
 	unsigned int new_phymode = 0xFFFF;
 	int antenna_tx;
-	int antenna_rx;
 	int err = 0;
 
 	antenna_tx = B43legacy_ANTENNA_DEFAULT;
-	antenna_rx = B43legacy_ANTENNA_DEFAULT;
 
 	mutex_lock(&wl->mutex);
 	dev = wl->current_dev;
@@ -2775,14 +2773,12 @@
 {
 	struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
 	struct b43legacy_wldev *dev;
-	struct b43legacy_phy *phy;
 	unsigned long flags;
 
 	mutex_lock(&wl->mutex);
 	B43legacy_WARN_ON(wl->vif != vif);
 
 	dev = wl->current_dev;
-	phy = &dev->phy;
 
 	/* Disable IRQs while reconfiguring the device.
 	 * This makes it possible to drop the spinlock throughout
@@ -2974,7 +2970,7 @@
 		break;
 	default:
 		unsupported = 1;
-	};
+	}
 	if (unsupported) {
 		b43legacyerr(dev->wl, "FOUND UNSUPPORTED PHY "
 		       "(Analog %u, Type %u, Revision %u)\n",
diff --git a/drivers/net/wireless/b43legacy/xmit.c b/drivers/net/wireless/b43legacy/xmit.c
index 3a95541..6c174f3 100644
--- a/drivers/net/wireless/b43legacy/xmit.c
+++ b/drivers/net/wireless/b43legacy/xmit.c
@@ -321,11 +321,9 @@
 		struct ieee80211_hdr *hdr;
 		int rts_rate;
 		int rts_rate_fb;
-		int rts_rate_ofdm;
 		int rts_rate_fb_ofdm;
 
 		rts_rate = ieee80211_get_rts_cts_rate(dev->wl->hw, info)->hw_value;
-		rts_rate_ofdm = b43legacy_is_ofdm_rate(rts_rate);
 		rts_rate_fb = b43legacy_calc_fallback_rate(rts_rate);
 		rts_rate_fb_ofdm = b43legacy_is_ofdm_rate(rts_rate_fb);
 		if (rts_rate_fb_ofdm)
diff --git a/drivers/net/wireless/hostap/hostap_wlan.h b/drivers/net/wireless/hostap/hostap_wlan.h
index 88dc6a5..7bb0b4b 100644
--- a/drivers/net/wireless/hostap/hostap_wlan.h
+++ b/drivers/net/wireless/hostap/hostap_wlan.h
@@ -1,6 +1,7 @@
 #ifndef HOSTAP_WLAN_H
 #define HOSTAP_WLAN_H
 
+#include <linux/interrupt.h>
 #include <linux/wireless.h>
 #include <linux/netdevice.h>
 #include <linux/mutex.h>
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.h b/drivers/net/wireless/ipw2x00/ipw2200.h
index 91795b5..ecb561d 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.h
+++ b/drivers/net/wireless/ipw2x00/ipw2200.h
@@ -32,6 +32,7 @@
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/mutex.h>
 
 #include <linux/pci.h>
diff --git a/drivers/net/wireless/ipw2x00/libipw_wx.c b/drivers/net/wireless/ipw2x00/libipw_wx.c
index d7bd6cf0..6623e50 100644
--- a/drivers/net/wireless/ipw2x00/libipw_wx.c
+++ b/drivers/net/wireless/ipw2x00/libipw_wx.c
@@ -30,6 +30,7 @@
 
 ******************************************************************************/
 
+#include <linux/hardirq.h>
 #include <linux/kmod.h>
 #include <linux/slab.h>
 #include <linux/module.h>
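
The one-line #include additions scattered through this series
(<linux/interrupt.h>, <linux/hardirq.h>) make each file include the headers
it actually uses — tasklets, irqreturn_t, in_interrupt() — instead of relying
on headers pulled in transitively. A minimal illustration with a hypothetical
driver header, not one of the files touched here:

    /* A header that embeds a tasklet must include <linux/interrupt.h>
     * itself; relying on some other header to drag it in breaks as soon
     * as that indirect include chain changes. */
    #include <linux/interrupt.h>

    struct example_priv {
    	struct tasklet_struct rx_tasklet;	/* type from interrupt.h */
    };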
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-rs.c b/drivers/net/wireless/iwlegacy/iwl-4965-rs.c
index 24d1499..9b65153 100644
--- a/drivers/net/wireless/iwlegacy/iwl-4965-rs.c
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-rs.c
@@ -2275,6 +2275,9 @@
 	if (rate_control_send_low(sta, priv_sta, txrc))
 		return;
 
+	if (!lq_sta)
+		return;
+
 	rate_idx  = lq_sta->last_txrate_idx;
 
 	if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) {
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965.c b/drivers/net/wireless/iwlegacy/iwl-4965.c
index facc94e..9cf96cb 100644
--- a/drivers/net/wireless/iwlegacy/iwl-4965.c
+++ b/drivers/net/wireless/iwlegacy/iwl-4965.c
@@ -496,7 +496,7 @@
 	    channel <= CALIB_IWL_TX_ATTEN_GR4_LCH)
 		return CALIB_CH_GROUP_4;
 
-	return -1;
+	return -EINVAL;
 }
 
 static u32 iwl4965_get_sub_band(const struct iwl_priv *priv, u32 channel)
@@ -915,7 +915,7 @@
 	if (txatten_grp < 0) {
 		IWL_ERR(priv, "Can't find txatten group for channel %d.\n",
 			  channel);
-		return -EINVAL;
+		return txatten_grp;
 	}
 
 	IWL_DEBUG_TXPOWER(priv, "channel %d belongs to txatten group %d\n",
@@ -1185,8 +1185,6 @@
 
 	ret = iwl_legacy_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC,
 				     sizeof(rxon_assoc), &rxon_assoc, NULL);
-	if (ret)
-		return ret;
 
 	return ret;
 }
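
Two small error-handling cleanups sit in the iwl-4965.c hunks above: the
calibration-group lookup returns a proper -EINVAL instead of a bare -1, its
caller propagates the helper's code (return txatten_grp;) rather than
re-hardcoding the errno, and a redundant "if (ret) return ret;" immediately
before "return ret;" is dropped. The shape of the first two, as a sketch with
invented names and made-up channel bounds:

    #include <linux/errno.h>

    /* lookup_group() and set_power() are illustrative stand-ins for the
     * iwlegacy functions; the bounds and grouping math are fabricated. */
    static int lookup_group(int channel)
    {
    	if (channel < 1 || channel > 165)
    		return -EINVAL;		/* named errno, not a bare -1 */
    	return channel / 34;
    }

    static int set_power(int channel)
    {
    	int grp = lookup_group(channel);

    	if (grp < 0)
    		return grp;		/* propagate, don't re-hardcode */
    	/* ... program the attenuation tables for group 'grp' ... */
    	return 0;
    }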
diff --git a/drivers/net/wireless/iwlegacy/iwl-dev.h b/drivers/net/wireless/iwlegacy/iwl-dev.h
index ea30122..0a8d07f 100644
--- a/drivers/net/wireless/iwlegacy/iwl-dev.h
+++ b/drivers/net/wireless/iwlegacy/iwl-dev.h
@@ -32,6 +32,7 @@
 #ifndef __iwl_legacy_dev_h__
 #define __iwl_legacy_dev_h__
 
+#include <linux/interrupt.h>
 #include <linux/pci.h> /* for struct pci_device_id */
 #include <linux/kernel.h>
 #include <linux/leds.h>
diff --git a/drivers/net/wireless/iwlegacy/iwl-eeprom.c b/drivers/net/wireless/iwlegacy/iwl-eeprom.c
index cb346d1..5bf3f49 100644
--- a/drivers/net/wireless/iwlegacy/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlegacy/iwl-eeprom.c
@@ -316,7 +316,6 @@
 		break;
 	default:
 		BUG();
-		return;
 	}
 }
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index 61d4a11..7aa240e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -174,7 +174,6 @@
 	.rx_handler_setup = iwlagn_rx_handler_setup,
 	.setup_deferred_work = iwlagn_setup_deferred_work,
 	.is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
-	.send_tx_power = iwlagn_send_tx_power,
 	.update_chain_flags = iwl_update_chain_flags,
 	.apm_ops = {
 		.init = iwl_apm_init,
@@ -223,6 +222,7 @@
 static struct iwl_ht_params iwl1000_ht_params = {
 	.ht_greenfield_support = true,
 	.use_rts_for_aggregation = true, /* use rts/cts protection */
+	.smps_mode = IEEE80211_SMPS_STATIC,
 };
 
 #define IWL_DEVICE_1000						\
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
index 2282279..5484ab7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-2000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
@@ -183,7 +183,6 @@
 	.setup_deferred_work = iwlagn_bt_setup_deferred_work,
 	.cancel_deferred_work = iwlagn_bt_cancel_deferred_work,
 	.is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
-	.send_tx_power = iwlagn_send_tx_power,
 	.update_chain_flags = iwl_update_chain_flags,
 	.apm_ops = {
 		.init = iwl_apm_init,
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index f99f9c1..4353a65 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -340,7 +340,6 @@
 	.rx_handler_setup = iwlagn_rx_handler_setup,
 	.setup_deferred_work = iwlagn_setup_deferred_work,
 	.is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
-	.send_tx_power = iwlagn_send_tx_power,
 	.update_chain_flags = iwl_update_chain_flags,
 	.set_channel_switch = iwl5000_hw_channel_switch,
 	.apm_ops = {
@@ -371,7 +370,6 @@
 	.rx_handler_setup = iwlagn_rx_handler_setup,
 	.setup_deferred_work = iwlagn_setup_deferred_work,
 	.is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
-	.send_tx_power = iwlagn_send_tx_power,
 	.update_chain_flags = iwl_update_chain_flags,
 	.set_channel_switch = iwl5000_hw_channel_switch,
 	.apm_ops = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index fbe565c..6e5ce44 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -279,7 +279,6 @@
 	.rx_handler_setup = iwlagn_rx_handler_setup,
 	.setup_deferred_work = iwlagn_setup_deferred_work,
 	.is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
-	.send_tx_power = iwlagn_send_tx_power,
 	.update_chain_flags = iwl_update_chain_flags,
 	.set_channel_switch = iwl6000_hw_channel_switch,
 	.apm_ops = {
@@ -312,7 +311,6 @@
 	.setup_deferred_work = iwlagn_bt_setup_deferred_work,
 	.cancel_deferred_work = iwlagn_bt_cancel_deferred_work,
 	.is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
-	.send_tx_power = iwlagn_send_tx_power,
 	.update_chain_flags = iwl_update_chain_flags,
 	.set_channel_switch = iwl6000_hw_channel_switch,
 	.apm_ops = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
index 23fa93d..23eee0c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
@@ -302,7 +302,6 @@
 }
 
 struct iwl_hcmd_ops iwlagn_hcmd = {
-	.commit_rxon = iwlagn_commit_rxon,
 	.set_rxon_chain = iwlagn_set_rxon_chain,
 	.set_tx_ant = iwlagn_send_tx_ant_config,
 	.send_bt_config = iwl_send_bt_config,
@@ -310,7 +309,6 @@
 };
 
 struct iwl_hcmd_ops iwlagn_bt_hcmd = {
-	.commit_rxon = iwlagn_commit_rxon,
 	.set_rxon_chain = iwlagn_set_rxon_chain,
 	.set_tx_ant = iwlagn_send_tx_ant_config,
 	.send_bt_config = iwlagn_send_advance_bt_config,
@@ -324,5 +322,4 @@
 	.tx_cmd_protection = iwlagn_tx_cmd_protection,
 	.calc_rssi = iwlagn_calc_rssi,
 	.request_scan = iwlagn_request_scan,
-	.post_scan = iwlagn_post_scan,
 };
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
index f803fb6..677f73c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
@@ -408,9 +408,9 @@
 	unsigned long flags;
 
 	if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) {
-		IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
-			  "is out of range [0-%d] %d %d\n", txq_id,
-			  index, txq->q.n_bd, txq->q.write_ptr,
+		IWL_ERR(priv, "%s: Read index for DMA queue txq_id (%d) "
+			  "index %d is out of range [0-%d] %d %d\n", __func__,
+			  txq_id, index, txq->q.n_bd, txq->q.write_ptr,
 			  txq->q.read_ptr);
 		return;
 	}
@@ -1797,6 +1797,7 @@
 		priv->cfg->ops->lib->update_chain_flags(priv);
 
 	if (smps_request != -1) {
+		priv->current_ht_config.smps = smps_request;
 		for_each_context(priv, ctx) {
 			if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION)
 				ieee80211_request_smps(ctx->vif, smps_request);
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index 592b0cf..85e0828 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -426,7 +426,7 @@
 			ieee80211_stop_tx_ba_session(sta, tid);
 		}
 	} else {
-		IWL_ERR(priv, "Aggregation not enabled for tid %d "
+		IWL_DEBUG_HT(priv, "Aggregation not enabled for tid %d "
 			"because load = %u\n", tid, load);
 	}
 	return ret;
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
index 09f679d..a7c66c4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
@@ -81,6 +81,21 @@
 	return ret;
 }
 
+static int iwlagn_disconn_pan(struct iwl_priv *priv,
+			      struct iwl_rxon_context *ctx,
+			      struct iwl_rxon_cmd *send)
+{
+	__le32 old_filter = send->filter_flags;
+	int ret;
+
+	send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+	ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd, sizeof(*send), send);
+
+	send->filter_flags = old_filter;
+
+	return ret;
+}
+
 static void iwlagn_update_qos(struct iwl_priv *priv,
 			      struct iwl_rxon_context *ctx)
 {
@@ -163,9 +178,6 @@
 
 	ret = iwl_send_cmd_pdu_async(priv, ctx->rxon_assoc_cmd,
 				     sizeof(rxon_assoc), &rxon_assoc, NULL);
-	if (ret)
-		return ret;
-
 	return ret;
 }
 
@@ -175,10 +187,21 @@
 	int ret;
 	struct iwl_rxon_cmd *active = (void *)&ctx->active;
 
-	if (ctx->ctxid == IWL_RXON_CTX_BSS)
+	if (ctx->ctxid == IWL_RXON_CTX_BSS) {
 		ret = iwlagn_disable_bss(priv, ctx, &ctx->staging);
-	else
+	} else {
 		ret = iwlagn_disable_pan(priv, ctx, &ctx->staging);
+		if (ret)
+			return ret;
+		if (ctx->vif) {
+			ret = iwl_send_rxon_timing(priv, ctx);
+			if (ret) {
+				IWL_ERR(priv, "Failed to send timing (%d)!\n", ret);
+				return ret;
+			}
+			ret = iwlagn_disconn_pan(priv, ctx, &ctx->staging);
+		}
+	}
 	if (ret)
 		return ret;
 
@@ -205,10 +228,12 @@
 	struct iwl_rxon_cmd *active = (void *)&ctx->active;
 
 	/* RXON timing must be before associated RXON */
-	ret = iwl_send_rxon_timing(priv, ctx);
-	if (ret) {
-		IWL_ERR(priv, "Failed to send timing (%d)!\n", ret);
-		return ret;
+	if (ctx->ctxid == IWL_RXON_CTX_BSS) {
+		ret = iwl_send_rxon_timing(priv, ctx);
+		if (ret) {
+			IWL_ERR(priv, "Failed to send timing (%d)!\n", ret);
+			return ret;
+		}
 	}
 	/* QoS info may be cleared by previous un-assoc RXON */
 	iwlagn_update_qos(priv, ctx);
@@ -263,6 +288,12 @@
 		IWL_ERR(priv, "Error sending TX power (%d)\n", ret);
 		return ret;
 	}
+
+	if ((ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION) &&
+	    priv->cfg->ht_params->smps_mode)
+		ieee80211_request_smps(ctx->vif,
+				       priv->cfg->ht_params->smps_mode);
+
 	return 0;
 }
 
@@ -375,13 +406,11 @@
 		 * do it now if the settings changed.
 		 */
 		iwl_set_tx_power(priv, priv->tx_power_next, false);
-		return 0;
-	}
 
-	if (priv->cfg->ops->hcmd->set_pan_params) {
-		ret = priv->cfg->ops->hcmd->set_pan_params(priv);
-		if (ret)
-			return ret;
+		/* make sure we are in the right PS state */
+		iwl_power_update_mode(priv, true);
+
+		return 0;
 	}
 
 	iwl_set_rxon_hwcrypto(priv, ctx, !iwlagn_mod_params.sw_crypto);
@@ -405,6 +434,12 @@
 	if (ret)
 		return ret;
 
+	if (priv->cfg->ops->hcmd->set_pan_params) {
+		ret = priv->cfg->ops->hcmd->set_pan_params(priv);
+		if (ret)
+			return ret;
+	}
+
 	if (new_assoc)
 		return iwlagn_rxon_connect(priv, ctx);
 
@@ -770,6 +805,13 @@
 	struct iwl_rxon_context *ctx;
 
 	/*
+	 * We do not commit power settings while scan is pending,
+	 * do it now if the settings changed.
+	 */
+	iwl_power_set_mode(priv, &priv->power_data.sleep_cmd_next, false);
+	iwl_set_tx_power(priv, priv->tx_power_next, false);
+
+	/*
 	 * Since setting the RXON may have been deferred while
 	 * performing the scan, fire one off if needed
 	 */
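
The iwl-agn-rxon.c changes above rework context teardown: RXON timing is sent
up front only for the BSS context, disabling a PAN context now sends timing
followed by an explicit unassociated RXON via the new iwlagn_disconn_pan(),
and power/TX-power commits deferred during a scan are flushed in the
post-scan path. The helper uses a save/modify/restore idiom worth calling
out — sketched here with a generic command type, not the iwl_rxon_cmd layout:

    /* Temporarily clear a flag in a command buffer, send it, then put the
     * original flags back so the caller's staging copy is left exactly as
     * it was. ASSOC_FLAG and struct cmd are illustrative names. */
    static int send_without_assoc(struct cmd *send,
    			      int (*send_fn)(struct cmd *send))
    {
    	__le32 old_filter = send->filter_flags;
    	int ret;

    	send->filter_flags &= ~ASSOC_FLAG;
    	ret = send_fn(send);
    	send->filter_flags = old_filter;

    	return ret;
    }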
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
index 4974cd7..8bd48f6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
@@ -1033,8 +1033,8 @@
 	if (unlikely(tx_fifo < 0))
 		return tx_fifo;
 
-	IWL_WARN(priv, "%s on ra = %pM tid = %d\n",
-			__func__, sta->addr, tid);
+	IWL_DEBUG_HT(priv, "TX AGG request on ra = %pM tid = %d\n",
+		     sta->addr, tid);
 
 	sta_id = iwl_sta_id(sta);
 	if (sta_id == IWL_INVALID_STATION) {
@@ -1236,9 +1236,9 @@
 	struct ieee80211_hdr *hdr;
 
 	if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
-		IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
-			  "is out of range [0-%d] %d %d.\n", txq_id,
-			  index, q->n_bd, q->write_ptr, q->read_ptr);
+		IWL_ERR(priv, "%s: Read index for DMA queue txq id (%d), "
+			  "index %d is out of range [0-%d] %d %d.\n", __func__,
+			  txq_id, index, q->n_bd, q->write_ptr, q->read_ptr);
 		return 0;
 	}
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 8e1942e..099c279 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -97,7 +97,7 @@
 		for_each_context(priv, ctx) {
 			priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
 			if (ctx->active.rx_chain != ctx->staging.rx_chain)
-				iwlcore_commit_rxon(priv, ctx);
+				iwlagn_commit_rxon(priv, ctx);
 		}
 	}
 }
@@ -274,7 +274,7 @@
 	for_each_context(priv, ctx) {
 		if (priv->cfg->ops->hcmd->set_rxon_chain)
 			priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
-		iwlcore_commit_rxon(priv, ctx);
+		iwlagn_commit_rxon(priv, ctx);
 	}
 
 	priv->cfg->ops->hcmd->send_bt_config(priv);
@@ -2056,7 +2056,7 @@
 	set_bit(STATUS_READY, &priv->status);
 
 	/* Configure the adapter for unassociated operation */
-	ret = iwlcore_commit_rxon(priv, ctx);
+	ret = iwlagn_commit_rxon(priv, ctx);
 	if (ret)
 		return ret;
 
@@ -2420,6 +2420,77 @@
  *
  *****************************************************************************/
 
+static const struct ieee80211_iface_limit iwlagn_sta_ap_limits[] = {
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_STATION),
+	},
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_AP),
+	},
+};
+
+static const struct ieee80211_iface_limit iwlagn_2sta_limits[] = {
+	{
+		.max = 2,
+		.types = BIT(NL80211_IFTYPE_STATION),
+	},
+};
+
+static const struct ieee80211_iface_limit iwlagn_p2p_sta_go_limits[] = {
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_STATION),
+	},
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_P2P_GO) |
+			 BIT(NL80211_IFTYPE_AP),
+	},
+};
+
+static const struct ieee80211_iface_limit iwlagn_p2p_2sta_limits[] = {
+	{
+		.max = 2,
+		.types = BIT(NL80211_IFTYPE_STATION),
+	},
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_P2P_CLIENT),
+	},
+};
+
+static const struct ieee80211_iface_combination
+iwlagn_iface_combinations_dualmode[] = {
+	{ .num_different_channels = 1,
+	  .max_interfaces = 2,
+	  .beacon_int_infra_match = true,
+	  .limits = iwlagn_sta_ap_limits,
+	  .n_limits = ARRAY_SIZE(iwlagn_sta_ap_limits),
+	},
+	{ .num_different_channels = 1,
+	  .max_interfaces = 2,
+	  .limits = iwlagn_2sta_limits,
+	  .n_limits = ARRAY_SIZE(iwlagn_2sta_limits),
+	},
+};
+
+static const struct ieee80211_iface_combination
+iwlagn_iface_combinations_p2p[] = {
+	{ .num_different_channels = 1,
+	  .max_interfaces = 2,
+	  .beacon_int_infra_match = true,
+	  .limits = iwlagn_p2p_sta_go_limits,
+	  .n_limits = ARRAY_SIZE(iwlagn_p2p_sta_go_limits),
+	},
+	{ .num_different_channels = 1,
+	  .max_interfaces = 2,
+	  .limits = iwlagn_p2p_2sta_limits,
+	  .n_limits = ARRAY_SIZE(iwlagn_p2p_2sta_limits),
+	},
+};
+
 /*
  * Not a mac80211 entry point function, but it fits in with all the
  * other mac80211 functions grouped here.
@@ -2460,6 +2531,18 @@
 		hw->wiphy->interface_modes |= ctx->exclusive_interface_modes;
 	}
 
+	BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
+
+	if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT)) {
+		hw->wiphy->iface_combinations = iwlagn_iface_combinations_p2p;
+		hw->wiphy->n_iface_combinations =
+			ARRAY_SIZE(iwlagn_iface_combinations_p2p);
+	} else if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) {
+		hw->wiphy->iface_combinations = iwlagn_iface_combinations_dualmode;
+		hw->wiphy->n_iface_combinations =
+			ARRAY_SIZE(iwlagn_iface_combinations_dualmode);
+	}
+
 	hw->wiphy->max_remain_on_channel_duration = 1000;
 
 	hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
@@ -2711,12 +2794,9 @@
 			ret = 0;
 		if (priv->cfg->ht_params &&
 		    priv->cfg->ht_params->use_rts_for_aggregation) {
-			struct iwl_station_priv *sta_priv =
-				(void *) sta->drv_priv;
 			/*
 			 * switch off RTS/CTS if it was previously enabled
 			 */
-
 			sta_priv->lq_sta.lq.general_params.flags &=
 				~LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
 			iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif),
@@ -2764,6 +2844,9 @@
 
 		iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif),
 				&sta_priv->lq_sta.lq, CMD_ASYNC, false);
+
+		IWL_INFO(priv, "Tx aggregation enabled on ra = %pM tid = %d\n",
+			 sta->addr, tid);
 		ret = 0;
 		break;
 	}
@@ -2833,7 +2916,6 @@
 	 */
 	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
 	u16 ch;
-	unsigned long flags = 0;
 
 	IWL_DEBUG_MAC80211(priv, "enter\n");
 
@@ -2850,65 +2932,64 @@
 	if (!iwl_is_associated_ctx(ctx))
 		goto out;
 
-	if (priv->cfg->ops->lib->set_channel_switch) {
+	if (!priv->cfg->ops->lib->set_channel_switch)
+		goto out;
 
-		ch = channel->hw_value;
-		if (le16_to_cpu(ctx->active.channel) != ch) {
-			ch_info = iwl_get_channel_info(priv,
-						       channel->band,
-						       ch);
-			if (!is_channel_valid(ch_info)) {
-				IWL_DEBUG_MAC80211(priv, "invalid channel\n");
-				goto out;
-			}
-			spin_lock_irqsave(&priv->lock, flags);
+	ch = channel->hw_value;
+	if (le16_to_cpu(ctx->active.channel) == ch)
+		goto out;
 
-			priv->current_ht_config.smps = conf->smps_mode;
-
-			/* Configure HT40 channels */
-			ctx->ht.enabled = conf_is_ht(conf);
-			if (ctx->ht.enabled) {
-				if (conf_is_ht40_minus(conf)) {
-					ctx->ht.extension_chan_offset =
-						IEEE80211_HT_PARAM_CHA_SEC_BELOW;
-					ctx->ht.is_40mhz = true;
-				} else if (conf_is_ht40_plus(conf)) {
-					ctx->ht.extension_chan_offset =
-						IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
-					ctx->ht.is_40mhz = true;
-				} else {
-					ctx->ht.extension_chan_offset =
-						IEEE80211_HT_PARAM_CHA_SEC_NONE;
-					ctx->ht.is_40mhz = false;
-				}
-			} else
-				ctx->ht.is_40mhz = false;
-
-			if ((le16_to_cpu(ctx->staging.channel) != ch))
-				ctx->staging.flags = 0;
-
-			iwl_set_rxon_channel(priv, channel, ctx);
-			iwl_set_rxon_ht(priv, ht_conf);
-			iwl_set_flags_for_band(priv, ctx, channel->band,
-					       ctx->vif);
-			spin_unlock_irqrestore(&priv->lock, flags);
-
-			iwl_set_rate(priv);
-			/*
-			 * at this point, staging_rxon has the
-			 * configuration for channel switch
-			 */
-			set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
-			priv->switch_channel = cpu_to_le16(ch);
-			if (priv->cfg->ops->lib->set_channel_switch(priv,
-								    ch_switch)) {
-				clear_bit(STATUS_CHANNEL_SWITCH_PENDING,
-					  &priv->status);
-				priv->switch_channel = 0;
-				ieee80211_chswitch_done(ctx->vif, false);
-			}
-		}
+	ch_info = iwl_get_channel_info(priv, channel->band, ch);
+	if (!is_channel_valid(ch_info)) {
+		IWL_DEBUG_MAC80211(priv, "invalid channel\n");
+		goto out;
 	}
+
+	spin_lock_irq(&priv->lock);
+
+	priv->current_ht_config.smps = conf->smps_mode;
+
+	/* Configure HT40 channels */
+	ctx->ht.enabled = conf_is_ht(conf);
+	if (ctx->ht.enabled) {
+		if (conf_is_ht40_minus(conf)) {
+			ctx->ht.extension_chan_offset =
+				IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+			ctx->ht.is_40mhz = true;
+		} else if (conf_is_ht40_plus(conf)) {
+			ctx->ht.extension_chan_offset =
+				IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+			ctx->ht.is_40mhz = true;
+		} else {
+			ctx->ht.extension_chan_offset =
+				IEEE80211_HT_PARAM_CHA_SEC_NONE;
+			ctx->ht.is_40mhz = false;
+		}
+	} else
+		ctx->ht.is_40mhz = false;
+
+	if ((le16_to_cpu(ctx->staging.channel) != ch))
+		ctx->staging.flags = 0;
+
+	iwl_set_rxon_channel(priv, channel, ctx);
+	iwl_set_rxon_ht(priv, ht_conf);
+	iwl_set_flags_for_band(priv, ctx, channel->band, ctx->vif);
+
+	spin_unlock_irq(&priv->lock);
+
+	iwl_set_rate(priv);
+	/*
+	 * at this point, staging_rxon has the
+	 * configuration for channel switch
+	 */
+	set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
+	priv->switch_channel = cpu_to_le16(ch);
+	if (priv->cfg->ops->lib->set_channel_switch(priv, ch_switch)) {
+		clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
+		priv->switch_channel = 0;
+		ieee80211_chswitch_done(ctx->vif, false);
+	}
+
 out:
 	mutex_unlock(&priv->mutex);
 	IWL_DEBUG_MAC80211(priv, "leave\n");
@@ -3019,7 +3100,7 @@
 
 	priv->_agn.hw_roc_channel = NULL;
 
-	iwlcore_commit_rxon(priv, ctx);
+	iwlagn_commit_rxon(priv, ctx);
 
 	ctx->is_active = false;
 }
@@ -3062,7 +3143,7 @@
 	priv->_agn.hw_roc_channel = channel;
 	priv->_agn.hw_roc_chantype = channel_type;
 	priv->_agn.hw_roc_duration = DIV_ROUND_UP(duration * 1000, 1024);
-	iwlcore_commit_rxon(priv, &priv->contexts[IWL_RXON_CTX_PAN]);
+	iwlagn_commit_rxon(priv, &priv->contexts[IWL_RXON_CTX_PAN]);
 	queue_delayed_work(priv->workqueue, &priv->_agn.hw_roc_work,
 			   msecs_to_jiffies(duration + 20));
 
@@ -3619,8 +3700,8 @@
 	destroy_workqueue(priv->workqueue);
 	priv->workqueue = NULL;
 	free_irq(priv->pci_dev->irq, priv);
-	iwl_free_isr_ict(priv);
  out_disable_msi:
+	iwl_free_isr_ict(priv);
 	pci_disable_msi(priv->pci_dev);
 	iwl_uninit_drv(priv);
  out_free_eeprom:
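
The new ieee80211_iface_limit / ieee80211_iface_combination tables in
iwl-agn.c advertise to cfg80211 exactly which interface mixes this
single-channel hardware supports — one STA plus one AP/GO, or two STAs,
capped at two interfaces, with beacon_int_infra_match requiring all beaconing
interfaces to share a beacon interval — and the probe path picks the P2P or
dual-mode table based on the advertised interface modes. For comparison, the
same vocabulary expressing a combination this patch does not add
(illustrative only):

    /* Illustrative: "one STA plus one IBSS on a single channel" in the
     * same table format. Not part of this patch. */
    static const struct ieee80211_iface_limit example_sta_ibss_limits[] = {
    	{ .max = 1, .types = BIT(NL80211_IFTYPE_STATION) },
    	{ .max = 1, .types = BIT(NL80211_IFTYPE_ADHOC) },
    };

    static const struct ieee80211_iface_combination example_combination = {
    	.num_different_channels = 1,
    	.max_interfaces = 2,
    	.limits = example_sta_ibss_limits,
    	.n_limits = ARRAY_SIZE(example_sta_ibss_limits),
    };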
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 213c80c..5416b12 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -1127,9 +1127,6 @@
 	if (priv->tx_power_user_lmt == tx_power && !force)
 		return 0;
 
-	if (!priv->cfg->ops->lib->send_tx_power)
-		return -EOPNOTSUPP;
-
 	if (tx_power < IWLAGN_TX_POWER_TARGET_POWER_MIN) {
 		IWL_WARN(priv,
 			 "Requested user TXPOWER %d below lower limit %d.\n",
@@ -1163,7 +1160,7 @@
 	prev_tx_power = priv->tx_power_user_lmt;
 	priv->tx_power_user_lmt = tx_power;
 
-	ret = priv->cfg->ops->lib->send_tx_power(priv);
+	ret = iwlagn_send_tx_power(priv);
 
 	/* if fail to set tx_power, restore the orig. tx power */
 	if (ret) {
@@ -1278,7 +1275,7 @@
 	if (priv->cfg->ops->hcmd->set_rxon_chain)
 		priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
 
-	return iwlcore_commit_rxon(priv, ctx);
+	return iwlagn_commit_rxon(priv, ctx);
 }
 
 static int iwl_setup_interface(struct iwl_priv *priv,
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index a54d416..05ea88a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -90,7 +90,6 @@
 #define IWL_CMD(x) case x: return #x
 
 struct iwl_hcmd_ops {
-	int (*commit_rxon)(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
 	void (*set_rxon_chain)(struct iwl_priv *priv,
 			       struct iwl_rxon_context *ctx);
 	int (*set_tx_ant)(struct iwl_priv *priv, u8 valid_tx_ant);
@@ -112,7 +111,6 @@
 	int  (*calc_rssi)(struct iwl_priv *priv,
 			  struct iwl_rx_phy_res *rx_resp);
 	int (*request_scan)(struct iwl_priv *priv, struct ieee80211_vif *vif);
-	void (*post_scan)(struct iwl_priv *priv);
 };
 
 struct iwl_apm_ops {
@@ -141,7 +139,6 @@
 	struct iwl_apm_ops apm_ops;
 
 	/* power */
-	int (*send_tx_power) (struct iwl_priv *priv);
 	void (*update_chain_flags)(struct iwl_priv *priv);
 
 	/* eeprom operations (as defined in iwl-eeprom.h) */
@@ -225,7 +222,7 @@
  * @ampdu_factor: Maximum A-MPDU length factor
  * @ampdu_density: Minimum A-MPDU spacing
  * @bt_sco_disable: uCode should not respond to BT in SCO/ESCO mode
-*/
+ */
 struct iwl_bt_params {
 	bool advanced_bt_coexist;
 	u8 bt_init_traffic_load;
@@ -238,10 +235,11 @@
 };
 /*
  * @use_rts_for_aggregation: use rts/cts protection for HT traffic
-*/
+ */
 struct iwl_ht_params {
 	const bool ht_greenfield_support; /* if used set to true */
 	bool use_rts_for_aggregation;
+	enum ieee80211_smps_mode smps_mode;
 };
 
 /**
@@ -613,11 +611,7 @@
 int iwl_apm_init(struct iwl_priv *priv);
 
 int iwl_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
-static inline int iwlcore_commit_rxon(struct iwl_priv *priv,
-				      struct iwl_rxon_context *ctx)
-{
-	return priv->cfg->ops->hcmd->commit_rxon(priv, ctx);
-}
+
 static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
 			struct iwl_priv *priv, enum ieee80211_band band)
 {
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index c8de236..7ad98d8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -31,6 +31,7 @@
 #ifndef __iwl_dev_h__
 #define __iwl_dev_h__
 
+#include <linux/interrupt.h>
 #include <linux/pci.h> /* for struct pci_device_id */
 #include <linux/kernel.h>
 #include <linux/wait.h>
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index d60d630..438eecd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -36,6 +36,7 @@
 #include "iwl-sta.h"
 #include "iwl-io.h"
 #include "iwl-helpers.h"
+#include "iwl-agn.h"
 
 /* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
  * sending probe req.  This should be set long enough to hear probe responses
@@ -600,14 +601,7 @@
 	if (!iwl_is_ready_rf(priv))
 		goto out;
 
-	/*
-	 * We do not commit power settings while scan is pending,
-	 * do it now if the settings changed.
-	 */
-	iwl_power_set_mode(priv, &priv->power_data.sleep_cmd_next, false);
-	iwl_set_tx_power(priv, priv->tx_power_next, false);
-
-	priv->cfg->ops->utils->post_scan(priv);
+	iwlagn_post_scan(priv);
 
 out:
 	mutex_unlock(&priv->mutex);
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index 686e176..1084fe0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -753,9 +753,9 @@
 	int nfreed = 0;
 
 	if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
-		IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
-			  "is out of range [0-%d] %d %d.\n", txq_id,
-			  idx, q->n_bd, q->write_ptr, q->read_ptr);
+		IWL_ERR(priv, "%s: Read index for DMA queue txq id (%d), "
+			  "index %d is out of range [0-%d] %d %d.\n", __func__,
+			  txq_id, idx, q->n_bd, q->write_ptr, q->read_ptr);
 		return;
 	}
 
diff --git a/drivers/net/wireless/iwmc3200wifi/fw.c b/drivers/net/wireless/iwmc3200wifi/fw.c
index 4906709..6f1afe6 100644
--- a/drivers/net/wireless/iwmc3200wifi/fw.c
+++ b/drivers/net/wireless/iwmc3200wifi/fw.c
@@ -187,7 +187,7 @@
 		if (ret < 0)
 			goto err_release_fw;
 		opcode_idx++;
-	};
+	}
 
 	/* Read firmware version */
 	fw_offset = iwm_fw_op_offset(iwm, fw, IWM_HDR_REC_OP_SW_VER, 0);
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index 5d637af..b456a53 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -8,6 +8,7 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/hardirq.h>
 #include <linux/sched.h>
 #include <linux/wait.h>
 #include <linux/slab.h>
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
index 71c8f3f..9dcf967 100644
--- a/drivers/net/wireless/libertas/cmd.c
+++ b/drivers/net/wireless/libertas/cmd.c
@@ -3,6 +3,7 @@
  * It prepares command and sends it to firmware when it is ready.
  */
 
+#include <linux/hardirq.h>
 #include <linux/kfifo.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
diff --git a/drivers/net/wireless/libertas/cmdresp.c b/drivers/net/wireless/libertas/cmdresp.c
index 207fc36..2ffe5a1 100644
--- a/drivers/net/wireless/libertas/cmdresp.c
+++ b/drivers/net/wireless/libertas/cmdresp.c
@@ -3,6 +3,7 @@
  * responses as well as events generated by firmware.
  */
 
+#include <linux/hardirq.h>
 #include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/sched.h>
diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
index 23250f6..1af1827 100644
--- a/drivers/net/wireless/libertas/debugfs.c
+++ b/drivers/net/wireless/libertas/debugfs.c
@@ -1,6 +1,7 @@
 #include <linux/dcache.h>
 #include <linux/debugfs.h>
 #include <linux/delay.h>
+#include <linux/hardirq.h>
 #include <linux/mm.h>
 #include <linux/string.h>
 #include <linux/slab.h>
diff --git a/drivers/net/wireless/libertas/ethtool.c b/drivers/net/wireless/libertas/ethtool.c
index 29dbce4..4dfb3bf 100644
--- a/drivers/net/wireless/libertas/ethtool.c
+++ b/drivers/net/wireless/libertas/ethtool.c
@@ -1,3 +1,4 @@
+#include <linux/hardirq.h>
 #include <linux/netdevice.h>
 #include <linux/ethtool.h>
 #include <linux/delay.h>
diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c
index 463352c..4fa0be9 100644
--- a/drivers/net/wireless/libertas/if_spi.c
+++ b/drivers/net/wireless/libertas/if_spi.c
@@ -19,6 +19,8 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/hardirq.h>
+#include <linux/interrupt.h>
 #include <linux/moduleparam.h>
 #include <linux/firmware.h>
 #include <linux/jiffies.h>
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index 8c40949..cf3d2c8 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -9,6 +9,7 @@
 #include <linux/moduleparam.h>
 #include <linux/delay.h>
 #include <linux/etherdevice.h>
+#include <linux/hardirq.h>
 #include <linux/netdevice.h>
 #include <linux/if_arp.h>
 #include <linux/kthread.h>
diff --git a/drivers/net/wireless/libertas/mesh.c b/drivers/net/wireless/libertas/mesh.c
index 24cf066..7969d10 100644
--- a/drivers/net/wireless/libertas/mesh.c
+++ b/drivers/net/wireless/libertas/mesh.c
@@ -2,6 +2,7 @@
 
 #include <linux/delay.h>
 #include <linux/etherdevice.h>
+#include <linux/hardirq.h>
 #include <linux/netdevice.h>
 #include <linux/if_ether.h>
 #include <linux/if_arp.h>
diff --git a/drivers/net/wireless/libertas/rx.c b/drivers/net/wireless/libertas/rx.c
index fdb0448..bfb8898 100644
--- a/drivers/net/wireless/libertas/rx.c
+++ b/drivers/net/wireless/libertas/rx.c
@@ -5,6 +5,7 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/etherdevice.h>
+#include <linux/hardirq.h>
 #include <linux/slab.h>
 #include <linux/types.h>
 #include <net/cfg80211.h>
diff --git a/drivers/net/wireless/libertas/tx.c b/drivers/net/wireless/libertas/tx.c
index bbb95f8..f19495b 100644
--- a/drivers/net/wireless/libertas/tx.c
+++ b/drivers/net/wireless/libertas/tx.c
@@ -1,6 +1,7 @@
 /*
  * This file contains the handling of TX in wlan driver.
  */
+#include <linux/hardirq.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/sched.h>
diff --git a/drivers/net/wireless/libertas_tf/cmd.c b/drivers/net/wireless/libertas_tf/cmd.c
index 8945afd..13557fe 100644
--- a/drivers/net/wireless/libertas_tf/cmd.c
+++ b/drivers/net/wireless/libertas_tf/cmd.c
@@ -9,6 +9,7 @@
  */
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/hardirq.h>
 #include <linux/slab.h>
 
 #include "libertas_tf.h"
diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/libertas_tf/main.c
index d400508..5beb581 100644
--- a/drivers/net/wireless/libertas_tf/main.c
+++ b/drivers/net/wireless/libertas_tf/main.c
@@ -9,6 +9,7 @@
  */
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/hardirq.h>
 #include <linux/slab.h>
 
 #include <linux/etherdevice.h>
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 9d4a40e..7e1fa96 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -1,6 +1,7 @@
 /*
  * mac80211_hwsim - software simulator of 802.11 radio(s) for mac80211
  * Copyright (c) 2008, Jouni Malinen <j@w1.fi>
+ * Copyright (c) 2011, Javier Lopez <jlopex@gmail.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -25,11 +26,17 @@
 #include <linux/rtnetlink.h>
 #include <linux/etherdevice.h>
 #include <linux/debugfs.h>
+#include <net/genetlink.h>
+#include "mac80211_hwsim.h"
+
+#define WARN_QUEUE 100
+#define MAX_QUEUE 200
 
 MODULE_AUTHOR("Jouni Malinen");
 MODULE_DESCRIPTION("Software simulator of 802.11 radio(s) for mac80211");
 MODULE_LICENSE("GPL");
 
+static int wmediumd_pid;
 static int radios = 2;
 module_param(radios, int, 0444);
 MODULE_PARM_DESC(radios, "Number of simulated radios");
@@ -302,6 +309,7 @@
 	struct dentry *debugfs;
 	struct dentry *debugfs_ps;
 
+	struct sk_buff_head pending;	/* packets pending */
 	/*
 	 * Only radios in the same group can communicate together (the
 	 * channel has to match too). Each bit represents a group. A
@@ -322,6 +330,32 @@
 	__le16 rt_chbitmask;
 } __packed;
 
+/* MAC80211_HWSIM netlink family */
+static struct genl_family hwsim_genl_family = {
+	.id = GENL_ID_GENERATE,
+	.hdrsize = 0,
+	.name = "MAC80211_HWSIM",
+	.version = 1,
+	.maxattr = HWSIM_ATTR_MAX,
+};
+
+/* MAC80211_HWSIM netlink policy */
+
+static struct nla_policy hwsim_genl_policy[HWSIM_ATTR_MAX + 1] = {
+	[HWSIM_ATTR_ADDR_RECEIVER] = { .type = NLA_UNSPEC,
+				       .len = 6*sizeof(u8) },
+	[HWSIM_ATTR_ADDR_TRANSMITTER] = { .type = NLA_UNSPEC,
+					  .len = 6*sizeof(u8) },
+	[HWSIM_ATTR_FRAME] = { .type = NLA_BINARY,
+			       .len = IEEE80211_MAX_DATA_LEN },
+	[HWSIM_ATTR_FLAGS] = { .type = NLA_U32 },
+	[HWSIM_ATTR_RX_RATE] = { .type = NLA_U32 },
+	[HWSIM_ATTR_SIGNAL] = { .type = NLA_U32 },
+	[HWSIM_ATTR_TX_INFO] = { .type = NLA_UNSPEC,
+				 .len = IEEE80211_TX_MAX_RATES *
+					sizeof(struct hwsim_tx_rate) },
+	[HWSIM_ATTR_COOKIE] = { .type = NLA_U64 },
+};
 
 static netdev_tx_t hwsim_mon_xmit(struct sk_buff *skb,
 					struct net_device *dev)
@@ -478,9 +512,89 @@
 	return md.ret;
 }
 
+static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
+				       struct sk_buff *my_skb,
+				       int dst_pid)
+{
+	struct sk_buff *skb;
+	struct mac80211_hwsim_data *data = hw->priv;
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) my_skb->data;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(my_skb);
+	void *msg_head;
+	unsigned int hwsim_flags = 0;
+	int i;
+	struct hwsim_tx_rate tx_attempts[IEEE80211_TX_MAX_RATES];
 
-static bool mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
-				    struct sk_buff *skb)
+	if (data->idle) {
+		wiphy_debug(hw->wiphy, "Trying to TX when idle - reject\n");
+		dev_kfree_skb(my_skb);
+		return;
+	}
+
+	if (data->ps != PS_DISABLED)
+		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
+	/* If the queue contains MAX_QUEUE skbs, drop some */
+	if (skb_queue_len(&data->pending) >= MAX_QUEUE) {
+		/* Dropping until WARN_QUEUE level */
+		while (skb_queue_len(&data->pending) >= WARN_QUEUE)
+			skb_dequeue(&data->pending);
+	}
+
+	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
+	if (skb == NULL)
+		goto nla_put_failure;
+
+	msg_head = genlmsg_put(skb, 0, 0, &hwsim_genl_family, 0,
+			       HWSIM_CMD_FRAME);
+	if (msg_head == NULL) {
+		printk(KERN_DEBUG "mac80211_hwsim: problem with msg_head\n");
+		goto nla_put_failure;
+	}
+
+	NLA_PUT(skb, HWSIM_ATTR_ADDR_TRANSMITTER,
+		     sizeof(struct mac_address), data->addresses[1].addr);
+
+	/* We get the skb->data */
+	NLA_PUT(skb, HWSIM_ATTR_FRAME, my_skb->len, my_skb->data);
+
+	/* We get the flags for this transmission, and we translate them
+	 * to wmediumd flags */
+
+	if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)
+		hwsim_flags |= HWSIM_TX_CTL_REQ_TX_STATUS;
+
+	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
+		hwsim_flags |= HWSIM_TX_CTL_NO_ACK;
+
+	NLA_PUT_U32(skb, HWSIM_ATTR_FLAGS, hwsim_flags);
+
+	/* We get the tx control (rate and retries) info */
+
+	for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
+		tx_attempts[i].idx = info->status.rates[i].idx;
+		tx_attempts[i].count = info->status.rates[i].count;
+	}
+
+	NLA_PUT(skb, HWSIM_ATTR_TX_INFO,
+		     sizeof(struct hwsim_tx_rate)*IEEE80211_TX_MAX_RATES,
+		     tx_attempts);
+
+	/* We create a cookie to identify this skb */
+	NLA_PUT_U64(skb, HWSIM_ATTR_COOKIE, (unsigned long) my_skb);
+
+	genlmsg_end(skb, msg_head);
+	genlmsg_unicast(&init_net, skb, dst_pid);
+
+	/* Enqueue the packet */
+	skb_queue_tail(&data->pending, my_skb);
+	return;
+
+nla_put_failure:
+	printk(KERN_DEBUG "mac80211_hwsim: error occurred in %s\n", __func__);
+	/* Don't leak the netlink message or the pending frame on failure */
+	nlmsg_free(skb);
+	dev_kfree_skb(my_skb);
+}
+
+static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
+					  struct sk_buff *skb)
 {
 	struct mac80211_hwsim_data *data = hw->priv, *data2;
 	bool ack = false;
@@ -540,11 +654,11 @@
 	return ack;
 }
 
-
 static void mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 {
 	bool ack;
 	struct ieee80211_tx_info *txi;
+	int _pid;
 
 	mac80211_hwsim_monitor_rx(hw, skb);
 
@@ -554,7 +668,15 @@
 		return;
 	}
 
-	ack = mac80211_hwsim_tx_frame(hw, skb);
+	/* wmediumd mode check */
+	_pid = wmediumd_pid;
+
+	if (_pid)
+		return mac80211_hwsim_tx_frame_nl(hw, skb, _pid);
+
+	/* No wmediumd detected; use perfect medium simulation */
+	ack = mac80211_hwsim_tx_frame_no_nl(hw, skb);
+
 	if (ack && skb->len >= 16) {
 		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
 		mac80211_hwsim_monitor_ack(hw, hdr->addr2);
@@ -635,6 +757,7 @@
 	struct ieee80211_hw *hw = arg;
 	struct sk_buff *skb;
 	struct ieee80211_tx_info *info;
+	int _pid;
 
 	hwsim_check_magic(vif);
 
@@ -649,7 +772,14 @@
 	info = IEEE80211_SKB_CB(skb);
 
 	mac80211_hwsim_monitor_rx(hw, skb);
-	mac80211_hwsim_tx_frame(hw, skb);
+
+	/* wmediumd mode check */
+	_pid = wmediumd_pid;
+
+	if (_pid)
+		return mac80211_hwsim_tx_frame_nl(hw, skb, _pid);
+
+	mac80211_hwsim_tx_frame_no_nl(hw, skb);
 	dev_kfree_skb(skb);
 }
 
@@ -966,12 +1096,7 @@
 
 static void mac80211_hwsim_flush(struct ieee80211_hw *hw, bool drop)
 {
-	/*
-	 * In this special case, there's nothing we need to
-	 * do because hwsim does transmission synchronously.
-	 * In the future, when it does transmissions via
-	 * userspace, we may need to do something.
-	 */
+	/* Not implemented; queues exist only on the kernel side */
 }
 
 struct hw_scan_done {
@@ -1119,6 +1244,7 @@
 	struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
 	struct sk_buff *skb;
 	struct ieee80211_pspoll *pspoll;
+	int _pid;
 
 	if (!vp->assoc)
 		return;
@@ -1137,8 +1263,15 @@
 	pspoll->aid = cpu_to_le16(0xc000 | vp->aid);
 	memcpy(pspoll->bssid, vp->bssid, ETH_ALEN);
 	memcpy(pspoll->ta, mac, ETH_ALEN);
-	if (!mac80211_hwsim_tx_frame(data->hw, skb))
-		printk(KERN_DEBUG "%s: PS-Poll frame not ack'ed\n", __func__);
+
+	/* wmediumd mode check */
+	_pid = wmediumd_pid;
+
+	if (_pid)
+		return mac80211_hwsim_tx_frame_nl(data->hw, skb, _pid);
+
+	if (!mac80211_hwsim_tx_frame_no_nl(data->hw, skb))
+		printk(KERN_DEBUG "%s: PS-poll frame not ack'ed\n", __func__);
 	dev_kfree_skb(skb);
 }
 
@@ -1149,6 +1282,7 @@
 	struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
 	struct sk_buff *skb;
 	struct ieee80211_hdr *hdr;
+	int _pid;
 
 	if (!vp->assoc)
 		return;
@@ -1168,7 +1302,14 @@
 	memcpy(hdr->addr1, vp->bssid, ETH_ALEN);
 	memcpy(hdr->addr2, mac, ETH_ALEN);
 	memcpy(hdr->addr3, vp->bssid, ETH_ALEN);
-	if (!mac80211_hwsim_tx_frame(data->hw, skb))
+
+	/* wmediumd mode check */
+	_pid = wmediumd_pid;
+
+	if (_pid)
+		return mac80211_hwsim_tx_frame_nl(data->hw, skb, _pid);
+
+	if (!mac80211_hwsim_tx_frame_no_nl(data->hw, skb))
 		printk(KERN_DEBUG "%s: nullfunc frame not ack'ed\n", __func__);
 	dev_kfree_skb(skb);
 }
@@ -1248,6 +1389,273 @@
 			hwsim_fops_group_read, hwsim_fops_group_write,
 			"%llx\n");
 
+static struct mac80211_hwsim_data *get_hwsim_data_ref_from_addr(
+			     struct mac_address *addr)
+{
+	struct mac80211_hwsim_data *data;
+	bool _found = false;
+
+	spin_lock_bh(&hwsim_radio_lock);
+	list_for_each_entry(data, &hwsim_radios, list) {
+		if (memcmp(data->addresses[1].addr, addr,
+			  sizeof(struct mac_address)) == 0) {
+			_found = true;
+			break;
+		}
+	}
+	spin_unlock_bh(&hwsim_radio_lock);
+
+	if (!_found)
+		return NULL;
+
+	return data;
+}
+
+static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
+					   struct genl_info *info)
+{
+
+	struct ieee80211_hdr *hdr;
+	struct mac80211_hwsim_data *data2;
+	struct ieee80211_tx_info *txi;
+	struct hwsim_tx_rate *tx_attempts;
+	struct sk_buff __user *ret_skb;
+	struct sk_buff *skb, *tmp;
+	struct mac_address *src;
+	unsigned int hwsim_flags;
+
+	int i;
+	bool found = false;
+
+	if (!info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER] ||
+	   !info->attrs[HWSIM_ATTR_FLAGS] ||
+	   !info->attrs[HWSIM_ATTR_COOKIE] ||
+	   !info->attrs[HWSIM_ATTR_SIGNAL] ||
+	   !info->attrs[HWSIM_ATTR_TX_INFO])
+		goto out;
+
+	src = (struct mac_address *)nla_data(
+				   info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER]);
+	hwsim_flags = nla_get_u32(info->attrs[HWSIM_ATTR_FLAGS]);
+
+	ret_skb = (struct sk_buff __user *)
+		  (unsigned long) nla_get_u64(info->attrs[HWSIM_ATTR_COOKIE]);
+
+	data2 = get_hwsim_data_ref_from_addr(src);
+
+	if (data2 == NULL)
+		goto out;
+
+	/* look for the skb matching the cookie passed back from user */
+	skb_queue_walk_safe(&data2->pending, skb, tmp) {
+		if (skb == ret_skb) {
+			skb_unlink(skb, &data2->pending);
+			found = true;
+			break;
+		}
+	}
+
+	/* not found */
+	if (!found)
+		goto out;
+
+	/* Tx info received because the frame was broadcast in user space,
+	 * so we get all the necessary info: tx attempts and skb control buffer */
+
+	tx_attempts = (struct hwsim_tx_rate *)nla_data(
+		       info->attrs[HWSIM_ATTR_TX_INFO]);
+
+	/* now send back TX status */
+	txi = IEEE80211_SKB_CB(skb);
+
+	if (txi->control.vif)
+		hwsim_check_magic(txi->control.vif);
+	if (txi->control.sta)
+		hwsim_check_sta_magic(txi->control.sta);
+
+	ieee80211_tx_info_clear_status(txi);
+
+	for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
+		txi->status.rates[i].idx = tx_attempts[i].idx;
+		txi->status.rates[i].count = tx_attempts[i].count;
+		/*txi->status.rates[i].flags = 0;*/
+	}
+
+	txi->status.ack_signal = nla_get_u32(info->attrs[HWSIM_ATTR_SIGNAL]);
+
+	if (!(hwsim_flags & HWSIM_TX_CTL_NO_ACK) &&
+	   (hwsim_flags & HWSIM_TX_STAT_ACK)) {
+		if (skb->len >= 16) {
+			hdr = (struct ieee80211_hdr *) skb->data;
+			mac80211_hwsim_monitor_ack(data2->hw, hdr->addr2);
+		}
+	}
+	ieee80211_tx_status_irqsafe(data2->hw, skb);
+	return 0;
+out:
+	return -EINVAL;
+
+}
+
+static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
+					  struct genl_info *info)
+{
+
+	struct mac80211_hwsim_data  *data2;
+	struct ieee80211_rx_status rx_status;
+	struct mac_address *dst;
+	int frame_data_len;
+	char *frame_data;
+	struct sk_buff *skb = NULL;
+
+	if (!info->attrs[HWSIM_ATTR_ADDR_RECEIVER] ||
+	   !info->attrs[HWSIM_ATTR_FRAME] ||
+	   !info->attrs[HWSIM_ATTR_RX_RATE] ||
+	   !info->attrs[HWSIM_ATTR_SIGNAL])
+		goto out;
+
+	dst = (struct mac_address *)nla_data(
+				   info->attrs[HWSIM_ATTR_ADDR_RECEIVER]);
+
+	frame_data_len = nla_len(info->attrs[HWSIM_ATTR_FRAME]);
+	frame_data = (char *)nla_data(info->attrs[HWSIM_ATTR_FRAME]);
+
+	/* Allocate new skb here */
+	skb = alloc_skb(frame_data_len, GFP_KERNEL);
+	if (skb == NULL)
+		goto err;
+
+	if (frame_data_len > IEEE80211_MAX_DATA_LEN)
+		goto err;
+
+	/* Copy the data */
+	memcpy(skb_put(skb, frame_data_len), frame_data, frame_data_len);
+
+	data2 = get_hwsim_data_ref_from_addr(dst);
+
+	if (data2 == NULL)
+		goto out;
+
+	/* check if radio is configured properly */
+
+	if (data2->idle || !data2->started || !data2->channel)
+		goto out;
+
+	/*A frame is received from user space*/
+	memset(&rx_status, 0, sizeof(rx_status));
+	rx_status.freq = data2->channel->center_freq;
+	rx_status.band = data2->channel->band;
+	rx_status.rate_idx = nla_get_u32(info->attrs[HWSIM_ATTR_RX_RATE]);
+	rx_status.signal = nla_get_u32(info->attrs[HWSIM_ATTR_SIGNAL]);
+
+	memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
+	ieee80211_rx_irqsafe(data2->hw, skb);
+
+	return 0;
+err:
+	printk(KERN_DEBUG "mac80211_hwsim: error occurred in %s\n", __func__);
+out:
+	dev_kfree_skb(skb);
+	return -EINVAL;
+}
+
+static int hwsim_register_received_nl(struct sk_buff *skb_2,
+				      struct genl_info *info)
+{
+	if (info == NULL)
+		goto out;
+
+	wmediumd_pid = info->snd_pid;
+
+	printk(KERN_DEBUG "mac80211_hwsim: received a REGISTER, "
+	       "switching to wmediumd mode with pid %d\n", info->snd_pid);
+
+	return 0;
+out:
+	printk(KERN_DEBUG "mac80211_hwsim: error occurred in %s\n", __func__);
+	return -EINVAL;
+}
+
+/* Generic Netlink operations array */
+static struct genl_ops hwsim_ops[] = {
+	{
+		.cmd = HWSIM_CMD_REGISTER,
+		.policy = hwsim_genl_policy,
+		.doit = hwsim_register_received_nl,
+		.flags = GENL_ADMIN_PERM,
+	},
+	{
+		.cmd = HWSIM_CMD_FRAME,
+		.policy = hwsim_genl_policy,
+		.doit = hwsim_cloned_frame_received_nl,
+	},
+	{
+		.cmd = HWSIM_CMD_TX_INFO_FRAME,
+		.policy = hwsim_genl_policy,
+		.doit = hwsim_tx_info_frame_received_nl,
+	},
+};
+
+static int mac80211_hwsim_netlink_notify(struct notifier_block *nb,
+					 unsigned long state,
+					 void *_notify)
+{
+	struct netlink_notify *notify = _notify;
+
+	if (state != NETLINK_URELEASE)
+		return NOTIFY_DONE;
+
+	if (notify->pid == wmediumd_pid) {
+		printk(KERN_INFO "mac80211_hwsim: wmediumd released netlink"
+		       " socket, switching to perfect channel medium\n");
+		wmediumd_pid = 0;
+	}
+	return NOTIFY_DONE;
+
+}
+
+static struct notifier_block hwsim_netlink_notifier = {
+	.notifier_call = mac80211_hwsim_netlink_notify,
+};
+
+static int hwsim_init_netlink(void)
+{
+	int rc;
+	printk(KERN_INFO "mac80211_hwsim: initializing netlink\n");
+
+	wmediumd_pid = 0;
+
+	rc = genl_register_family_with_ops(&hwsim_genl_family,
+		hwsim_ops, ARRAY_SIZE(hwsim_ops));
+	if (rc)
+		goto failure;
+
+	rc = netlink_register_notifier(&hwsim_netlink_notifier);
+	if (rc) {
+		genl_unregister_family(&hwsim_genl_family);
+		goto failure;
+	}
+
+	return 0;
+
+failure:
+	printk(KERN_DEBUG "mac80211_hwsim: error occurred in %s\n", __func__);
+	return -EINVAL;
+}
+
+static void hwsim_exit_netlink(void)
+{
+	int ret;
+
+	printk(KERN_INFO "mac80211_hwsim: closing netlink\n");
+	/* unregister the notifier */
+	netlink_unregister_notifier(&hwsim_netlink_notifier);
+	/* unregister the family */
+	ret = genl_unregister_family(&hwsim_genl_family);
+	if (ret)
+		printk(KERN_DEBUG "mac80211_hwsim: "
+		       "failed to unregister family (%i)\n", ret);
+}
+
 static int __init init_mac80211_hwsim(void)
 {
 	int i, err = 0;
@@ -1298,6 +1706,7 @@
 			goto failed_drvdata;
 		}
 		data->dev->driver = &mac80211_hwsim_driver;
+		skb_queue_head_init(&data->pending);
 
 		SET_IEEE80211_DEV(hw, data->dev);
 		addr[3] = i >> 8;
@@ -1379,6 +1788,10 @@
 		data->group = 1;
 		mutex_init(&data->mutex);
 
+		/* Enable frame retransmissions for lossy channels */
+		hw->max_rates = 4;
+		hw->max_rate_tries = 11;
+
 		/* Work to be done prior to ieee80211_register_hw() */
 		switch (regtest) {
 		case HWSIM_REGTEST_DISABLED:
@@ -1515,12 +1928,29 @@
 	if (hwsim_mon == NULL)
 		goto failed;
 
-	err = register_netdev(hwsim_mon);
+	rtnl_lock();
+
+	err = dev_alloc_name(hwsim_mon, hwsim_mon->name);
 	if (err < 0)
 		goto failed_mon;
 
+
+	err = register_netdevice(hwsim_mon);
+	if (err < 0)
+		goto failed_mon;
+
+	rtnl_unlock();
+
+	err = hwsim_init_netlink();
+	if (err < 0)
+		goto failed_nl;
+
 	return 0;
 
+failed_nl:
+	printk(KERN_DEBUG "mac80211_hwsim: failed initializing netlink\n");
+	return err;
+
 failed_mon:
 	rtnl_unlock();
 	free_netdev(hwsim_mon);
@@ -1541,6 +1971,8 @@
 {
 	printk(KERN_DEBUG "mac80211_hwsim: unregister radios\n");
 
+	hwsim_exit_netlink();
+
 	mac80211_hwsim_free();
 	unregister_netdev(hwsim_mon);
 }
diff --git a/drivers/net/wireless/mac80211_hwsim.h b/drivers/net/wireless/mac80211_hwsim.h
new file mode 100644
index 0000000..afaad5a
--- /dev/null
+++ b/drivers/net/wireless/mac80211_hwsim.h
@@ -0,0 +1,133 @@
+/*
+ * mac80211_hwsim - software simulator of 802.11 radio(s) for mac80211
+ * Copyright (c) 2008, Jouni Malinen <j@w1.fi>
+ * Copyright (c) 2011, Javier Lopez <jlopex@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MAC80211_HWSIM_H
+#define __MAC80211_HWSIM_H
+
+/**
+ * enum hwsim_tx_control_flags - flags to describe transmission info/status
+ *
+ * These flags are used to give wmediumd extra information in order to
+ * modify its behavior for each frame.
+ *
+ * @HWSIM_TX_CTL_REQ_TX_STATUS: require TX status callback for this frame.
+ * @HWSIM_TX_CTL_NO_ACK: tell wmediumd not to wait for an ack
+ * @HWSIM_TX_STAT_ACK: frame was acknowledged
+ *
+ */
+enum hwsim_tx_control_flags {
+	HWSIM_TX_CTL_REQ_TX_STATUS		= BIT(0),
+	HWSIM_TX_CTL_NO_ACK			= BIT(1),
+	HWSIM_TX_STAT_ACK			= BIT(2),
+};
+
+/**
+ * DOC: Frame transmission/registration support
+ *
+ * Frame transmission and registration support exists to allow userspace
+ * entities such as wmediumd to receive and process all broadcast
+ * frames from a mac80211_hwsim radio device.
+ *
+ * This allows user space applications to decide whether a frame should be
+ * dropped, and so to implement a wireless medium simulator in user space.
+ *
+ * Registration is done by sending a register message to the driver; the
+ * registration is dropped automatically if the user application stops
+ * responding to sent frames.
+ * Once registered, the user application has to take responsibility for
+ * broadcasting the frames to all listening mac80211_hwsim radio
+ * interfaces.
+ *
+ * For more technical details, see the corresponding command descriptions
+ * below.
+ */
+
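
[Editor's note] As a concrete illustration of the registration flow described above, here is a minimal userspace sketch of the register step. It assumes libnl-3; the program structure and error handling are illustrative only, not how wmediumd itself is necessarily written:

	#include <netlink/netlink.h>
	#include <netlink/genl/genl.h>
	#include <netlink/genl/ctrl.h>

	#define HWSIM_CMD_REGISTER 1	/* value from the command enum below */

	int main(void)
	{
		struct nl_sock *sock = nl_socket_alloc();
		struct nl_msg *msg;
		int family;

		if (!sock || genl_connect(sock))
			return 1;

		/* Resolve the "MAC80211_HWSIM" family registered by the driver */
		family = genl_ctrl_resolve(sock, "MAC80211_HWSIM");
		if (family < 0)
			return 1;

		/* HWSIM_CMD_REGISTER: ask the driver to unicast all frames to us */
		msg = nlmsg_alloc();
		genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
			    HWSIM_CMD_REGISTER, 1);
		nl_send_auto(sock, msg);
		nlmsg_free(msg);

		/* HWSIM_CMD_FRAME messages now arrive on this socket; closing
		 * it triggers the driver's NETLINK_URELEASE notifier, which
		 * falls back to perfect-medium simulation. */
		return 0;
	}
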
+/**
+ * enum hwsim_commands - supported hwsim commands
+ *
+ * @HWSIM_CMD_UNSPEC: unspecified command to catch errors
+ *
+ * @HWSIM_CMD_REGISTER: request to register and receive all broadcast
+ *	frames transmitted by any mac80211_hwsim radio device.
+ * @HWSIM_CMD_FRAME: send/receive a broadcasted frame from/to kernel/user
+ * space, uses:
+ *	%HWSIM_ATTR_ADDR_TRANSMITTER, %HWSIM_ATTR_ADDR_RECEIVER,
+ *	%HWSIM_ATTR_FRAME, %HWSIM_ATTR_FLAGS, %HWSIM_ATTR_RX_RATE,
+ *	%HWSIM_ATTR_SIGNAL, %HWSIM_ATTR_COOKIE
+ * @HWSIM_CMD_TX_INFO_FRAME: Transmission info report from user space to
+ * kernel, uses:
+ *	%HWSIM_ATTR_ADDR_TRANSMITTER, %HWSIM_ATTR_FLAGS,
+ *	%HWSIM_ATTR_TX_INFO, %HWSIM_ATTR_SIGNAL, %HWSIM_ATTR_COOKIE
+ * @__HWSIM_CMD_MAX: enum limit
+ */
+enum {
+	HWSIM_CMD_UNSPEC,
+	HWSIM_CMD_REGISTER,
+	HWSIM_CMD_FRAME,
+	HWSIM_CMD_TX_INFO_FRAME,
+	__HWSIM_CMD_MAX,
+};
+#define HWSIM_CMD_MAX (__HWSIM_CMD_MAX - 1)
+
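
[Editor's note] To make the command/attribute pairing concrete, here is a hedged sketch of the HWSIM_CMD_TX_INFO_FRAME reply a user-space medium simulator would send after delivering a frame. send_tx_info() is an invented helper name, libnl-3 is assumed as before, and the enums and struct hwsim_tx_rate from this header are assumed visible to the build; the attribute set mirrors what hwsim_tx_info_frame_received_nl() in mac80211_hwsim.c expects, including HWSIM_ATTR_SIGNAL, which feeds ack_signal:

	#include <stdint.h>
	#include <netlink/netlink.h>
	#include <netlink/genl/genl.h>

	static int send_tx_info(struct nl_sock *sock, int family,
				const uint8_t transmitter[6], uint32_t flags,
				uint32_t signal, uint64_t cookie,
				const struct hwsim_tx_rate *rates)
	{
		struct nl_msg *msg = nlmsg_alloc();

		if (!msg)
			return -1;

		genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
			    HWSIM_CMD_TX_INFO_FRAME, 1);
		nla_put(msg, HWSIM_ATTR_ADDR_TRANSMITTER, 6, transmitter);
		/* Report a successful delivery back to the driver */
		nla_put_u32(msg, HWSIM_ATTR_FLAGS, flags | HWSIM_TX_STAT_ACK);
		nla_put_u32(msg, HWSIM_ATTR_SIGNAL, signal);
		nla_put(msg, HWSIM_ATTR_TX_INFO,
			IEEE80211_TX_MAX_RATES * sizeof(struct hwsim_tx_rate),
			rates);
		/* Echo the cookie so the driver can find the pending skb */
		nla_put_u64(msg, HWSIM_ATTR_COOKIE, cookie);

		nl_send_auto(sock, msg);
		nlmsg_free(msg);
		return 0;
	}
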
+/**
+ * enum hwsim_attrs - hwsim netlink attributes
+ *
+ * @HWSIM_ATTR_UNSPEC: unspecified attribute to catch errors
+ *
+ * @HWSIM_ATTR_ADDR_RECEIVER: MAC address of the radio device that
+ *	the frame is broadcasted to
+ * @HWSIM_ATTR_ADDR_TRANSMITTER: MAC address of the radio device that
+ *	the frame was broadcasted from
+ * @HWSIM_ATTR_FRAME: Data array
+ * @HWSIM_ATTR_FLAGS: mac80211 transmission flags, used to process
+ *	the frame properly in user space
+ * @HWSIM_ATTR_RX_RATE: estimated rx rate index for this frame in user
+ *	space
+ * @HWSIM_ATTR_SIGNAL: estimated RX signal for this frame in user
+ *	space
+ * @HWSIM_ATTR_TX_INFO: ieee80211_tx_rate array
+ * @HWSIM_ATTR_COOKIE: sk_buff cookie to identify the frame
+ * @__HWSIM_ATTR_MAX: enum limit
+ */
+enum {
+	HWSIM_ATTR_UNSPEC,
+	HWSIM_ATTR_ADDR_RECEIVER,
+	HWSIM_ATTR_ADDR_TRANSMITTER,
+	HWSIM_ATTR_FRAME,
+	HWSIM_ATTR_FLAGS,
+	HWSIM_ATTR_RX_RATE,
+	HWSIM_ATTR_SIGNAL,
+	HWSIM_ATTR_TX_INFO,
+	HWSIM_ATTR_COOKIE,
+	__HWSIM_ATTR_MAX,
+};
+#define HWSIM_ATTR_MAX (__HWSIM_ATTR_MAX - 1)
+
+/**
+ * struct hwsim_tx_rate - rate selection/status
+ *
+ * @idx: rate index to attempt to send with
+ * @count: number of tries in this rate before going to the next rate
+ *
+ * A value of -1 for @idx indicates an invalid rate and, if used
+ * in an array of retry rates, that no more rates should be tried.
+ *
+ * When used for transmit status reporting, the driver should
+ * always report the rate and number of retries used.
+ *
+ */
+struct hwsim_tx_rate {
+	s8 idx;
+	u8 count;
+} __packed;
+
+#endif /* __MAC80211_HWSIM_H */
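
[Editor's note] Since the -1 sentinel above is easy to get wrong, here is a standalone sketch of walking a retry chain built from hwsim_tx_rate entries. The struct is re-declared locally and IEEE80211_TX_MAX_RATES is assumed to be 5 (its mac80211 value at this time) so the example compiles on its own:

	#include <stdint.h>
	#include <stdio.h>

	struct hwsim_tx_rate { int8_t idx; uint8_t count; };
	#define IEEE80211_TX_MAX_RATES 5

	/* Sum the tries spent across the chain; idx == -1 ends it early */
	static unsigned int total_attempts(const struct hwsim_tx_rate *r)
	{
		unsigned int i, n = 0;

		for (i = 0; i < IEEE80211_TX_MAX_RATES && r[i].idx >= 0; i++)
			n += r[i].count;
		return n;
	}

	int main(void)
	{
		struct hwsim_tx_rate chain[IEEE80211_TX_MAX_RATES] = {
			{ 7, 3 }, { 4, 2 }, { 0, 1 }, { -1, 0 }, { -1, 0 },
		};

		printf("%u attempts\n", total_attempts(chain));	/* "6 attempts" */
		return 0;
	}
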
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c
index f807447..1a453a6 100644
--- a/drivers/net/wireless/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/mwifiex/11n_aggr.c
@@ -164,12 +164,13 @@
 	struct mwifiex_tx_param tx_param;
 	struct txpd *ptx_pd = NULL;
 
-	if (skb_queue_empty(&pra_list->skb_head)) {
+	skb_src = skb_peek(&pra_list->skb_head);
+	if (!skb_src) {
 		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
 				       ra_list_flags);
 		return 0;
 	}
-	skb_src = skb_peek(&pra_list->skb_head);
+
 	tx_info_src = MWIFIEX_SKB_TXCB(skb_src);
 	skb_aggr = dev_alloc_skb(adapter->tx_buf_size);
 	if (!skb_aggr) {
@@ -184,17 +185,15 @@
 	tx_info_aggr->bss_index = tx_info_src->bss_index;
 	skb_aggr->priority = skb_src->priority;
 
-	while (skb_src && ((skb_headroom(skb_aggr) + skb_src->len
-					+ LLC_SNAP_LEN)
-				<= adapter->tx_buf_size)) {
+	do {
+		/* Check if AMSDU can accommodate this MSDU */
+		if (skb_tailroom(skb_aggr) < (skb_src->len + LLC_SNAP_LEN))
+			break;
 
-		if (!skb_queue_empty(&pra_list->skb_head))
-			skb_src = skb_dequeue(&pra_list->skb_head);
-		else
-			skb_src = NULL;
+		skb_src = skb_dequeue(&pra_list->skb_head);
 
-		if (skb_src)
-			pra_list->total_pkts_size -= skb_src->len;
+		pra_list->total_pkts_size -= skb_src->len;
+		pra_list->total_pkts--;
 
 		atomic_dec(&priv->wmm.tx_pkts_queued);
 
@@ -212,11 +211,15 @@
 			return -1;
 		}
 
-		if (!skb_queue_empty(&pra_list->skb_head))
-			skb_src = skb_peek(&pra_list->skb_head);
-		else
-			skb_src = NULL;
-	}
+		if (skb_tailroom(skb_aggr) < pad) {
+			pad = 0;
+			break;
+		}
+		skb_put(skb_aggr, pad);
+
+		skb_src = skb_peek(&pra_list->skb_head);
+
+	} while (skb_src);
 
 	spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags);
 
@@ -230,11 +233,19 @@
 
 	skb_push(skb_aggr, headroom);
 
-	tx_param.next_pkt_len = ((pra_list->total_pkts_size) ?
-				 (((pra_list->total_pkts_size) >
-				   adapter->tx_buf_size) ? adapter->
-				  tx_buf_size : pra_list->total_pkts_size +
-				  LLC_SNAP_LEN + sizeof(struct txpd)) : 0);
+	/*
+	 * Padding per MSDU will affect the length of the next
+	 * packet, so the exact length of the next packet is
+	 * uncertain here.
+	 *
+	 * Also, aggregating the transmission buffer while
+	 * downloading the data to the card won't gain much
+	 * for AMSDU packets, as AMSDU packets already utilize
+	 * the transmission buffer space to the maximum
+	 * (adapter->tx_buf_size).
+	 */
+	tx_param.next_pkt_len = 0;
+
 	ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA,
 					     skb_aggr->data,
 					     skb_aggr->len, &tx_param);
@@ -258,6 +269,7 @@
 		skb_queue_tail(&pra_list->skb_head, skb_aggr);
 
 		pra_list->total_pkts_size += skb_aggr->len;
+		pra_list->total_pkts++;
 
 		atomic_inc(&priv->wmm.tx_pkts_queued);
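
[Editor's note] On the "padding per MSDU" comment in the hunk above: A-MSDU subframes are aligned to 4-byte boundaries, so the buffer space that N queued MSDUs will consume cannot be derived from their payload lengths alone. A minimal sketch of the alignment arithmetic; amsdu_pad() is an invented name for illustration:

	#include <stdio.h>

	/* Bytes of padding to align a subframe of 'len' bytes to 4 bytes */
	static unsigned int amsdu_pad(unsigned int len)
	{
		return (4 - (len & 3)) & 3;	/* 0..3 */
	}

	int main(void)
	{
		printf("%u\n", amsdu_pad(1501));	/* prints 3 */
		printf("%u\n", amsdu_pad(1500));	/* prints 0 */
		return 0;
	}
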
 
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index f058225..4f43443 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -35,8 +35,6 @@
 
 static int drv_mode = DRV_MODE_STA;
 
-static char fw_name[32] = DEFAULT_FW_NAME;
-
 /* Supported drv_mode table */
 static struct mwifiex_drv_mode mwifiex_drv_mode_tbl[] = {
 	{
@@ -384,20 +382,8 @@
 
 	memset(&fw, 0, sizeof(struct mwifiex_fw_image));
 
-	switch (adapter->revision_id) {
-	case SD8787_W0:
-	case SD8787_W1:
-		strcpy(fw_name, SD8787_W1_FW_NAME);
-		break;
-	case SD8787_A0:
-	case SD8787_A1:
-		strcpy(fw_name, SD8787_AX_FW_NAME);
-		break;
-	default:
-		break;
-	}
-
-	err = request_firmware(&adapter->firmware, fw_name, adapter->dev);
+	err = request_firmware(&adapter->firmware, adapter->fw_name,
+			       adapter->dev);
 	if (err < 0) {
 		dev_err(adapter->dev, "request_firmware() returned"
 				" error code %#x\n", err);
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index 8316b3c..57b183a 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -48,15 +48,6 @@
 
 #define DRV_MODE_STA       0x1
 
-#define SD8787_W0   0x30
-#define SD8787_W1   0x31
-#define SD8787_A0   0x40
-#define SD8787_A1   0x41
-
-#define DEFAULT_FW_NAME "mrvl/sd8787_uapsta.bin"
-#define SD8787_W1_FW_NAME "mrvl/sd8787_uapsta_w1.bin"
-#define SD8787_AX_FW_NAME "mrvl/sd8787_uapsta.bin"
-
 struct mwifiex_drv_mode {
 	u16 drv_mode;
 	u16 intf_num;
@@ -190,6 +181,7 @@
 	struct sk_buff_head skb_head;
 	u8 ra[ETH_ALEN];
 	u32 total_pkts_size;
+	u32 total_pkts;
 	u32 is_11n_enabled;
 };
 
@@ -576,10 +568,10 @@
 	u8 priv_num;
 	struct mwifiex_drv_mode *drv_mode;
 	const struct firmware *firmware;
+	char fw_name[32];
 	struct device *dev;
 	bool surprise_removed;
 	u32 fw_release_number;
-	u32 revision_id;
 	u16 init_wait_q_woken;
 	wait_queue_head_t init_wait_q;
 	void *card;
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
index d425dbd..4327b6d 100644
--- a/drivers/net/wireless/mwifiex/sdio.c
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -1531,6 +1531,7 @@
 	sdio_set_drvdata(func, card);
 
 	adapter->dev = &func->dev;
+	strcpy(adapter->fw_name, SD8787_DEFAULT_FW_NAME);
 
 	return 0;
 
@@ -1552,7 +1553,6 @@
  *        the first interrupt got from bootloader
  *      - Disable host interrupt mask register
  *      - Get SDIO port
- *      - Get revision ID
  *      - Initialize SDIO variables in card
  *      - Allocate MP registers
  *      - Allocate MPA Tx and Rx buffers
@@ -1576,10 +1576,6 @@
 	/* Get SDIO ioport */
 	mwifiex_init_sdio_ioport(adapter);
 
-	/* Get revision ID */
-#define REV_ID_REG	0x5c
-	mwifiex_read_reg(adapter, REV_ID_REG, &adapter->revision_id);
-
 	/* Initialize SDIO variables in card */
 	card->mp_rd_bitmap = 0;
 	card->mp_wr_bitmap = 0;
@@ -1751,4 +1747,4 @@
 MODULE_DESCRIPTION("Marvell WiFi-Ex SDIO Driver version " SDIO_VERSION);
 MODULE_VERSION(SDIO_VERSION);
 MODULE_LICENSE("GPL v2");
-MODULE_FIRMWARE("sd8787.bin");
+MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin");
diff --git a/drivers/net/wireless/mwifiex/sdio.h b/drivers/net/wireless/mwifiex/sdio.h
index 4e97e90..c925376 100644
--- a/drivers/net/wireless/mwifiex/sdio.h
+++ b/drivers/net/wireless/mwifiex/sdio.h
@@ -28,6 +28,8 @@
 
 #include "main.h"
 
+#define SD8787_DEFAULT_FW_NAME "mrvl/sd8787_uapsta.bin"
+
 #define BLOCK_MODE	1
 #define BYTE_MODE	0
 
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index 91634da..67b2d0b 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -121,6 +121,7 @@
 	memcpy(ra_list->ra, ra, ETH_ALEN);
 
 	ra_list->total_pkts_size = 0;
+	ra_list->total_pkts = 0;
 
 	dev_dbg(adapter->dev, "info: allocated ra_list %p\n", ra_list);
 
@@ -645,6 +646,7 @@
 	skb_queue_tail(&ra_list->skb_head, skb);
 
 	ra_list->total_pkts_size += skb->len;
+	ra_list->total_pkts++;
 
 	atomic_inc(&priv->wmm.tx_pkts_queued);
 
@@ -971,28 +973,6 @@
 }
 
 /*
- * This function gets the number of packets in the Tx queue of a
- * particular RA list.
- */
-static int
-mwifiex_num_pkts_in_txq(struct mwifiex_private *priv,
-			struct mwifiex_ra_list_tbl *ptr, int max_buf_size)
-{
-	int count = 0, total_size = 0;
-	struct sk_buff *skb, *tmp;
-
-	skb_queue_walk_safe(&ptr->skb_head, skb, tmp) {
-		total_size += skb->len;
-		if (total_size < max_buf_size)
-			++count;
-		else
-			break;
-	}
-
-	return count;
-}
-
-/*
  * This function sends a single packet to firmware for transmission.
  */
 static void
@@ -1019,6 +999,7 @@
 	dev_dbg(adapter->dev, "data: dequeuing the packet %p %p\n", ptr, skb);
 
 	ptr->total_pkts_size -= skb->len;
+	ptr->total_pkts--;
 
 	if (!skb_queue_empty(&ptr->skb_head))
 		skb_next = skb_peek(&ptr->skb_head);
@@ -1044,6 +1025,7 @@
 		skb_queue_tail(&ptr->skb_head, skb);
 
 		ptr->total_pkts_size += skb->len;
+		ptr->total_pkts++;
 		tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
 		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
 				       ra_list_flags);
@@ -1231,9 +1213,9 @@
 		}
 /* Minimum number of AMSDU */
 #define MIN_NUM_AMSDU 2
+
 		if (mwifiex_is_amsdu_allowed(priv, tid) &&
-		    (mwifiex_num_pkts_in_txq(priv, ptr, adapter->tx_buf_size) >=
-		     MIN_NUM_AMSDU))
+				(ptr->total_pkts >= MIN_NUM_AMSDU))
 			mwifiex_11n_aggregate_pkt(priv, ptr, INTF_HEADER_LEN,
 						  ptr_index, flags);
 			/* ra_list_spinlock has been freed in
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index aeac3cc..d633edb 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -10,6 +10,7 @@
  */
 
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
diff --git a/drivers/net/wireless/orinoco/main.c b/drivers/net/wireless/orinoco/main.c
index 62c6b2b..b0f233f 100644
--- a/drivers/net/wireless/orinoco/main.c
+++ b/drivers/net/wireless/orinoco/main.c
@@ -1958,7 +1958,7 @@
 
 		evstat = hermes_read_regn(hw, EVSTAT);
 		events = evstat & hw->inten;
-	};
+	}
 
 	orinoco_unlock(priv, &flags);
 	return IRQ_HANDLED;
diff --git a/drivers/net/wireless/p54/p54pci.h b/drivers/net/wireless/p54/p54pci.h
index ee9bc62..7aa509f 100644
--- a/drivers/net/wireless/p54/p54pci.h
+++ b/drivers/net/wireless/p54/p54pci.h
@@ -1,5 +1,6 @@
 #ifndef P54PCI_H
 #define P54PCI_H
+#include <linux/interrupt.h>
 
 /*
  * Defines for PCI based mac80211 Prism54 driver
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
index ec2c75d..5d0f615 100644
--- a/drivers/net/wireless/prism54/islpci_dev.c
+++ b/drivers/net/wireless/prism54/islpci_dev.c
@@ -18,6 +18,7 @@
  *
  */
 
+#include <linux/hardirq.h>
 #include <linux/module.h>
 #include <linux/slab.h>
 
diff --git a/drivers/net/wireless/prism54/islpci_dev.h b/drivers/net/wireless/prism54/islpci_dev.h
index c4d0f19..c404038 100644
--- a/drivers/net/wireless/prism54/islpci_dev.h
+++ b/drivers/net/wireless/prism54/islpci_dev.h
@@ -22,6 +22,7 @@
 #ifndef _ISLPCI_DEV_H
 #define _ISLPCI_DEV_H
 
+#include <linux/irqreturn.h>
 #include <linux/netdevice.h>
 #include <linux/wireless.h>
 #include <net/iw_handler.h>
diff --git a/drivers/net/wireless/prism54/islpci_hotplug.c b/drivers/net/wireless/prism54/islpci_hotplug.c
index b5e64d7..9e68e0c 100644
--- a/drivers/net/wireless/prism54/islpci_hotplug.c
+++ b/drivers/net/wireless/prism54/islpci_hotplug.c
@@ -17,6 +17,7 @@
  *
  */
 
+#include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/delay.h>
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index b2f8b8f..a0a7854 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -83,14 +83,12 @@
 config RT2800PCI_RT35XX
 	bool "rt2800pci - Include support for rt35xx devices (EXPERIMENTAL)"
 	depends on EXPERIMENTAL
-	default n
+	default y
 	---help---
 	  This adds support for rt35xx wireless chipset family to the
 	  rt2800pci driver.
 	  Supported chips: RT3060, RT3062, RT3562, RT3592
 
-	  Support for these devices is non-functional at the moment and is
-	  intended for testers and developers.
-
 config RT2800PCI_RT53XX
        bool "rt2800pci - Include support for rt53xx devices (EXPERIMENTAL)"
@@ -154,15 +152,12 @@
 config RT2800USB_RT35XX
 	bool "rt2800usb - Include support for rt35xx devices (EXPERIMENTAL)"
 	depends on EXPERIMENTAL
-	default n
+	default y
 	---help---
 	  This adds support for rt35xx wireless chipset family to the
 	  rt2800usb driver.
 	  Supported chips: RT3572
 
-	  Support for these devices is non-functional at the moment and is
-	  intended for testers and developers.
-
 config RT2800USB_RT53XX
        bool "rt2800usb - Include support for rt53xx devices (EXPERIMENTAL)"
        depends on EXPERIMENTAL
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index f67bc9b..c69a7d7 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -1740,6 +1740,7 @@
 /*
  * BBP 3: RX Antenna
  */
+#define BBP3_RX_ADC			FIELD8(0x03)
 #define BBP3_RX_ANTENNA			FIELD8(0x18)
 #define BBP3_HT40_MINUS			FIELD8(0x20)
 
@@ -1783,6 +1784,8 @@
 #define RFCSR1_TX0_PD			FIELD8(0x08)
 #define RFCSR1_RX1_PD			FIELD8(0x10)
 #define RFCSR1_TX1_PD			FIELD8(0x20)
+#define RFCSR1_RX2_PD			FIELD8(0x40)
+#define RFCSR1_TX2_PD			FIELD8(0x80)
 
 /*
  * RFCSR 2:
@@ -1790,15 +1793,25 @@
 #define RFCSR2_RESCAL_EN		FIELD8(0x80)
 
 /*
+ * RFCSR 5:
+ */
+#define RFCSR5_R1			FIELD8(0x0c)
+
+/*
  * RFCSR 6:
  */
 #define RFCSR6_R1			FIELD8(0x03)
 #define RFCSR6_R2			FIELD8(0x40)
+#define RFCSR6_TXDIV			FIELD8(0x0c)
 
 /*
  * RFCSR 7:
  */
 #define RFCSR7_RF_TUNING		FIELD8(0x01)
+#define RFCSR7_R02			FIELD8(0x07)
+#define RFCSR7_R3			FIELD8(0x08)
+#define RFCSR7_R45			FIELD8(0x30)
+#define RFCSR7_R67			FIELD8(0xc0)
 
 /*
  * RFCSR 11:
@@ -1809,11 +1822,13 @@
  * RFCSR 12:
  */
 #define RFCSR12_TX_POWER		FIELD8(0x1f)
+#define RFCSR12_DR0			FIELD8(0xe0)
 
 /*
  * RFCSR 13:
  */
 #define RFCSR13_TX_POWER		FIELD8(0x1f)
+#define RFCSR13_DR0			FIELD8(0xe0)
 
 /*
  * RFCSR 15:
@@ -2256,6 +2271,7 @@
 #define MCU_ANT_SELECT			0X73
 #define MCU_BBP_SIGNAL			0x80
 #define MCU_POWER_SAVE			0x83
+#define MCU_BAND_SELECT			0x91
 
 /*
  * MCU mailbox tokens
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 2a6aa85..84ab7d1 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -401,7 +401,8 @@
 		return -EBUSY;
 
 	if (rt2x00_is_pci(rt2x00dev)) {
-		if (rt2x00_rt(rt2x00dev, RT5390)) {
+		if (rt2x00_rt(rt2x00dev, RT3572) ||
+		    rt2x00_rt(rt2x00dev, RT5390)) {
 			rt2800_register_read(rt2x00dev, AUX_CTRL, &reg);
 			rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1);
 			rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1);
@@ -600,49 +601,6 @@
 }
 EXPORT_SYMBOL_GPL(rt2800_process_rxwi);
 
-static bool rt2800_txdone_entry_check(struct queue_entry *entry, u32 reg)
-{
-	__le32 *txwi;
-	u32 word;
-	int wcid, ack, pid;
-	int tx_wcid, tx_ack, tx_pid;
-
-	wcid	= rt2x00_get_field32(reg, TX_STA_FIFO_WCID);
-	ack	= rt2x00_get_field32(reg, TX_STA_FIFO_TX_ACK_REQUIRED);
-	pid	= rt2x00_get_field32(reg, TX_STA_FIFO_PID_TYPE);
-
-	/*
-	 * This frames has returned with an IO error,
-	 * so the status report is not intended for this
-	 * frame.
-	 */
-	if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags)) {
-		rt2x00lib_txdone_noinfo(entry, TXDONE_FAILURE);
-		return false;
-	}
-
-	/*
-	 * Validate if this TX status report is intended for
-	 * this entry by comparing the WCID/ACK/PID fields.
-	 */
-	txwi = rt2800_drv_get_txwi(entry);
-
-	rt2x00_desc_read(txwi, 1, &word);
-	tx_wcid = rt2x00_get_field32(word, TXWI_W1_WIRELESS_CLI_ID);
-	tx_ack  = rt2x00_get_field32(word, TXWI_W1_ACK);
-	tx_pid  = rt2x00_get_field32(word, TXWI_W1_PACKETID);
-
-	if ((wcid != tx_wcid) || (ack != tx_ack) || (pid != tx_pid)) {
-		WARNING(entry->queue->rt2x00dev,
-			"TX status report missed for queue %d entry %d\n",
-		entry->queue->qid, entry->entry_idx);
-		rt2x00lib_txdone_noinfo(entry, TXDONE_UNKNOWN);
-		return false;
-	}
-
-	return true;
-}
-
 void rt2800_txdone_entry(struct queue_entry *entry, u32 status)
 {
 	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
@@ -725,45 +683,6 @@
 }
 EXPORT_SYMBOL_GPL(rt2800_txdone_entry);
 
-void rt2800_txdone(struct rt2x00_dev *rt2x00dev)
-{
-	struct data_queue *queue;
-	struct queue_entry *entry;
-	u32 reg;
-	u8 qid;
-
-	while (kfifo_get(&rt2x00dev->txstatus_fifo, &reg)) {
-
-		/* TX_STA_FIFO_PID_QUEUE is a 2-bit field, thus
-		 * qid is guaranteed to be one of the TX QIDs
-		 */
-		qid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_QUEUE);
-		queue = rt2x00queue_get_tx_queue(rt2x00dev, qid);
-		if (unlikely(!queue)) {
-			WARNING(rt2x00dev, "Got TX status for an unavailable "
-					   "queue %u, dropping\n", qid);
-			continue;
-		}
-
-		/*
-		 * Inside each queue, we process each entry in a chronological
-		 * order. We first check that the queue is not empty.
-		 */
-		entry = NULL;
-		while (!rt2x00queue_empty(queue)) {
-			entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
-			if (rt2800_txdone_entry_check(entry, reg))
-				break;
-		}
-
-		if (!entry || rt2x00queue_empty(queue))
-			break;
-
-		rt2800_txdone_entry(entry, reg);
-	}
-}
-EXPORT_SYMBOL_GPL(rt2800_txdone);
-
 void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
 {
 	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
@@ -1355,7 +1274,7 @@
 			gf20_rate = gf40_rate = 0x0003;
 		}
 		break;
-	};
+	}
 
 	/* check for STAs not supporting greenfield mode */
 	if (any_sta_nongf)
@@ -1433,6 +1352,40 @@
 }
 EXPORT_SYMBOL_GPL(rt2800_config_erp);
 
+static void rt2800_config_3572bt_ant(struct rt2x00_dev *rt2x00dev)
+{
+	u32 reg;
+	u16 eeprom;
+	u8 led_ctrl, led_g_mode, led_r_mode;
+
+	rt2800_register_read(rt2x00dev, GPIO_SWITCH, &reg);
+	if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
+		rt2x00_set_field32(&reg, GPIO_SWITCH_0, 1);
+		rt2x00_set_field32(&reg, GPIO_SWITCH_1, 1);
+	} else {
+		rt2x00_set_field32(&reg, GPIO_SWITCH_0, 0);
+		rt2x00_set_field32(&reg, GPIO_SWITCH_1, 0);
+	}
+	rt2800_register_write(rt2x00dev, GPIO_SWITCH, reg);
+
+	rt2800_register_read(rt2x00dev, LED_CFG, &reg);
+	led_g_mode = rt2x00_get_field32(reg, LED_CFG_LED_POLAR) ? 3 : 0;
+	led_r_mode = rt2x00_get_field32(reg, LED_CFG_LED_POLAR) ? 0 : 3;
+	if (led_g_mode != rt2x00_get_field32(reg, LED_CFG_G_LED_MODE) ||
+	    led_r_mode != rt2x00_get_field32(reg, LED_CFG_R_LED_MODE)) {
+		rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &eeprom);
+		led_ctrl = rt2x00_get_field16(eeprom, EEPROM_FREQ_LED_MODE);
+		if (led_ctrl == 0 || led_ctrl > 0x40) {
+			rt2x00_set_field32(&reg, LED_CFG_G_LED_MODE, led_g_mode);
+			rt2x00_set_field32(&reg, LED_CFG_R_LED_MODE, led_r_mode);
+			rt2800_register_write(rt2x00dev, LED_CFG, reg);
+		} else {
+			rt2800_mcu_request(rt2x00dev, MCU_BAND_SELECT, 0xff,
+					   (led_g_mode << 2) | led_r_mode, 1);
+		}
+	}
+}
+
 static void rt2800_set_ant_diversity(struct rt2x00_dev *rt2x00dev,
 				     enum antenna ant)
 {
@@ -1463,6 +1416,10 @@
 	rt2800_bbp_read(rt2x00dev, 1, &r1);
 	rt2800_bbp_read(rt2x00dev, 3, &r3);
 
+	if (rt2x00_rt(rt2x00dev, RT3572) &&
+	    test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags))
+		rt2800_config_3572bt_ant(rt2x00dev);
+
 	/*
 	 * Configure the TX antenna.
 	 */
@@ -1471,7 +1428,11 @@
 		rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 0);
 		break;
 	case 2:
-		rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 2);
+		if (rt2x00_rt(rt2x00dev, RT3572) &&
+		    test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags))
+			rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 1);
+		else
+			rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 2);
 		break;
 	case 3:
 		rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 0);
@@ -1496,7 +1457,15 @@
 		rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 0);
 		break;
 	case 2:
-		rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 1);
+		if (rt2x00_rt(rt2x00dev, RT3572) &&
+		    test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags)) {
+			rt2x00_set_field8(&r3, BBP3_RX_ADC, 1);
+			rt2x00_set_field8(&r3, BBP3_RX_ANTENNA,
+				rt2x00dev->curr_band == IEEE80211_BAND_5GHZ);
+			rt2800_set_ant_diversity(rt2x00dev, ANTENNA_B);
+		} else {
+			rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 1);
+		}
 		break;
 	case 3:
 		rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 2);
@@ -1630,6 +1599,161 @@
 	rt2800_rfcsr_write(rt2x00dev, 7, rfcsr);
 }
 
+static void rt2800_config_channel_rf3052(struct rt2x00_dev *rt2x00dev,
+					 struct ieee80211_conf *conf,
+					 struct rf_channel *rf,
+					 struct channel_info *info)
+{
+	u8 rfcsr;
+	u32 reg;
+
+	if (rf->channel <= 14) {
+		rt2800_bbp_write(rt2x00dev, 25, 0x15);
+		rt2800_bbp_write(rt2x00dev, 26, 0x85);
+	} else {
+		rt2800_bbp_write(rt2x00dev, 25, 0x09);
+		rt2800_bbp_write(rt2x00dev, 26, 0xff);
+	}
+
+	rt2800_rfcsr_write(rt2x00dev, 2, rf->rf1);
+	rt2800_rfcsr_write(rt2x00dev, 3, rf->rf3);
+
+	rt2800_rfcsr_read(rt2x00dev, 6, &rfcsr);
+	rt2x00_set_field8(&rfcsr, RFCSR6_R1, rf->rf2);
+	if (rf->channel <= 14)
+		rt2x00_set_field8(&rfcsr, RFCSR6_TXDIV, 2);
+	else
+		rt2x00_set_field8(&rfcsr, RFCSR6_TXDIV, 1);
+	rt2800_rfcsr_write(rt2x00dev, 6, rfcsr);
+
+	rt2800_rfcsr_read(rt2x00dev, 5, &rfcsr);
+	if (rf->channel <= 14)
+		rt2x00_set_field8(&rfcsr, RFCSR5_R1, 1);
+	else
+		rt2x00_set_field8(&rfcsr, RFCSR5_R1, 2);
+	rt2800_rfcsr_write(rt2x00dev, 5, rfcsr);
+
+	rt2800_rfcsr_read(rt2x00dev, 12, &rfcsr);
+	if (rf->channel <= 14) {
+		rt2x00_set_field8(&rfcsr, RFCSR12_DR0, 3);
+		rt2x00_set_field8(&rfcsr, RFCSR12_TX_POWER,
+				(info->default_power1 & 0x3) |
+				((info->default_power1 & 0xC) << 1));
+	} else {
+		rt2x00_set_field8(&rfcsr, RFCSR12_DR0, 7);
+		rt2x00_set_field8(&rfcsr, RFCSR12_TX_POWER,
+				(info->default_power1 & 0x3) |
+				((info->default_power1 & 0xC) << 1));
+	}
+	rt2800_rfcsr_write(rt2x00dev, 12, rfcsr);
+
+	rt2800_rfcsr_read(rt2x00dev, 13, &rfcsr);
+	if (rf->channel <= 14) {
+		rt2x00_set_field8(&rfcsr, RFCSR13_DR0, 3);
+		rt2x00_set_field8(&rfcsr, RFCSR13_TX_POWER,
+				(info->default_power2 & 0x3) |
+				((info->default_power2 & 0xC) << 1));
+	} else {
+		rt2x00_set_field8(&rfcsr, RFCSR13_DR0, 7);
+		rt2x00_set_field8(&rfcsr, RFCSR13_TX_POWER,
+				(info->default_power2 & 0x3) |
+				((info->default_power2 & 0xC) << 1));
+	}
+	rt2800_rfcsr_write(rt2x00dev, 13, rfcsr);
+
+	rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
+	rt2x00_set_field8(&rfcsr, RFCSR1_RF_BLOCK_EN, 1);
+	rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 0);
+	rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 0);
+	rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 0);
+	rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 0);
+	if (test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags)) {
+		if (rf->channel <= 14) {
+			rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 1);
+			rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 1);
+		}
+		rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 1);
+		rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 1);
+	} else {
+		switch (rt2x00dev->default_ant.tx_chain_num) {
+		case 1:
+			rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 1);
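+			/* fall through */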
+		case 2:
+			rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 1);
+			break;
+		}
+
+		switch (rt2x00dev->default_ant.rx_chain_num) {
+		case 1:
+			rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 1);
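+			/* fall through */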
+		case 2:
+			rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 1);
+			break;
+		}
+	}
+	rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
+
+	rt2800_rfcsr_read(rt2x00dev, 23, &rfcsr);
+	rt2x00_set_field8(&rfcsr, RFCSR23_FREQ_OFFSET, rt2x00dev->freq_offset);
+	rt2800_rfcsr_write(rt2x00dev, 23, rfcsr);
+
+	rt2800_rfcsr_write(rt2x00dev, 24,
+			      rt2x00dev->calibration[conf_is_ht40(conf)]);
+	rt2800_rfcsr_write(rt2x00dev, 31,
+			      rt2x00dev->calibration[conf_is_ht40(conf)]);
+
+	if (rf->channel <= 14) {
+		rt2800_rfcsr_write(rt2x00dev, 7, 0xd8);
+		rt2800_rfcsr_write(rt2x00dev, 9, 0xc3);
+		rt2800_rfcsr_write(rt2x00dev, 10, 0xf1);
+		rt2800_rfcsr_write(rt2x00dev, 11, 0xb9);
+		rt2800_rfcsr_write(rt2x00dev, 15, 0x53);
+		rt2800_rfcsr_write(rt2x00dev, 16, 0x4c);
+		rt2800_rfcsr_write(rt2x00dev, 17, 0x23);
+		rt2800_rfcsr_write(rt2x00dev, 19, 0x93);
+		rt2800_rfcsr_write(rt2x00dev, 20, 0xb3);
+		rt2800_rfcsr_write(rt2x00dev, 25, 0x15);
+		rt2800_rfcsr_write(rt2x00dev, 26, 0x85);
+		rt2800_rfcsr_write(rt2x00dev, 27, 0x00);
+		rt2800_rfcsr_write(rt2x00dev, 29, 0x9b);
+	} else {
+		rt2800_rfcsr_write(rt2x00dev, 7, 0x14);
+		rt2800_rfcsr_write(rt2x00dev, 9, 0xc0);
+		rt2800_rfcsr_write(rt2x00dev, 10, 0xf1);
+		rt2800_rfcsr_write(rt2x00dev, 11, 0x00);
+		rt2800_rfcsr_write(rt2x00dev, 15, 0x43);
+		rt2800_rfcsr_write(rt2x00dev, 16, 0x7a);
+		rt2800_rfcsr_write(rt2x00dev, 17, 0x23);
+		if (rf->channel <= 64) {
+			rt2800_rfcsr_write(rt2x00dev, 19, 0xb7);
+			rt2800_rfcsr_write(rt2x00dev, 20, 0xf6);
+			rt2800_rfcsr_write(rt2x00dev, 25, 0x3d);
+		} else if (rf->channel <= 128) {
+			rt2800_rfcsr_write(rt2x00dev, 19, 0x74);
+			rt2800_rfcsr_write(rt2x00dev, 20, 0xf4);
+			rt2800_rfcsr_write(rt2x00dev, 25, 0x01);
+		} else {
+			rt2800_rfcsr_write(rt2x00dev, 19, 0x72);
+			rt2800_rfcsr_write(rt2x00dev, 20, 0xf3);
+			rt2800_rfcsr_write(rt2x00dev, 25, 0x01);
+		}
+		rt2800_rfcsr_write(rt2x00dev, 26, 0x87);
+		rt2800_rfcsr_write(rt2x00dev, 27, 0x01);
+		rt2800_rfcsr_write(rt2x00dev, 29, 0x9f);
+	}
+
+	rt2800_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
+	rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT7, 0);
+	if (rf->channel <= 14)
+		rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT7, 1);
+	else
+		rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT7, 0);
+	rt2800_register_write(rt2x00dev, GPIO_CTRL_CFG, reg);
+
+	rt2800_rfcsr_read(rt2x00dev, 7, &rfcsr);
+	rt2x00_set_field8(&rfcsr, RFCSR7_RF_TUNING, 1);
+	rt2800_rfcsr_write(rt2x00dev, 7, rfcsr);
+}
 
 #define RT5390_POWER_BOUND     0x27
 #define RT5390_FREQ_OFFSET_BOUND       0x5f
@@ -1748,9 +1872,10 @@
 	    rt2x00_rf(rt2x00dev, RF3020) ||
 	    rt2x00_rf(rt2x00dev, RF3021) ||
 	    rt2x00_rf(rt2x00dev, RF3022) ||
-	    rt2x00_rf(rt2x00dev, RF3052) ||
 	    rt2x00_rf(rt2x00dev, RF3320))
 		rt2800_config_channel_rf3xxx(rt2x00dev, conf, rf, info);
+	else if (rt2x00_rf(rt2x00dev, RF3052))
+		rt2800_config_channel_rf3052(rt2x00dev, conf, rf, info);
 	else if (rt2x00_rf(rt2x00dev, RF5370) ||
 		 rt2x00_rf(rt2x00dev, RF5390))
 		rt2800_config_channel_rf53xx(rt2x00dev, conf, rf, info);
@@ -1777,7 +1902,10 @@
 			}
 		}
 	} else {
-		rt2800_bbp_write(rt2x00dev, 82, 0xf2);
+		if (rt2x00_rt(rt2x00dev, RT3572))
+			rt2800_bbp_write(rt2x00dev, 82, 0x94);
+		else
+			rt2800_bbp_write(rt2x00dev, 82, 0xf2);
 
 		if (test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags))
 			rt2800_bbp_write(rt2x00dev, 75, 0x46);
@@ -1791,12 +1919,17 @@
 	rt2x00_set_field32(&reg, TX_BAND_CFG_BG, rf->channel <= 14);
 	rt2800_register_write(rt2x00dev, TX_BAND_CFG, reg);
 
+	if (rt2x00_rt(rt2x00dev, RT3572))
+		rt2800_rfcsr_write(rt2x00dev, 8, 0);
+
 	tx_pin = 0;
 
 	/* Turn on unused PA or LNA when not using 1T or 1R */
 	if (rt2x00dev->default_ant.tx_chain_num == 2) {
-		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A1_EN, 1);
-		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G1_EN, 1);
+		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A1_EN,
+				   rf->channel > 14);
+		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G1_EN,
+				   rf->channel <= 14);
 	}
 
 	/* Turn on unused PA or LNA when not using 1T or 1R */
@@ -1809,11 +1942,18 @@
 	rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G0_EN, 1);
 	rt2x00_set_field32(&tx_pin, TX_PIN_CFG_RFTR_EN, 1);
 	rt2x00_set_field32(&tx_pin, TX_PIN_CFG_TRSW_EN, 1);
-	rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G0_EN, rf->channel <= 14);
+	if (test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags))
+		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G0_EN, 1);
+	else
+		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G0_EN,
+				   rf->channel <= 14);
 	rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A0_EN, rf->channel > 14);
 
 	rt2800_register_write(rt2x00dev, TX_PIN_CFG, tx_pin);
 
+	if (rt2x00_rt(rt2x00dev, RT3572))
+		rt2800_rfcsr_write(rt2x00dev, 8, 0x80);
+
 	rt2800_bbp_read(rt2x00dev, 4, &bbp);
 	rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 2 * conf_is_ht40(conf));
 	rt2800_bbp_write(rt2x00dev, 4, bbp);
@@ -2413,6 +2553,9 @@
 		rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
 		rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000);
 		rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000030);
+	} else if (rt2x00_rt(rt2x00dev, RT3572)) {
+		rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
+		rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
 	} else if (rt2x00_rt(rt2x00dev, RT5390)) {
 		rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404);
 		rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
@@ -2799,6 +2942,7 @@
 	}
 
 	if (rt2800_is_305x_soc(rt2x00dev) ||
+	    rt2x00_rt(rt2x00dev, RT3572) ||
 	    rt2x00_rt(rt2x00dev, RT5390))
 		rt2800_bbp_write(rt2x00dev, 31, 0x08);
 
@@ -2828,6 +2972,7 @@
 	    rt2x00_rt(rt2x00dev, RT3071) ||
 	    rt2x00_rt(rt2x00dev, RT3090) ||
 	    rt2x00_rt(rt2x00dev, RT3390) ||
+	    rt2x00_rt(rt2x00dev, RT3572) ||
 	    rt2x00_rt(rt2x00dev, RT5390)) {
 		rt2800_bbp_write(rt2x00dev, 79, 0x13);
 		rt2800_bbp_write(rt2x00dev, 80, 0x05);
@@ -2868,6 +3013,7 @@
 	    rt2x00_rt_rev_gte(rt2x00dev, RT3071, REV_RT3071E) ||
 	    rt2x00_rt_rev_gte(rt2x00dev, RT3090, REV_RT3090E) ||
 	    rt2x00_rt_rev_gte(rt2x00dev, RT3390, REV_RT3390E) ||
+	    rt2x00_rt(rt2x00dev, RT3572) ||
 	    rt2x00_rt(rt2x00dev, RT5390) ||
 	    rt2800_is_305x_soc(rt2x00dev))
 		rt2800_bbp_write(rt2x00dev, 103, 0xc0);
@@ -2895,6 +3041,7 @@
 	if (rt2x00_rt(rt2x00dev, RT3071) ||
 	    rt2x00_rt(rt2x00dev, RT3090) ||
 	    rt2x00_rt(rt2x00dev, RT3390) ||
+	    rt2x00_rt(rt2x00dev, RT3572) ||
 	    rt2x00_rt(rt2x00dev, RT5390)) {
 		rt2800_bbp_read(rt2x00dev, 138, &value);
 
@@ -3031,6 +3178,7 @@
 	    !rt2x00_rt(rt2x00dev, RT3071) &&
 	    !rt2x00_rt(rt2x00dev, RT3090) &&
 	    !rt2x00_rt(rt2x00dev, RT3390) &&
+	    !rt2x00_rt(rt2x00dev, RT3572) &&
 	    !rt2x00_rt(rt2x00dev, RT5390) &&
 	    !rt2800_is_305x_soc(rt2x00dev))
 		return 0;
@@ -3109,6 +3257,38 @@
 		rt2800_rfcsr_write(rt2x00dev, 29, 0x8f);
 		rt2800_rfcsr_write(rt2x00dev, 30, 0x20);
 		rt2800_rfcsr_write(rt2x00dev, 31, 0x0f);
+	} else if (rt2x00_rt(rt2x00dev, RT3572)) {
+		rt2800_rfcsr_write(rt2x00dev, 0, 0x70);
+		rt2800_rfcsr_write(rt2x00dev, 1, 0x81);
+		rt2800_rfcsr_write(rt2x00dev, 2, 0xf1);
+		rt2800_rfcsr_write(rt2x00dev, 3, 0x02);
+		rt2800_rfcsr_write(rt2x00dev, 4, 0x4c);
+		rt2800_rfcsr_write(rt2x00dev, 5, 0x05);
+		rt2800_rfcsr_write(rt2x00dev, 6, 0x4a);
+		rt2800_rfcsr_write(rt2x00dev, 7, 0xd8);
+		rt2800_rfcsr_write(rt2x00dev, 9, 0xc3);
+		rt2800_rfcsr_write(rt2x00dev, 10, 0xf1);
+		rt2800_rfcsr_write(rt2x00dev, 11, 0xb9);
+		rt2800_rfcsr_write(rt2x00dev, 12, 0x70);
+		rt2800_rfcsr_write(rt2x00dev, 13, 0x65);
+		rt2800_rfcsr_write(rt2x00dev, 14, 0xa0);
+		rt2800_rfcsr_write(rt2x00dev, 15, 0x53);
+		rt2800_rfcsr_write(rt2x00dev, 16, 0x4c);
+		rt2800_rfcsr_write(rt2x00dev, 17, 0x23);
+		rt2800_rfcsr_write(rt2x00dev, 18, 0xac);
+		rt2800_rfcsr_write(rt2x00dev, 19, 0x93);
+		rt2800_rfcsr_write(rt2x00dev, 20, 0xb3);
+		rt2800_rfcsr_write(rt2x00dev, 21, 0xd0);
+		rt2800_rfcsr_write(rt2x00dev, 22, 0x00);
+		rt2800_rfcsr_write(rt2x00dev, 23, 0x3c);
+		rt2800_rfcsr_write(rt2x00dev, 24, 0x16);
+		rt2800_rfcsr_write(rt2x00dev, 25, 0x15);
+		rt2800_rfcsr_write(rt2x00dev, 26, 0x85);
+		rt2800_rfcsr_write(rt2x00dev, 27, 0x00);
+		rt2800_rfcsr_write(rt2x00dev, 28, 0x00);
+		rt2800_rfcsr_write(rt2x00dev, 29, 0x9b);
+		rt2800_rfcsr_write(rt2x00dev, 30, 0x09);
+		rt2800_rfcsr_write(rt2x00dev, 31, 0x10);
 	} else if (rt2800_is_305x_soc(rt2x00dev)) {
 		rt2800_rfcsr_write(rt2x00dev, 0, 0x50);
 		rt2800_rfcsr_write(rt2x00dev, 1, 0x01);
@@ -3258,6 +3438,19 @@
 		rt2800_register_read(rt2x00dev, GPIO_SWITCH, &reg);
 		rt2x00_set_field32(&reg, GPIO_SWITCH_5, 0);
 		rt2800_register_write(rt2x00dev, GPIO_SWITCH, reg);
+	} else if (rt2x00_rt(rt2x00dev, RT3572)) {
+		rt2800_rfcsr_read(rt2x00dev, 6, &rfcsr);
+		rt2x00_set_field8(&rfcsr, RFCSR6_R2, 1);
+		rt2800_rfcsr_write(rt2x00dev, 6, rfcsr);
+
+		rt2800_register_read(rt2x00dev, LDO_CFG0, &reg);
+		rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 3);
+		rt2x00_set_field32(&reg, LDO_CFG0_BGSEL, 1);
+		rt2800_register_write(rt2x00dev, LDO_CFG0, reg);
+		msleep(1);
+		rt2800_register_read(rt2x00dev, LDO_CFG0, &reg);
+		rt2x00_set_field32(&reg, LDO_CFG0_BGSEL, 1);
+		rt2800_register_write(rt2x00dev, LDO_CFG0, reg);
 	}
 
 	/*
@@ -3270,7 +3463,8 @@
 			rt2800_init_rx_filter(rt2x00dev, true, 0x27, 0x19);
 	} else if (rt2x00_rt(rt2x00dev, RT3071) ||
 		   rt2x00_rt(rt2x00dev, RT3090) ||
-		   rt2x00_rt(rt2x00dev, RT3390)) {
+		   rt2x00_rt(rt2x00dev, RT3390) ||
+		   rt2x00_rt(rt2x00dev, RT3572)) {
 		rt2x00dev->calibration[0] =
 			rt2800_init_rx_filter(rt2x00dev, false, 0x07, 0x13);
 		rt2x00dev->calibration[1] =
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.h b/drivers/net/wireless/rt2x00/rt2800lib.h
index f2d1594..69deb31 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/rt2x00/rt2800lib.h
@@ -152,7 +152,6 @@
 			  struct txentry_desc *txdesc);
 void rt2800_process_rxwi(struct queue_entry *entry, struct rxdone_entry_desc *txdesc);
 
-void rt2800_txdone(struct rt2x00_dev *rt2x00dev);
 void rt2800_txdone_entry(struct queue_entry *entry, u32 status);
 
 void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc);
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index cc4a54f..9ccc537 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -219,7 +219,7 @@
 		break;
 	default:
 		break;
-	};
+	}
 }
 
 static void rt2800pci_kick_queue(struct data_queue *queue)
@@ -501,7 +501,9 @@
 	rt2x00pci_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
 	rt2x00pci_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
 
-	if (rt2x00_rt(rt2x00dev, RT5390)) {
+	if (rt2x00_is_pcie(rt2x00dev) &&
+	    (rt2x00_rt(rt2x00dev, RT3572) ||
+	     rt2x00_rt(rt2x00dev, RT5390))) {
 		rt2x00pci_register_read(rt2x00dev, AUX_CTRL, &reg);
 		rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1);
 		rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1);
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index ba82c97..6e92298 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -457,6 +457,87 @@
 /*
  * TX control handlers
  */
+static bool rt2800usb_txdone_entry_check(struct queue_entry *entry, u32 reg)
+{
+	__le32 *txwi;
+	u32 word;
+	int wcid, ack, pid;
+	int tx_wcid, tx_ack, tx_pid;
+
+	wcid	= rt2x00_get_field32(reg, TX_STA_FIFO_WCID);
+	ack	= rt2x00_get_field32(reg, TX_STA_FIFO_TX_ACK_REQUIRED);
+	pid	= rt2x00_get_field32(reg, TX_STA_FIFO_PID_TYPE);
+
+	/*
+	 * This frame has returned with an IO error,
+	 * so the status report is not intended for this
+	 * frame.
+	 */
+	if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags)) {
+		rt2x00lib_txdone_noinfo(entry, TXDONE_FAILURE);
+		return false;
+	}
+
+	/*
+	 * Validate if this TX status report is intended for
+	 * this entry by comparing the WCID/ACK/PID fields.
+	 */
+	txwi = rt2800usb_get_txwi(entry);
+
+	rt2x00_desc_read(txwi, 1, &word);
+	tx_wcid = rt2x00_get_field32(word, TXWI_W1_WIRELESS_CLI_ID);
+	tx_ack  = rt2x00_get_field32(word, TXWI_W1_ACK);
+	tx_pid  = rt2x00_get_field32(word, TXWI_W1_PACKETID);
+
+	if ((wcid != tx_wcid) || (ack != tx_ack) || (pid != tx_pid)) {
+		WARNING(entry->queue->rt2x00dev,
+			"TX status report missed for queue %d entry %d\n",
+			entry->queue->qid, entry->entry_idx);
+		rt2x00lib_txdone_noinfo(entry, TXDONE_UNKNOWN);
+		return false;
+	}
+
+	return true;
+}
+
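+/*
+ * Drain the TX status FIFO: match each status word against the oldest
+ * outstanding entry of the queue it reports on, completing entries that
+ * failed I/O or whose TXWI fields do not match along the way.
+ */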
+static void rt2800usb_txdone(struct rt2x00_dev *rt2x00dev)
+{
+	struct data_queue *queue;
+	struct queue_entry *entry;
+	u32 reg;
+	u8 qid;
+
+	while (kfifo_get(&rt2x00dev->txstatus_fifo, &reg)) {
+
+		/* TX_STA_FIFO_PID_QUEUE is a 2-bit field, thus
+		 * qid is guaranteed to be one of the TX QIDs
+		 */
+		qid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_QUEUE);
+		queue = rt2x00queue_get_tx_queue(rt2x00dev, qid);
+		if (unlikely(!queue)) {
+			WARNING(rt2x00dev, "Got TX status for an unavailable "
+					   "queue %u, dropping\n", qid);
+			continue;
+		}
+
+		/*
+		 * Inside each queue, we process each entry in chronological
+		 * order. We first check that the queue is not empty.
+		 */
+		entry = NULL;
+		while (!rt2x00queue_empty(queue)) {
+			entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
+			if (rt2800usb_txdone_entry_check(entry, reg))
+				break;
+		}
+
+		if (!entry || rt2x00queue_empty(queue))
+			break;
+
+		rt2800_txdone_entry(entry, reg);
+	}
+}
+
 static void rt2800usb_work_txdone(struct work_struct *work)
 {
 	struct rt2x00_dev *rt2x00dev =
@@ -464,7 +545,7 @@
 	struct data_queue *queue;
 	struct queue_entry *entry;
 
-	rt2800_txdone(rt2x00dev);
+	rt2800usb_txdone(rt2x00dev);
 
 	/*
 	 * Process any trailing TX status reports for IO failures,
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index c446db6..4efaf886 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -29,6 +29,7 @@
 #define RT2X00_H
 
 #include <linux/bitops.h>
+#include <linux/interrupt.h>
 #include <linux/skbuff.h>
 #include <linux/workqueue.h>
 #include <linux/firmware.h>
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index ab8c16f..c7fc9de 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -206,7 +206,6 @@
 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
 	struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
-	unsigned long irqflags;
 
 	if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
 		return;
@@ -227,14 +226,14 @@
 	 * sequence counting per-frame, since those will override the
 	 * sequence counter given by mac80211.
 	 */
-	spin_lock_irqsave(&intf->seqlock, irqflags);
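+	/*
+	 * The sequence lock is not taken from hard interrupt context,
+	 * so a plain spin_lock without disabling interrupts suffices.
+	 */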
+	spin_lock(&intf->seqlock);
 
 	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
 		intf->seqno += 0x10;
 	hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
 	hdr->seq_ctrl |= cpu_to_le16(intf->seqno);
 
-	spin_unlock_irqrestore(&intf->seqlock, irqflags);
+	spin_unlock(&intf->seqlock);
 
 }
 
diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c
index 80db5ca..66b29dc 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c
@@ -16,6 +16,7 @@
  */
 
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/pci.h>
 #include <linux/slab.h>
 #include <linux/delay.h>
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index ccb6da3..fb5e43b 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -888,7 +888,6 @@
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
-	struct rtl_tid_data *tid_data;
 	struct rtl_sta_info *sta_entry = NULL;
 
 	if (sta == NULL)
@@ -906,7 +905,6 @@
 		return -EINVAL;
 
 	sta_entry = (struct rtl_sta_info *)sta->drv_priv;
-	tid_data = &sta_entry->tids[tid];
 	sta_entry->tids[tid].agg.agg_state = RTL_AGG_STOP;
 
 	ieee80211_stop_tx_ba_cb_irqsafe(mac->vif, sta->addr, tid);
@@ -918,7 +916,6 @@
 		struct ieee80211_sta *sta, u16 tid)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
-	struct rtl_tid_data *tid_data;
 	struct rtl_sta_info *sta_entry = NULL;
 
 	if (sta == NULL)
@@ -936,7 +933,6 @@
 		return -EINVAL;
 
 	sta_entry = (struct rtl_sta_info *)sta->drv_priv;
-	tid_data = &sta_entry->tids[tid];
 	sta_entry->tids[tid].agg.agg_state = RTL_AGG_OPERATIONAL;
 
 	return 0;
diff --git a/drivers/net/wireless/rtlwifi/efuse.c b/drivers/net/wireless/rtlwifi/efuse.c
index 50de6f5..0b56232 100644
--- a/drivers/net/wireless/rtlwifi/efuse.c
+++ b/drivers/net/wireless/rtlwifi/efuse.c
@@ -925,7 +925,7 @@
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct pgpkt_struct target_pkt;
 	u8 write_state = PG_STATE_HEADER;
-	int continual = true, dataempty = true, result = true;
+	int continual = true, result = true;
 	u16 efuse_addr = 0;
 	u8 efuse_data;
 	u8 target_word_cnts = 0;
@@ -953,7 +953,6 @@
 	       (EFUSE_MAX_SIZE - EFUSE_OOB_PROTECT_BYTES))) {
 
 		if (write_state == PG_STATE_HEADER) {
-			dataempty = true;
 			badworden = 0x0F;
 			RTPRINT(rtlpriv, FEEPROM, EFUSE_PG,
 				("efuse PG_STATE_HEADER\n"));
@@ -1176,13 +1175,12 @@
 {
 	int continual = true;
 	u16 efuse_addr = 0;
-	u8 hoffset, hworden;
+	u8 hworden;
 	u8 efuse_data, word_cnts;
 
 	while (continual && efuse_one_byte_read(hw, efuse_addr, &efuse_data)
 	       && (efuse_addr < EFUSE_MAX_SIZE)) {
 		if (efuse_data != 0xFF) {
-			hoffset = (efuse_data >> 4) & 0x0F;
 			hworden = efuse_data & 0x0F;
 			word_cnts = efuse_calculate_word_cnts(hworden);
 			efuse_addr = efuse_addr + (word_cnts * 2) + 1;
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 9f8ccae..e502db0 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -622,7 +622,7 @@
 	if (((rtlpriv->link_info.num_rx_inperiod +
 		rtlpriv->link_info.num_tx_inperiod) > 8) ||
 		(rtlpriv->link_info.num_rx_inperiod > 2)) {
-		rtl_lps_leave(hw);
+		tasklet_schedule(&rtlpriv->works.ips_leave_tasklet);
 	}
 }
 
@@ -644,22 +644,23 @@
 		.noise = -98,
 		.rate = 0,
 	};
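+	/* Work on a local copy of the RX ring index; it is written back
+	 * to rx_ring[rx_queue_idx].idx once the loop has finished.
+	 */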
+	int index = rtlpci->rx_ring[rx_queue_idx].idx;
 
 	/*RX NORMAL PKT */
 	while (count--) {
 		/*rx descriptor */
 		struct rtl_rx_desc *pdesc = &rtlpci->rx_ring[rx_queue_idx].desc[
-				rtlpci->rx_ring[rx_queue_idx].idx];
+				index];
 		/*rx pkt */
 		struct sk_buff *skb = rtlpci->rx_ring[rx_queue_idx].rx_buf[
-				rtlpci->rx_ring[rx_queue_idx].idx];
+				index];
 
 		own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc,
 						       false, HW_DESC_OWN);
 
 		if (own) {
 			/*wait data to be filled by hardware */
-			return;
+			break;
 		} else {
 			struct ieee80211_hdr *hdr;
 			__le16 fc;
@@ -763,15 +764,12 @@
 			if (((rtlpriv->link_info.num_rx_inperiod +
 				rtlpriv->link_info.num_tx_inperiod) > 8) ||
 				(rtlpriv->link_info.num_rx_inperiod > 2)) {
-				rtl_lps_leave(hw);
+				tasklet_schedule(&rtlpriv->works.ips_leave_tasklet);
 			}
 
 			skb = new_skb;
 
-			rtlpci->rx_ring[rx_queue_idx].rx_buf[rtlpci->
-							     rx_ring
-							     [rx_queue_idx].
-							     idx] = skb;
+			rtlpci->rx_ring[rx_queue_idx].rx_buf[index] = skb;
 			*((dma_addr_t *) skb->cb) =
 			    pci_map_single(rtlpci->pdev, skb_tail_pointer(skb),
 					   rtlpci->rxbuffersize,
@@ -784,23 +782,22 @@
 		rtlpriv->cfg->ops->set_desc((u8 *) pdesc, false,
 					    HW_DESC_RXBUFF_ADDR,
 					    (u8 *)&bufferaddress);
-		rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false, HW_DESC_RXOWN,
-					    (u8 *)&tmp_one);
 		rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false,
 					    HW_DESC_RXPKT_LEN,
 					    (u8 *)&rtlpci->rxbuffersize);
 
-		if (rtlpci->rx_ring[rx_queue_idx].idx ==
-		    rtlpci->rxringcount - 1)
+		if (index == rtlpci->rxringcount - 1)
 			rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false,
 						    HW_DESC_RXERO,
 						    (u8 *)&tmp_one);
 
-		rtlpci->rx_ring[rx_queue_idx].idx =
-		    (rtlpci->rx_ring[rx_queue_idx].idx + 1) %
-		    rtlpci->rxringcount;
+		rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false, HW_DESC_RXOWN,
+					    (u8 *)&tmp_one);
+
+		index = (index + 1) % rtlpci->rxringcount;
 	}
 
+	rtlpci->rx_ring[rx_queue_idx].idx = index;
 }
 
 static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
@@ -938,6 +935,11 @@
 	_rtl_pci_tx_chk_waitq(hw);
 }
 
+static void _rtl_pci_ips_leave_tasklet(struct ieee80211_hw *hw)
+{
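+	/*
+	 * rtl_lps_leave() takes locks that are no longer IRQ-safe, so it
+	 * is deferred to this tasklet rather than being called from the
+	 * interrupt handler directly.
+	 */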
+	rtl_lps_leave(hw);
+}
+
 static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -1036,6 +1038,9 @@
 	tasklet_init(&rtlpriv->works.irq_prepare_bcn_tasklet,
 		     (void (*)(unsigned long))_rtl_pci_prepare_bcn_tasklet,
 		     (unsigned long)hw);
+	tasklet_init(&rtlpriv->works.ips_leave_tasklet,
+		     (void (*)(unsigned long))_rtl_pci_ips_leave_tasklet,
+		     (unsigned long)hw);
 }
 
 static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
@@ -1505,6 +1510,7 @@
 
 	synchronize_irq(rtlpci->pdev->irq);
 	tasklet_kill(&rtlpriv->works.irq_tasklet);
+	tasklet_kill(&rtlpriv->works.ips_leave_tasklet);
 
 	flush_workqueue(rtlpriv->works.rtl_wq);
 	destroy_workqueue(rtlpriv->works.rtl_wq);
@@ -1579,6 +1585,7 @@
 	set_hal_stop(rtlhal);
 
 	rtlpriv->cfg->ops->disable_interrupt(hw);
+	tasklet_kill(&rtlpriv->works.ips_leave_tasklet);
 
 	spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags);
 	while (ppsc->rfchange_inprogress) {
diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c
index 39b0297..d14c13d 100644
--- a/drivers/net/wireless/rtlwifi/ps.c
+++ b/drivers/net/wireless/rtlwifi/ps.c
@@ -68,6 +68,7 @@
 
 	/*<2> Disable Interrupt */
 	rtlpriv->cfg->ops->disable_interrupt(hw);
+	tasklet_kill(&rtlpriv->works.irq_tasklet);
 
 	/*<3> Disable Adapter */
 	rtlpriv->cfg->ops->hw_disable(hw);
@@ -82,10 +83,8 @@
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
-	enum rf_pwrstate rtstate;
 	bool actionallowed = false;
 	u16 rfwait_cnt = 0;
-	unsigned long flag;
 
 	/*protect_or_not = true; */
 
@@ -98,10 +97,9 @@
 	 *should wait to be executed.
 	 */
 	while (true) {
-		spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
+		spin_lock(&rtlpriv->locks.rf_ps_lock);
 		if (ppsc->rfchange_inprogress) {
-			spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock,
-					       flag);
+			spin_unlock(&rtlpriv->locks.rf_ps_lock);
 
 			RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
 				 ("RF Change in progress!"
@@ -122,15 +120,12 @@
 			}
 		} else {
 			ppsc->rfchange_inprogress = true;
-			spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock,
-					       flag);
+			spin_unlock(&rtlpriv->locks.rf_ps_lock);
 			break;
 		}
 	}
 
 no_protect:
-	rtstate = ppsc->rfpwr_state;
-
 	switch (state_toset) {
 	case ERFON:
 		ppsc->rfoff_reason &= (~changesource);
@@ -173,9 +168,9 @@
 		rtlpriv->cfg->ops->set_rf_power_state(hw, state_toset);
 
 	if (!protect_or_not) {
-		spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
+		spin_lock(&rtlpriv->locks.rf_ps_lock);
 		ppsc->rfchange_inprogress = false;
-		spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
+		spin_unlock(&rtlpriv->locks.rf_ps_lock);
 	}
 
 	return actionallowed;
@@ -289,12 +284,11 @@
 	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
 	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
 	enum rf_pwrstate rtstate;
-	unsigned long flags;
 
 	if (mac->opmode != NL80211_IFTYPE_STATION)
 		return;
 
-	spin_lock_irqsave(&rtlpriv->locks.ips_lock, flags);
+	spin_lock(&rtlpriv->locks.ips_lock);
 
 	if (ppsc->inactiveps) {
 		rtstate = ppsc->rfpwr_state;
@@ -310,7 +304,7 @@
 		}
 	}
 
-	spin_unlock_irqrestore(&rtlpriv->locks.ips_lock, flags);
+	spin_unlock(&rtlpriv->locks.ips_lock);
 }
 
 /*for FW LPS*/
@@ -428,7 +422,6 @@
 	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
 	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
-	unsigned long flag;
 
 	if (!ppsc->fwctrl_lps)
 		return;
@@ -449,7 +442,7 @@
 	if (mac->link_state != MAC80211_LINKED)
 		return;
 
-	spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag);
+	spin_lock(&rtlpriv->locks.lps_lock);
 
 	/* Idle for a while if we connect to AP a while ago. */
 	if (mac->cnt_after_linked >= 2) {
@@ -461,7 +454,7 @@
 		}
 	}
 
-	spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag);
+	spin_unlock(&rtlpriv->locks.lps_lock);
 }
 
 /*Leave the leisure power save mode.*/
@@ -470,9 +463,8 @@
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
 	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-	unsigned long flag;
 
-	spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag);
+	spin_lock(&rtlpriv->locks.lps_lock);
 
 	if (ppsc->fwctrl_lps) {
 		if (ppsc->dot11_psmode != EACTIVE) {
@@ -493,7 +485,7 @@
 			rtl_lps_set_psmode(hw, EACTIVE);
 		}
 	}
-	spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag);
+	spin_unlock(&rtlpriv->locks.lps_lock);
 }
 
 /* For sw LPS*/
@@ -582,7 +574,6 @@
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
 	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
-	unsigned long flag;
 
 	if (!rtlpriv->psc.swctrl_lps)
 		return;
@@ -595,9 +586,9 @@
 		RT_CLEAR_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM);
 	}
 
-	spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag);
+	spin_lock(&rtlpriv->locks.lps_lock);
 	rtl_ps_set_rf_state(hw, ERFON, RF_CHANGE_BY_PS, false);
-	spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag);
+	spin_unlock(&rtlpriv->locks.lps_lock);
 }
 
 void rtl_swlps_rfon_wq_callback(void *data)
@@ -614,7 +605,6 @@
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
 	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
-	unsigned long flag;
 	u8 sleep_intv;
 
 	if (!rtlpriv->psc.sw_ps_enabled)
@@ -631,16 +621,16 @@
 	if (rtlpriv->link_info.busytraffic)
 		return;
 
-	spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
+	spin_lock(&rtlpriv->locks.rf_ps_lock);
 	if (rtlpriv->psc.rfchange_inprogress) {
-		spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
+		spin_unlock(&rtlpriv->locks.rf_ps_lock);
 		return;
 	}
-	spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
+	spin_unlock(&rtlpriv->locks.rf_ps_lock);
 
-	spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag);
+	spin_lock(&rtlpriv->locks.lps_lock);
 	rtl_ps_set_rf_state(hw, ERFSLEEP, RF_CHANGE_BY_PS, false);
-	spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag);
+	spin_unlock(&rtlpriv->locks.lps_lock);
 
 	if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM &&
 		!RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM)) {
diff --git a/drivers/net/wireless/rtlwifi/regd.c b/drivers/net/wireless/rtlwifi/regd.c
index 8f6718f..9fedb1f 100644
--- a/drivers/net/wireless/rtlwifi/regd.c
+++ b/drivers/net/wireless/rtlwifi/regd.c
@@ -303,22 +303,6 @@
 	return;
 }
 
-static void _rtl_dump_channel_map(struct wiphy *wiphy)
-{
-	enum ieee80211_band band;
-	struct ieee80211_supported_band *sband;
-	struct ieee80211_channel *ch;
-	unsigned int i;
-
-	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
-		if (!wiphy->bands[band])
-			continue;
-		sband = wiphy->bands[band];
-		for (i = 0; i < sband->n_channels; i++)
-			ch = &sband->channels[i];
-	}
-}
-
 static int _rtl_reg_notifier_apply(struct wiphy *wiphy,
 				   struct regulatory_request *request,
 				   struct rtl_regulatory *reg)
@@ -336,8 +320,6 @@
 		break;
 	}
 
-	_rtl_dump_channel_map(wiphy);
-
 	return 0;
 }
 
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
index 50303e1..f9f2370 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
@@ -546,7 +546,6 @@
 	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
 	struct rtl8192_tx_ring *ring;
 	struct rtl_tx_desc *pdesc;
-	u8 own;
 	unsigned long flags;
 	struct sk_buff *pskb = NULL;
 
@@ -559,7 +558,6 @@
 	spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
 
 	pdesc = &ring->desc[0];
-	own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc, true, HW_DESC_OWN);
 
 	rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *) pdesc, 1, 1, skb);
 
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
index d2cc815..3b11642 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
@@ -1253,10 +1253,9 @@
 
 	const u32 retrycount = 2;
 
-	u32 bbvalue;
-
 	if (t == 0) {
-		bbvalue = rtl_get_bbreg(hw, 0x800, MASKDWORD);
+		/* dummy read */
+		rtl_get_bbreg(hw, 0x800, MASKDWORD);
 
 		_rtl92c_phy_save_adda_registers(hw, adda_reg,
 						rtlphy->adda_backup, 16);
@@ -1762,8 +1761,7 @@
 	long result[4][8];
 	u8 i, final_candidate;
 	bool patha_ok, pathb_ok;
-	long reg_e94, reg_e9c, reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4,
-	    reg_ecc, reg_tmp = 0;
+	long reg_e94, reg_e9c, reg_ea4, reg_eb4, reg_ebc, reg_ec4, reg_tmp = 0;
 	bool is12simular, is13simular, is23simular;
 	bool start_conttx = false, singletone = false;
 	u32 iqk_bb_reg[10] = {
@@ -1841,21 +1839,17 @@
 		reg_e94 = result[i][0];
 		reg_e9c = result[i][1];
 		reg_ea4 = result[i][2];
-		reg_eac = result[i][3];
 		reg_eb4 = result[i][4];
 		reg_ebc = result[i][5];
 		reg_ec4 = result[i][6];
-		reg_ecc = result[i][7];
 	}
 	if (final_candidate != 0xff) {
 		rtlphy->reg_e94 = reg_e94 = result[final_candidate][0];
 		rtlphy->reg_e9c = reg_e9c = result[final_candidate][1];
 		reg_ea4 = result[final_candidate][2];
-		reg_eac = result[final_candidate][3];
 		rtlphy->reg_eb4 = reg_eb4 = result[final_candidate][4];
 		rtlphy->reg_ebc = reg_ebc = result[final_candidate][5];
 		reg_ec4 = result[final_candidate][6];
-		reg_ecc = result[final_candidate][7];
 		patha_ok = pathb_ok = true;
 	} else {
 		rtlphy->reg_e94 = rtlphy->reg_eb4 = 0x100;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
index defb437..944f55e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
@@ -763,11 +763,9 @@
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
 	u8 reg_bw_opmode;
-	u32 reg_ratr, reg_prsr;
+	u32 reg_prsr;
 
 	reg_bw_opmode = BW_OPMODE_20MHZ;
-	reg_ratr = RATE_ALL_CCK | RATE_ALL_OFDM_AG |
-	    RATE_ALL_OFDM_1SS | RATE_ALL_OFDM_2SS;
 	reg_prsr = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
 
 	rtl_write_byte(rtlpriv, REG_INIRTS_RATE_SEL, 0x8);
@@ -1196,6 +1194,7 @@
 	rtl_write_dword(rtlpriv, REG_HIMR, IMR8190_DISABLED);
 	rtl_write_dword(rtlpriv, REG_HIMRE, IMR8190_DISABLED);
 	rtlpci->irq_enabled = false;
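+	/* Make sure any handler of the just-disabled interrupt has
+	 * finished running before returning.
+	 */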
+	synchronize_irq(rtlpci->pdev->irq);
 }
 
 static void _rtl92ce_poweroff_adapter(struct ieee80211_hw *hw)
@@ -1969,7 +1968,7 @@
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
 	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
-	enum rf_pwrstate e_rfpowerstate_toset, cur_rfstate;
+	enum rf_pwrstate e_rfpowerstate_toset;
 	u8 u1tmp;
 	bool actuallyset = false;
 	unsigned long flag;
@@ -1989,8 +1988,6 @@
 		spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
 	}
 
-	cur_rfstate = ppsc->rfpwr_state;
-
 	rtl_write_byte(rtlpriv, REG_MAC_PINMUX_CFG, rtl_read_byte(rtlpriv,
 		       REG_MAC_PINMUX_CFG)&~(BIT(3)));
 
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
index abe0fcc..592a10a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
@@ -46,13 +46,12 @@
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	u32 original_value, readback_value, bitshift;
 	struct rtl_phy *rtlphy = &(rtlpriv->phy);
-	unsigned long flags;
 
 	RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), "
 					       "rfpath(%#x), bitmask(%#x)\n",
 					       regaddr, rfpath, bitmask));
 
-	spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
+	spin_lock(&rtlpriv->locks.rf_lock);
 
 	if (rtlphy->rf_mode != RF_OP_BY_FW) {
 		original_value = _rtl92c_phy_rf_serial_read(hw,
@@ -65,7 +64,7 @@
 	bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
 	readback_value = (original_value & bitmask) >> bitshift;
 
-	spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
+	spin_unlock(&rtlpriv->locks.rf_lock);
 
 	RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
 		 ("regaddr(%#x), rfpath(%#x), "
@@ -120,13 +119,12 @@
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_phy *rtlphy = &(rtlpriv->phy);
 	u32 original_value, bitshift;
-	unsigned long flags;
 
 	RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
 		 ("regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
 		  regaddr, bitmask, data, rfpath));
 
-	spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
+	spin_lock(&rtlpriv->locks.rf_lock);
 
 	if (rtlphy->rf_mode != RF_OP_BY_FW) {
 		if (bitmask != RFREG_OFFSET_MASK) {
@@ -153,7 +151,7 @@
 		_rtl92c_phy_fw_rf_serial_write(hw, rfpath, regaddr, data);
 	}
 
-	spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
+	spin_unlock(&rtlpriv->locks.rf_lock);
 
 	RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), "
 					       "bitmask(%#x), data(%#x), "
@@ -281,7 +279,6 @@
 {
 
 	int i;
-	bool rtstatus = true;
 	u32 *radioa_array_table;
 	u32 *radiob_array_table;
 	u16 radioa_arraylen, radiob_arraylen;
@@ -308,7 +305,6 @@
 			 ("Radio_B:RTL8192CE_RADIOB_1TARRAY\n"));
 	}
 	RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("Radio No %x\n", rfpath));
-	rtstatus = true;
 	switch (rfpath) {
 	case RF90_PATH_A:
 		for (i = 0; i < radioa_arraylen; i = i + 2) {
@@ -521,7 +517,6 @@
 	u8 i, queue_id;
 	struct rtl8192_tx_ring *ring = NULL;
 
-	ppsc->set_rfpowerstate_inprogress = true;
 	switch (rfpwr_state) {
 	case ERFON:{
 			if ((ppsc->rfpwr_state == ERFOFF) &&
@@ -617,7 +612,6 @@
 	}
 	if (bresult)
 		ppsc->rfpwr_state = rfpwr_state;
-	ppsc->set_rfpowerstate_inprogress = false;
 	return bresult;
 }
 
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
index 54b2bd5..2492cc2 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
@@ -592,7 +592,6 @@
 	struct ieee80211_hdr *hdr;
 	u8 *tmp_buf;
 	u8 *praddr;
-	u8 *psaddr;
 	__le16 fc;
 	u16 type, c_fc;
 	bool packet_matchbssid, packet_toself, packet_beacon;
@@ -604,7 +603,6 @@
 	c_fc = le16_to_cpu(fc);
 	type = WLAN_FC_GET_TYPE(fc);
 	praddr = hdr->addr1;
-	psaddr = hdr->addr2;
 
 	packet_matchbssid =
 	    ((IEEE80211_FTYPE_CTL != type) &&
@@ -932,6 +930,7 @@
 	if (istx == true) {
 		switch (desc_name) {
 		case HW_DESC_OWN:
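+			/* Ensure all other descriptor fields are written
+			 * before ownership is handed to the hardware.
+			 */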
+			wmb();
 			SET_TX_DESC_OWN(pdesc, 1);
 			break;
 		case HW_DESC_TX_NEXTDESC_ADDR:
@@ -945,6 +944,7 @@
 	} else {
 		switch (desc_name) {
 		case HW_DESC_RXOWN:
+			wmb();
 			SET_RX_DESC_OWN(pdesc, 1);
 			break;
 		case HW_DESC_RXBUFF_ADDR:
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
index f8514cb..4e057df 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
@@ -1113,7 +1113,6 @@
 	struct ieee80211_hdr *hdr;
 	u8 *tmp_buf;
 	u8 *praddr;
-	u8 *psaddr;
 	__le16 fc;
 	u16 type, cpu_fc;
 	bool packet_matchbssid, packet_toself, packet_beacon;
@@ -1124,7 +1123,6 @@
 	cpu_fc = le16_to_cpu(fc);
 	type = WLAN_FC_GET_TYPE(fc);
 	praddr = hdr->addr1;
-	psaddr = hdr->addr2;
 	packet_matchbssid =
 	    ((IEEE80211_FTYPE_CTL != type) &&
 	     (!compare_ether_addr(mac->bssid,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
index 9a3d023..7285290 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
@@ -470,7 +470,6 @@
 	u8 i, queue_id;
 	struct rtl8192_tx_ring *ring = NULL;
 
-	ppsc->set_rfpowerstate_inprogress = true;
 	switch (rfpwr_state) {
 	case ERFON:
 		if ((ppsc->rfpwr_state == ERFOFF) &&
@@ -590,7 +589,6 @@
 	}
 	if (bresult)
 		ppsc->rfpwr_state = rfpwr_state;
-	ppsc->set_rfpowerstate_inprogress = false;
 	return bresult;
 }
 
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/dm.c b/drivers/net/wireless/rtlwifi/rtl8192se/dm.c
index da86db8..609c7ec 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/dm.c
@@ -222,7 +222,6 @@
 	u32 low_rssi_thresh = 0;
 	u32 middle_rssi_thresh = 0;
 	u32 high_rssi_thresh = 0;
-	u8 rssi_level;
 	struct ieee80211_sta *sta = NULL;
 
 	if (is_hal_stop(rtlhal))
@@ -272,18 +271,14 @@
 		if (rtlpriv->dm.undecorated_smoothed_pwdb >
 		    (long)high_rssi_thresh) {
 			ra->ratr_state = DM_RATR_STA_HIGH;
-			rssi_level = 1;
 		} else if (rtlpriv->dm.undecorated_smoothed_pwdb >
 			   (long)middle_rssi_thresh) {
 			ra->ratr_state = DM_RATR_STA_LOW;
-			rssi_level = 3;
 		} else if (rtlpriv->dm.undecorated_smoothed_pwdb >
 			   (long)low_rssi_thresh) {
 			ra->ratr_state = DM_RATR_STA_LOW;
-			rssi_level = 5;
 		} else {
 			ra->ratr_state = DM_RATR_STA_ULTRALOW;
-			rssi_level = 6;
 		}
 
 		if (ra->pre_ratr_state != ra->ratr_state) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/fw.c b/drivers/net/wireless/rtlwifi/rtl8192se/fw.c
index 3b5af01..6f91a14 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/fw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/fw.c
@@ -358,7 +358,6 @@
 	struct fw_priv *pfw_priv = NULL;
 	u8 *puc_mappedfile = NULL;
 	u32 ul_filelength = 0;
-	u32 file_length = 0;
 	u8 fwhdr_size = RT_8192S_FIRMWARE_HDR_SIZE;
 	u8 fwstatus = FW_STATUS_INIT;
 	bool rtstatus = true;
@@ -370,7 +369,6 @@
 	firmware->fwstatus = FW_STATUS_INIT;
 
 	puc_mappedfile = firmware->sz_fw_tmpbuffer;
-	file_length = firmware->sz_fw_tmpbufferlen;
 
 	/* 1. Retrieve FW header. */
 	firmware->pfwheader = (struct fw_hdr *) puc_mappedfile;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
index 2e9005d..35dd12d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
@@ -884,12 +884,10 @@
 	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
 
 	u8 reg_bw_opmode = 0;
-	u32 reg_ratr = 0, reg_rrsr = 0;
+	u32 reg_rrsr = 0;
 	u8 regtmp = 0;
 
 	reg_bw_opmode = BW_OPMODE_20MHZ;
-	reg_ratr = RATE_ALL_CCK | RATE_ALL_OFDM_AG | RATE_ALL_OFDM_1SS |
-				RATE_ALL_OFDM_2SS;
 	reg_rrsr = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
 
 	regtmp = rtl_read_byte(rtlpriv, INIRTSMCS_SEL);
@@ -1122,14 +1120,12 @@
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	u8 bt_msr = rtl_read_byte(rtlpriv, MSR);
-	enum led_ctl_mode ledaction = LED_CTL_NO_LINK;
 	u32 temp;
 	bt_msr &= ~MSR_LINK_MASK;
 
 	switch (type) {
 	case NL80211_IFTYPE_UNSPECIFIED:
 		bt_msr |= (MSR_LINK_NONE << MSR_LINK_SHIFT);
-		ledaction = LED_CTL_LINK;
 		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
 			 ("Set Network type to NO LINK!\n"));
 		break;
@@ -1140,7 +1136,6 @@
 		break;
 	case NL80211_IFTYPE_STATION:
 		bt_msr |= (MSR_LINK_MANAGED << MSR_LINK_SHIFT);
-		ledaction = LED_CTL_LINK;
 		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
 			 ("Set Network type to STA!\n"));
 		break;
@@ -1231,6 +1226,7 @@
 	rtl_write_dword(rtlpriv, INTA_MASK + 4, 0);
 
 	rtlpci->irq_enabled = false;
+	synchronize_irq(rtlpci->pdev->irq);
 }
 
 
@@ -2271,7 +2267,7 @@
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
 	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
-	enum rf_pwrstate rfpwr_toset, cur_rfstate;
+	enum rf_pwrstate rfpwr_toset;
 	unsigned long flag = 0;
 	bool actuallyset = false;
 	bool turnonbypowerdomain = false;
@@ -2292,7 +2288,7 @@
 		spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
 	}
 
-	cur_rfstate = ppsc->rfpwr_state;
 
 	/* because after _rtl92s_phy_set_rfhalt, all power
 	 * closed, so we must open some power for GPIO check,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
index 63b45e6..7ee2dac 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
@@ -180,19 +180,18 @@
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	u32 original_value, readback_value, bitshift;
-	unsigned long flags;
 
 	RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), rfpath(%#x), "
 		 "bitmask(%#x)\n", regaddr, rfpath, bitmask));
 
-	spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
+	spin_lock(&rtlpriv->locks.rf_lock);
 
 	original_value = _rtl92s_phy_rf_serial_read(hw, rfpath, regaddr);
 
 	bitshift = _rtl92s_phy_calculate_bit_shift(bitmask);
 	readback_value = (original_value & bitmask) >> bitshift;
 
-	spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
+	spin_unlock(&rtlpriv->locks.rf_lock);
 
 	RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), rfpath(%#x), "
 		 "bitmask(%#x), original_value(%#x)\n", regaddr, rfpath,
@@ -207,7 +206,6 @@
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_phy *rtlphy = &(rtlpriv->phy);
 	u32 original_value, bitshift;
-	unsigned long flags;
 
 	if (!((rtlphy->rf_pathmap >> rfpath) & 0x1))
 		return;
@@ -215,7 +213,7 @@
 	RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), bitmask(%#x),"
 		 " data(%#x), rfpath(%#x)\n", regaddr, bitmask, data, rfpath));
 
-	spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
+	spin_lock(&rtlpriv->locks.rf_lock);
 
 	if (bitmask != RFREG_OFFSET_MASK) {
 		original_value = _rtl92s_phy_rf_serial_read(hw, rfpath,
@@ -226,7 +224,7 @@
 
 	_rtl92s_phy_rf_serial_write(hw, rfpath, regaddr, data);
 
-	spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
+	spin_unlock(&rtlpriv->locks.rf_lock);
 
 	RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), bitmask(%#x), "
 		 "data(%#x), rfpath(%#x)\n", regaddr, bitmask, data, rfpath));
@@ -263,7 +261,6 @@
 	struct rtl_phy *rtlphy = &(rtlpriv->phy);
 	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
 	u8 reg_bw_opmode;
-	u8 reg_prsr_rsc;
 
 	RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, ("Switch to %s bandwidth\n",
 		  rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
@@ -277,7 +274,8 @@
 	rtlphy->set_bwmode_inprogress = true;
 
 	reg_bw_opmode = rtl_read_byte(rtlpriv, BW_OPMODE);
-	reg_prsr_rsc = rtl_read_byte(rtlpriv, RRSR + 2);
+	/* dummy read */
+	rtl_read_byte(rtlpriv, RRSR + 2);
 
 	switch (rtlphy->current_chan_bw) {
 	case HT_CHANNEL_WIDTH_20:
@@ -546,8 +544,6 @@
 	if (rfpwr_state == ppsc->rfpwr_state)
 		return false;
 
-	ppsc->set_rfpowerstate_inprogress = true;
-
 	switch (rfpwr_state) {
 	case ERFON:{
 			if ((ppsc->rfpwr_state == ERFOFF) &&
@@ -659,8 +655,6 @@
 	if (bresult)
 		ppsc->rfpwr_state = rfpwr_state;
 
-	ppsc->set_rfpowerstate_inprogress = false;
-
 	return bresult;
 }
 
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
index 5cf4423..d509cf6 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
@@ -581,7 +581,6 @@
 	struct ieee80211_hdr *hdr;
 	u8 *tmp_buf;
 	u8 *praddr;
-	u8 *psaddr;
 	__le16 fc;
 	u16 type, cfc;
 	bool packet_matchbssid, packet_toself, packet_beacon;
@@ -593,7 +592,6 @@
 	cfc = le16_to_cpu(fc);
 	type = WLAN_FC_GET_TYPE(fc);
 	praddr = hdr->addr1;
-	psaddr = hdr->addr2;
 
 	packet_matchbssid = ((IEEE80211_FTYPE_CTL != type) &&
 	     (!compare_ether_addr(mac->bssid, (cfc & IEEE80211_FCTL_TODS) ?
@@ -875,6 +873,7 @@
 		SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)(skb->len));
 		SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, cpu_to_le32(mapping));
 
+		wmb();
 		SET_TX_DESC_OWN(pdesc, 1);
 	} else { /* H2C Command Desc format (Host TXCMD) */
 		/* 92SE must set as 1 for firmware download HW DMA error */
@@ -893,6 +892,7 @@
 		SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)(skb->len));
 		SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, cpu_to_le32(mapping));
 
+		wmb();
 		SET_TX_DESC_OWN(pdesc, 1);
 
 	}
@@ -903,6 +903,7 @@
 	if (istx == true) {
 		switch (desc_name) {
 		case HW_DESC_OWN:
+			wmb();
 			SET_TX_DESC_OWN(pdesc, 1);
 			break;
 		case HW_DESC_TX_NEXTDESC_ADDR:
@@ -916,6 +917,7 @@
 	} else {
 		switch (desc_name) {
 		case HW_DESC_RXOWN:
+			wmb();
 			SET_RX_STATUS_DESC_OWN(pdesc, 1);
 			break;
 		case HW_DESC_RXBUFF_ADDR:
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index 693395e..9d003e0 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -1188,7 +1188,6 @@
 
 struct rtl_ps_ctl {
 	bool pwrdomain_protect;
-	bool set_rfpowerstate_inprogress;
 	bool in_powersavemode;
 	bool rfchange_inprogress;
 	bool swrf_processing;
@@ -1536,6 +1535,7 @@
 	/* For SW LPS */
 	struct delayed_work ps_work;
 	struct delayed_work ps_rfon_wq;
+	struct tasklet_struct ips_leave_tasklet;
 };
 
 struct rtl_debug {
diff --git a/drivers/net/wireless/wl1251/sdio.c b/drivers/net/wireless/wl1251/sdio.c
index f51a024..f786942 100644
--- a/drivers/net/wireless/wl1251/sdio.c
+++ b/drivers/net/wireless/wl1251/sdio.c
@@ -19,6 +19,7 @@
  * Copyright (C) 2008 Google Inc
  * Copyright (C) 2009 Bob Copeland (me@bobcopeland.com)
  */
+#include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/mod_devicetable.h>
 #include <linux/mmc/sdio_func.h>
diff --git a/drivers/net/wireless/wl1251/spi.c b/drivers/net/wireless/wl1251/spi.c
index af6448c..eaa5f95 100644
--- a/drivers/net/wireless/wl1251/spi.c
+++ b/drivers/net/wireless/wl1251/spi.c
@@ -19,6 +19,7 @@
  *
  */
 
+#include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/module.h>
 #include <linux/slab.h>
diff --git a/drivers/net/wireless/wl12xx/boot.c b/drivers/net/wireless/wl12xx/boot.c
index b07f8b7..7ccec07 100644
--- a/drivers/net/wireless/wl12xx/boot.c
+++ b/drivers/net/wireless/wl12xx/boot.c
@@ -485,7 +485,8 @@
 	if (wl->bss_type == BSS_TYPE_AP_BSS)
 		wl->event_mask |= STA_REMOVE_COMPLETE_EVENT_ID;
 	else
-		wl->event_mask |= DUMMY_PACKET_EVENT_ID;
+		wl->event_mask |= DUMMY_PACKET_EVENT_ID |
+			BA_SESSION_RX_CONSTRAINT_EVENT_ID;
 
 	ret = wl1271_event_unmask(wl);
 	if (ret < 0) {
diff --git a/drivers/net/wireless/wl12xx/event.c b/drivers/net/wireless/wl12xx/event.c
index c3c554c..94bbd00 100644
--- a/drivers/net/wireless/wl12xx/event.c
+++ b/drivers/net/wireless/wl12xx/event.c
@@ -168,6 +168,21 @@
 	wl->last_rssi_event = event;
 }
 
+static void wl1271_stop_ba_event(struct wl1271 *wl, u8 ba_allowed)
+{
+	/* Convert the value to bool */
+	wl->ba_allowed = !!ba_allowed;
+
+	/*
+	 * Return if no RX BA sessions are open, or if the event
+	 * indicates that BA is allowed.
+	 */
+	if ((!wl->ba_rx_bitmap) || (wl->ba_allowed))
+		return;
+
+	ieee80211_stop_rx_ba_session(wl->vif, wl->ba_rx_bitmap, wl->bssid);
+}
+
 static void wl1271_event_mbox_dump(struct event_mailbox *mbox)
 {
 	wl1271_debug(DEBUG_EVENT, "MBOX DUMP:");
@@ -252,6 +267,14 @@
 			wl1271_event_rssi_trigger(wl, mbox);
 	}
 
+	if ((vector & BA_SESSION_RX_CONSTRAINT_EVENT_ID) && !is_ap) {
+		wl1271_debug(DEBUG_EVENT, "BA_SESSION_RX_CONSTRAINT_EVENT_ID. "
+			     "ba_allowed = 0x%x", mbox->ba_allowed);
+
+		if (wl->vif)
+			wl1271_stop_ba_event(wl, mbox->ba_allowed);
+	}
+
 	if ((vector & DUMMY_PACKET_EVENT_ID) && !is_ap) {
 		wl1271_debug(DEBUG_EVENT, "DUMMY_PACKET_ID_EVENT_ID");
 		if (wl->vif)
diff --git a/drivers/net/wireless/wl12xx/event.h b/drivers/net/wireless/wl12xx/event.h
index b6cf06e..ce99adf 100644
--- a/drivers/net/wireless/wl12xx/event.h
+++ b/drivers/net/wireless/wl12xx/event.h
@@ -71,7 +71,7 @@
 	HEALTH_CHECK_REPLY_EVENT_ID		 = BIT(27),
 	PERIODIC_SCAN_COMPLETE_EVENT_ID		 = BIT(28),
 	PERIODIC_SCAN_REPORT_EVENT_ID		 = BIT(29),
-	BA_SESSION_TEAR_DOWN_EVENT_ID		 = BIT(30),
+	BA_SESSION_RX_CONSTRAINT_EVENT_ID	 = BIT(30),
 	EVENT_MBOX_ALL_EVENT_ID			 = 0x7fffffff,
 };
 
@@ -122,7 +122,20 @@
 	__le16 sta_aging_status;
 	__le16 sta_tx_retry_exceeded;
 
-	u8 reserved_5[24];
+	/*
+	 * Bitmap; each bit set represents the Role ID for which this
+	 * constraint is set. Range: 0 - FF, where FF means ANY role.
+	 */
+	u8 ba_role_id;
+	/*
+	 * Bitmap; each bit set represents the Link ID for which this
+	 * constraint is set. Not applicable if ba_role_id is set to
+	 * ANY role (FF). Range: 0 - FFFF, where FFFF means ANY link
+	 * in that role.
+	 */
+	u8 ba_link_id;
+	u8 ba_allowed;
+
+	u8 reserved_5[21];
 } __packed;
 
 int wl1271_event_unmask(struct wl1271 *wl);
diff --git a/drivers/net/wireless/wl12xx/init.c b/drivers/net/wireless/wl12xx/init.c
index a8f4f15..f5c2c9e 100644
--- a/drivers/net/wireless/wl12xx/init.c
+++ b/drivers/net/wireless/wl12xx/init.c
@@ -541,6 +541,7 @@
 
 	/* Reset the BA RX indicators */
 	wl->ba_rx_bitmap = 0;
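+	/* BA is assumed allowed until the firmware raises an RX BA
+	 * constraint event saying otherwise.
+	 */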
+	wl->ba_allowed = true;
 
 	/* validate that FW support BA */
 	wl1271_check_ba_support(wl);
diff --git a/drivers/net/wireless/wl12xx/io.h b/drivers/net/wireless/wl12xx/io.h
index beed621..20b0031 100644
--- a/drivers/net/wireless/wl12xx/io.h
+++ b/drivers/net/wireless/wl12xx/io.h
@@ -25,6 +25,7 @@
 #ifndef __IO_H__
 #define __IO_H__
 
+#include <linux/irqreturn.h>
 #include "reg.h"
 
 #define HW_ACCESS_MEMORY_MAX_RANGE	0x1FFC0
diff --git a/drivers/net/wireless/wl12xx/main.c b/drivers/net/wireless/wl12xx/main.c
index e6497dc..f37f0b8 100644
--- a/drivers/net/wireless/wl12xx/main.c
+++ b/drivers/net/wireless/wl12xx/main.c
@@ -3354,9 +3354,12 @@
 	if (ret < 0)
 		goto out;
 
+	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
+		     tid, action);
+
 	switch (action) {
 	case IEEE80211_AMPDU_RX_START:
-		if (wl->ba_support) {
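+		/* Only accept the session when BA is supported and the
+		 * firmware has not raised an RX BA constraint.
+		 */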
+		if ((wl->ba_support) && (wl->ba_allowed)) {
 			ret = wl1271_acx_set_ba_receiver_session(wl, tid, *ssn,
 								 true);
 			if (!ret)
diff --git a/drivers/net/wireless/wl12xx/spi.c b/drivers/net/wireless/wl12xx/spi.c
index 51662bb..beebf64 100644
--- a/drivers/net/wireless/wl12xx/spi.c
+++ b/drivers/net/wireless/wl12xx/spi.c
@@ -21,6 +21,7 @@
  *
  */
 
+#include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/module.h>
 #include <linux/crc7.h>
diff --git a/drivers/net/wireless/wl12xx/wl12xx.h b/drivers/net/wireless/wl12xx/wl12xx.h
index fbe8f46..3bc794a 100644
--- a/drivers/net/wireless/wl12xx/wl12xx.h
+++ b/drivers/net/wireless/wl12xx/wl12xx.h
@@ -564,6 +564,7 @@
 	/* RX BA constraint value */
 	bool ba_support;
 	u8 ba_rx_bitmap;
+	bool ba_allowed;
 
 	int tcxo_clock;
 
diff --git a/drivers/net/xilinx_emaclite.c b/drivers/net/xilinx_emaclite.c
index 372572c..039d976 100644
--- a/drivers/net/xilinx_emaclite.c
+++ b/drivers/net/xilinx_emaclite.c
@@ -26,6 +26,7 @@
 #include <linux/of_mdio.h>
 #include <linux/of_net.h>
 #include <linux/phy.h>
+#include <linux/interrupt.h>
 
 #define DRIVER_NAME "xilinx_emaclite"
 
@@ -647,7 +648,8 @@
 	dev->stats.rx_packets++;
 	dev->stats.rx_bytes += len;
 
-	netif_rx(skb);		/* Send the packet upstream */
+	if (!skb_defer_rx_timestamp(skb))
+		netif_rx(skb);	/* Send the packet upstream */
 }
 
 /**
@@ -1029,15 +1031,19 @@
 	spin_lock_irqsave(&lp->reset_lock, flags);
 	if (xemaclite_send_data(lp, (u8 *) new_skb->data, len) != 0) {
 		/* If the Emaclite Tx buffer is busy, stop the Tx queue and
-		 * defer the skb for transmission at a later point when the
+		 * defer the skb for transmission during the ISR, after the
 		 * current transmission is complete */
 		netif_stop_queue(dev);
 		lp->deferred_skb = new_skb;
+		/* Take the time stamp now, since we can't do this in an ISR. */
+		skb_tx_timestamp(new_skb);
 		spin_unlock_irqrestore(&lp->reset_lock, flags);
 		return 0;
 	}
 	spin_unlock_irqrestore(&lp->reset_lock, flags);
 
+	skb_tx_timestamp(new_skb);
+
 	dev->stats.tx_bytes += len;
 	dev_kfree_skb(new_skb);
 
diff --git a/drivers/net/znet.c b/drivers/net/znet.c
index ec2800f..8b88817 100644
--- a/drivers/net/znet.c
+++ b/drivers/net/znet.c
@@ -731,7 +731,7 @@
 		cur_frame_end_offset -= ((count + 1)>>1) + 3;
 		if (cur_frame_end_offset < 0)
 		  cur_frame_end_offset += RX_BUF_SIZE/2;
-	};
+	}
 
 	/* Now step  forward through the list. */
 	do {
diff --git a/drivers/s390/net/ctcm_mpc.h b/drivers/s390/net/ctcm_mpc.h
index 5336120..1fa07b0 100644
--- a/drivers/s390/net/ctcm_mpc.h
+++ b/drivers/s390/net/ctcm_mpc.h
@@ -12,6 +12,7 @@
 #ifndef _CTC_MPC_H_
 #define _CTC_MPC_H_
 
+#include <linux/interrupt.h>
 #include <linux/skbuff.h>
 #include "fsm.h"
 
diff --git a/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h b/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h
index 97a61b4..e1f1e34 100644
--- a/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h
+++ b/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h
@@ -19,6 +19,23 @@
 /*
  * doorbell message sent to the chip
  */
+struct b577xx_doorbell {
+#if defined(__BIG_ENDIAN)
+	u16 zero_fill2;
+	u8 zero_fill1;
+	struct b577xx_doorbell_hdr header;
+#elif defined(__LITTLE_ENDIAN)
+	struct b577xx_doorbell_hdr header;
+	u8 zero_fill1;
+	u16 zero_fill2;
+#endif
+};
+
+
+
+/*
+ * doorbell message sent to the chip
+ */
 struct b577xx_doorbell_set_prod {
 #if defined(__BIG_ENDIAN)
 	u16 prod;
@@ -39,106 +56,63 @@
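+/*
+ * Structures tagged $$KEEP_ENDIANNESS$$ below use a fixed little-endian
+ * (__le*) field layout; the former dual __BIG_ENDIAN/__LITTLE_ENDIAN
+ * variants of these definitions have been dropped.
+ */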
 
 
 /*
- * Fixed size structure in order to plant it in Union structure
+ * ABTS info $$KEEP_ENDIANNESS$$
  */
-struct fcoe_abts_rsp_union {
-	u32 r_ctl;
-	u32 abts_rsp_payload[7];
+struct fcoe_abts_info {
+	__le16 aborted_task_id;
+	__le16 reserved0;
+	__le32 reserved1;
 };
 
 
 /*
- * 4 regs size
+ * Fixed size structure in order to plant it in Union structure
+ * $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_abts_rsp_union {
+	u8 r_ctl;
+	u8 rsrv[3];
+	__le32 abts_rsp_payload[7];
+};
+
+
+/*
+ * 4 regs size $$KEEP_ENDIANNESS$$
  */
 struct fcoe_bd_ctx {
-	u32 buf_addr_hi;
-	u32 buf_addr_lo;
-#if defined(__BIG_ENDIAN)
-	u16 rsrv0;
-	u16 buf_len;
-#elif defined(__LITTLE_ENDIAN)
-	u16 buf_len;
-	u16 rsrv0;
-#endif
-#if defined(__BIG_ENDIAN)
-	u16 rsrv1;
-	u16 flags;
-#elif defined(__LITTLE_ENDIAN)
-	u16 flags;
-	u16 rsrv1;
-#endif
+	__le32 buf_addr_hi;
+	__le32 buf_addr_lo;
+	__le16 buf_len;
+	__le16 rsrv0;
+	__le16 flags;
+	__le16 rsrv1;
 };
 
 
-struct fcoe_cleanup_flow_info {
-#if defined(__BIG_ENDIAN)
-	u16 reserved1;
-	u16 task_id;
-#elif defined(__LITTLE_ENDIAN)
-	u16 task_id;
-	u16 reserved1;
-#endif
-	u32 reserved2[7];
+/*
+ * FCoE cached sges context $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_cached_sge_ctx {
+	struct regpair cur_buf_addr;
+	__le16 cur_buf_rem;
+	__le16 second_buf_rem;
+	struct regpair second_buf_addr;
 };
 
 
-struct fcoe_fcp_cmd_payload {
-	u32 opaque[8];
-};
-
-struct fcoe_fc_hdr {
-#if defined(__BIG_ENDIAN)
-	u8 cs_ctl;
-	u8 s_id[3];
-#elif defined(__LITTLE_ENDIAN)
-	u8 s_id[3];
-	u8 cs_ctl;
-#endif
-#if defined(__BIG_ENDIAN)
-	u8 r_ctl;
-	u8 d_id[3];
-#elif defined(__LITTLE_ENDIAN)
-	u8 d_id[3];
-	u8 r_ctl;
-#endif
-#if defined(__BIG_ENDIAN)
-	u8 seq_id;
-	u8 df_ctl;
-	u16 seq_cnt;
-#elif defined(__LITTLE_ENDIAN)
-	u16 seq_cnt;
-	u8 df_ctl;
-	u8 seq_id;
-#endif
-#if defined(__BIG_ENDIAN)
-	u8 type;
-	u8 f_ctl[3];
-#elif defined(__LITTLE_ENDIAN)
-	u8 f_ctl[3];
-	u8 type;
-#endif
-	u32 parameters;
-#if defined(__BIG_ENDIAN)
-	u16 ox_id;
-	u16 rx_id;
-#elif defined(__LITTLE_ENDIAN)
-	u16 rx_id;
-	u16 ox_id;
-#endif
-};
-
-struct fcoe_fc_frame {
-	struct fcoe_fc_hdr fc_hdr;
-	u32 reserved0[2];
-};
-
-union fcoe_cmd_flow_info {
-	struct fcoe_fcp_cmd_payload fcp_cmd_payload;
-	struct fcoe_fc_frame mp_fc_frame;
+/*
+ * Cleanup info $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_cleanup_info {
+	__le16 cleaned_task_id;
+	__le16 rolled_tx_seq_cnt;
+	__le32 rolled_tx_data_offset;
 };
 
 
-
+/*
+ * Fcp RSP flags $$KEEP_ENDIANNESS$$
+ */
 struct fcoe_fcp_rsp_flags {
 	u8 flags;
 #define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID (0x1<<0)
@@ -155,95 +129,168 @@
 #define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS_SHIFT 5
 };
 
-
+/*
+ * Fcp RSP payload $$KEEP_ENDIANNESS$$
+ */
 struct fcoe_fcp_rsp_payload {
 	struct regpair reserved0;
-	u32 fcp_resid;
-#if defined(__BIG_ENDIAN)
-	u16 retry_delay_timer;
-	struct fcoe_fcp_rsp_flags fcp_flags;
-	u8 scsi_status_code;
-#elif defined(__LITTLE_ENDIAN)
+	__le32 fcp_resid;
 	u8 scsi_status_code;
 	struct fcoe_fcp_rsp_flags fcp_flags;
-	u16 retry_delay_timer;
-#endif
-	u32 fcp_rsp_len;
-	u32 fcp_sns_len;
+	__le16 retry_delay_timer;
+	__le32 fcp_rsp_len;
+	__le32 fcp_sns_len;
 };
 
-
 /*
  * Fixed size structure in order to plant it in Union structure
+ * $$KEEP_ENDIANNESS$$
  */
 struct fcoe_fcp_rsp_union {
 	struct fcoe_fcp_rsp_payload payload;
 	struct regpair reserved0;
 };
 
-
-struct fcoe_fcp_xfr_rdy_payload {
-	u32 burst_len;
-	u32 data_ro;
+/*
+ * FC header $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_fc_hdr {
+	u8 s_id[3];
+	u8 cs_ctl;
+	u8 d_id[3];
+	u8 r_ctl;
+	__le16 seq_cnt;
+	u8 df_ctl;
+	u8 seq_id;
+	u8 f_ctl[3];
+	u8 type;
+	__le32 parameters;
+	__le16 rx_id;
+	__le16 ox_id;
 };
 
-struct fcoe_read_flow_info {
-	struct fcoe_fc_hdr fc_data_in_hdr;
-	u32 reserved[2];
+/*
+ * FC header union $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_mp_rsp_union {
+	struct fcoe_fc_hdr fc_hdr;
+	__le32 mp_payload_len;
+	__le32 rsrv;
 };
 
-struct fcoe_write_flow_info {
-	struct fcoe_fc_hdr fc_data_out_hdr;
-	struct fcoe_fcp_xfr_rdy_payload fcp_xfr_payload;
-};
-
-union fcoe_rsp_flow_info {
+/*
+ * Completion information $$KEEP_ENDIANNESS$$
+ */
+union fcoe_comp_flow_info {
 	struct fcoe_fcp_rsp_union fcp_rsp;
 	struct fcoe_abts_rsp_union abts_rsp;
+	struct fcoe_mp_rsp_union mp_rsp;
+	__le32 opaque[8];
 };
 
+
 /*
- * 32 bytes used for general purposes
+ * External ABTS info $$KEEP_ENDIANNESS$$
  */
-union fcoe_general_task_ctx {
-	union fcoe_cmd_flow_info cmd_info;
-	struct fcoe_read_flow_info read_info;
-	struct fcoe_write_flow_info write_info;
-	union fcoe_rsp_flow_info rsp_info;
-	struct fcoe_cleanup_flow_info cleanup_info;
-	u32 comp_info[8];
+struct fcoe_ext_abts_info {
+	__le32 rsrv0[6];
+	struct fcoe_abts_info ctx;
 };
 
 
 /*
- * FCoE KCQ CQE parameters
+ * External cleanup info $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_ext_cleanup_info {
+	__le32 rsrv0[6];
+	struct fcoe_cleanup_info ctx;
+};
+
+
+/*
+ * Fcoe FW Tx sequence context $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_fw_tx_seq_ctx {
+	__le32 data_offset;
+	__le16 seq_cnt;
+	__le16 rsrv0;
+};
+
+/*
+ * Fcoe external FW Tx sequence context $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_ext_fw_tx_seq_ctx {
+	__le32 rsrv0[6];
+	struct fcoe_fw_tx_seq_ctx ctx;
+};
+
+
+/*
+ * FCoE multiple sges context $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_mul_sges_ctx {
+	struct regpair cur_sge_addr;
+	__le16 cur_sge_off;
+	u8 cur_sge_idx;
+	u8 sgl_size;
+};
+
+/*
+ * FCoE external multiple sges context $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_ext_mul_sges_ctx {
+	struct fcoe_mul_sges_ctx mul_sgl;
+	struct regpair rsrv0;
+};
+
+
+/*
+ * FCP CMD payload $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_fcp_cmd_payload {
+	__le32 opaque[8];
+};
+
+
+
+
+
+/*
+ * Fcp xfr rdy payload $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_fcp_xfr_rdy_payload {
+	__le32 burst_len;
+	__le32 data_ro;
+};
+
+
+/*
+ * FC frame $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_fc_frame {
+	struct fcoe_fc_hdr fc_hdr;
+	__le32 reserved0[2];
+};
+
+
+
+
+/*
+ * FCoE KCQ CQE parameters $$KEEP_ENDIANNESS$$
  */
 union fcoe_kcqe_params {
-	u32 reserved0[4];
+	__le32 reserved0[4];
 };
 
 /*
- * FCoE KCQ CQE
+ * FCoE KCQ CQE $$KEEP_ENDIANNESS$$
  */
 struct fcoe_kcqe {
-	u32 fcoe_conn_id;
-	u32 completion_status;
-	u32 fcoe_conn_context_id;
+	__le32 fcoe_conn_id;
+	__le32 completion_status;
+	__le32 fcoe_conn_context_id;
 	union fcoe_kcqe_params params;
-#if defined(__BIG_ENDIAN)
-	u8 flags;
-#define FCOE_KCQE_RESERVED0 (0x7<<0)
-#define FCOE_KCQE_RESERVED0_SHIFT 0
-#define FCOE_KCQE_RAMROD_COMPLETION (0x1<<3)
-#define FCOE_KCQE_RAMROD_COMPLETION_SHIFT 3
-#define FCOE_KCQE_LAYER_CODE (0x7<<4)
-#define FCOE_KCQE_LAYER_CODE_SHIFT 4
-#define FCOE_KCQE_LINKED_WITH_NEXT (0x1<<7)
-#define FCOE_KCQE_LINKED_WITH_NEXT_SHIFT 7
-	u8 op_code;
-	u16 qe_self_seq;
-#elif defined(__LITTLE_ENDIAN)
-	u16 qe_self_seq;
+	__le16 qe_self_seq;
 	u8 op_code;
 	u8 flags;
 #define FCOE_KCQE_RESERVED0 (0x7<<0)
@@ -254,23 +301,14 @@
 #define FCOE_KCQE_LAYER_CODE_SHIFT 4
 #define FCOE_KCQE_LINKED_WITH_NEXT (0x1<<7)
 #define FCOE_KCQE_LINKED_WITH_NEXT_SHIFT 7
-#endif
 };
 
+
+
 /*
- * FCoE KWQE header
+ * FCoE KWQE header $$KEEP_ENDIANNESS$$
  */
 struct fcoe_kwqe_header {
-#if defined(__BIG_ENDIAN)
-	u8 flags;
-#define FCOE_KWQE_HEADER_RESERVED0 (0xF<<0)
-#define FCOE_KWQE_HEADER_RESERVED0_SHIFT 0
-#define FCOE_KWQE_HEADER_LAYER_CODE (0x7<<4)
-#define FCOE_KWQE_HEADER_LAYER_CODE_SHIFT 4
-#define FCOE_KWQE_HEADER_RESERVED1 (0x1<<7)
-#define FCOE_KWQE_HEADER_RESERVED1_SHIFT 7
-	u8 op_code;
-#elif defined(__LITTLE_ENDIAN)
 	u8 op_code;
 	u8 flags;
 #define FCOE_KWQE_HEADER_RESERVED0 (0xF<<0)
@@ -279,50 +317,23 @@
 #define FCOE_KWQE_HEADER_LAYER_CODE_SHIFT 4
 #define FCOE_KWQE_HEADER_RESERVED1 (0x1<<7)
 #define FCOE_KWQE_HEADER_RESERVED1_SHIFT 7
-#endif
 };
 
 /*
- * FCoE firmware init request 1
+ * FCoE firmware init request 1 $$KEEP_ENDIANNESS$$
  */
 struct fcoe_kwqe_init1 {
-#if defined(__BIG_ENDIAN)
+	__le16 num_tasks;
 	struct fcoe_kwqe_header hdr;
-	u16 num_tasks;
-#elif defined(__LITTLE_ENDIAN)
-	u16 num_tasks;
-	struct fcoe_kwqe_header hdr;
-#endif
-	u32 task_list_pbl_addr_lo;
-	u32 task_list_pbl_addr_hi;
-	u32 dummy_buffer_addr_lo;
-	u32 dummy_buffer_addr_hi;
-#if defined(__BIG_ENDIAN)
-	u16 rq_num_wqes;
-	u16 sq_num_wqes;
-#elif defined(__LITTLE_ENDIAN)
-	u16 sq_num_wqes;
-	u16 rq_num_wqes;
-#endif
-#if defined(__BIG_ENDIAN)
-	u16 cq_num_wqes;
-	u16 rq_buffer_log_size;
-#elif defined(__LITTLE_ENDIAN)
-	u16 rq_buffer_log_size;
-	u16 cq_num_wqes;
-#endif
-#if defined(__BIG_ENDIAN)
-	u8 flags;
-#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE (0xF<<0)
-#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT 0
-#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC (0x7<<4)
-#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC_SHIFT 4
-#define FCOE_KWQE_INIT1_RESERVED1 (0x1<<7)
-#define FCOE_KWQE_INIT1_RESERVED1_SHIFT 7
-	u8 num_sessions_log;
-	u16 mtu;
-#elif defined(__LITTLE_ENDIAN)
-	u16 mtu;
+	__le32 task_list_pbl_addr_lo;
+	__le32 task_list_pbl_addr_hi;
+	__le32 dummy_buffer_addr_lo;
+	__le32 dummy_buffer_addr_hi;
+	__le16 sq_num_wqes;
+	__le16 rq_num_wqes;
+	__le16 rq_buffer_log_size;
+	__le16 cq_num_wqes;
+	__le16 mtu;
 	u8 num_sessions_log;
 	u8 flags;
 #define FCOE_KWQE_INIT1_LOG_PAGE_SIZE (0xF<<0)
@@ -331,113 +342,73 @@
 #define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC_SHIFT 4
 #define FCOE_KWQE_INIT1_RESERVED1 (0x1<<7)
 #define FCOE_KWQE_INIT1_RESERVED1_SHIFT 7
-#endif
 };
 
 /*
- * FCoE firmware init request 2
+ * FCoE firmware init request 2 $$KEEP_ENDIANNESS$$
  */
 struct fcoe_kwqe_init2 {
-#if defined(__BIG_ENDIAN)
+	u8 hsi_major_version;
+	u8 hsi_minor_version;
 	struct fcoe_kwqe_header hdr;
-	u16 reserved0;
-#elif defined(__LITTLE_ENDIAN)
-	u16 reserved0;
-	struct fcoe_kwqe_header hdr;
-#endif
-	u32 hash_tbl_pbl_addr_lo;
-	u32 hash_tbl_pbl_addr_hi;
-	u32 t2_hash_tbl_addr_lo;
-	u32 t2_hash_tbl_addr_hi;
-	u32 t2_ptr_hash_tbl_addr_lo;
-	u32 t2_ptr_hash_tbl_addr_hi;
-	u32 free_list_count;
+	__le32 hash_tbl_pbl_addr_lo;
+	__le32 hash_tbl_pbl_addr_hi;
+	__le32 t2_hash_tbl_addr_lo;
+	__le32 t2_hash_tbl_addr_hi;
+	__le32 t2_ptr_hash_tbl_addr_lo;
+	__le32 t2_ptr_hash_tbl_addr_hi;
+	__le32 free_list_count;
 };
 
 /*
- * FCoE firmware init request 3
+ * FCoE firmware init request 3 $$KEEP_ENDIANNESS$$
  */
 struct fcoe_kwqe_init3 {
-#if defined(__BIG_ENDIAN)
+	__le16 reserved0;
 	struct fcoe_kwqe_header hdr;
-	u16 reserved0;
-#elif defined(__LITTLE_ENDIAN)
-	u16 reserved0;
-	struct fcoe_kwqe_header hdr;
-#endif
-	u32 error_bit_map_lo;
-	u32 error_bit_map_hi;
-#if defined(__BIG_ENDIAN)
+	__le32 error_bit_map_lo;
+	__le32 error_bit_map_hi;
+	u8 perf_config;
 	u8 reserved21[3];
-	u8 cached_session_enable;
-#elif defined(__LITTLE_ENDIAN)
-	u8 cached_session_enable;
-	u8 reserved21[3];
-#endif
-	u32 reserved2[4];
+	__le32 reserved2[4];
 };
 
 /*
- * FCoE connection offload request 1
+ * FCoE connection offload request 1 $$KEEP_ENDIANNESS$$
  */
 struct fcoe_kwqe_conn_offload1 {
-#if defined(__BIG_ENDIAN)
+	__le16 fcoe_conn_id;
 	struct fcoe_kwqe_header hdr;
-	u16 fcoe_conn_id;
-#elif defined(__LITTLE_ENDIAN)
-	u16 fcoe_conn_id;
-	struct fcoe_kwqe_header hdr;
-#endif
-	u32 sq_addr_lo;
-	u32 sq_addr_hi;
-	u32 rq_pbl_addr_lo;
-	u32 rq_pbl_addr_hi;
-	u32 rq_first_pbe_addr_lo;
-	u32 rq_first_pbe_addr_hi;
-#if defined(__BIG_ENDIAN)
-	u16 reserved0;
-	u16 rq_prod;
-#elif defined(__LITTLE_ENDIAN)
-	u16 rq_prod;
-	u16 reserved0;
-#endif
+	__le32 sq_addr_lo;
+	__le32 sq_addr_hi;
+	__le32 rq_pbl_addr_lo;
+	__le32 rq_pbl_addr_hi;
+	__le32 rq_first_pbe_addr_lo;
+	__le32 rq_first_pbe_addr_hi;
+	__le16 rq_prod;
+	__le16 reserved0;
 };
 
 /*
- * FCoE connection offload request 2
+ * FCoE connection offload request 2 $$KEEP_ENDIANNESS$$
  */
 struct fcoe_kwqe_conn_offload2 {
-#if defined(__BIG_ENDIAN)
+	__le16 tx_max_fc_pay_len;
 	struct fcoe_kwqe_header hdr;
-	u16 tx_max_fc_pay_len;
-#elif defined(__LITTLE_ENDIAN)
-	u16 tx_max_fc_pay_len;
-	struct fcoe_kwqe_header hdr;
-#endif
-	u32 cq_addr_lo;
-	u32 cq_addr_hi;
-	u32 xferq_addr_lo;
-	u32 xferq_addr_hi;
-	u32 conn_db_addr_lo;
-	u32 conn_db_addr_hi;
-	u32 reserved1;
+	__le32 cq_addr_lo;
+	__le32 cq_addr_hi;
+	__le32 xferq_addr_lo;
+	__le32 xferq_addr_hi;
+	__le32 conn_db_addr_lo;
+	__le32 conn_db_addr_hi;
+	__le32 reserved1;
 };
 
 /*
- * FCoE connection offload request 3
+ * FCoE connection offload request 3 $$KEEP_ENDIANNESS$$
  */
 struct fcoe_kwqe_conn_offload3 {
-#if defined(__BIG_ENDIAN)
-	struct fcoe_kwqe_header hdr;
-	u16 vlan_tag;
-#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID (0xFFF<<0)
-#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT 0
-#define FCOE_KWQE_CONN_OFFLOAD3_CFI (0x1<<12)
-#define FCOE_KWQE_CONN_OFFLOAD3_CFI_SHIFT 12
-#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY (0x7<<13)
-#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT 13
-#elif defined(__LITTLE_ENDIAN)
-	u16 vlan_tag;
+	__le16 vlan_tag;
 #define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID (0xFFF<<0)
 #define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT 0
 #define FCOE_KWQE_CONN_OFFLOAD3_CFI (0x1<<12)
@@ -445,34 +416,8 @@
 #define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY (0x7<<13)
 #define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT 13
 	struct fcoe_kwqe_header hdr;
-#endif
-#if defined(__BIG_ENDIAN)
-	u8 tx_max_conc_seqs_c3;
-	u8 s_id[3];
-#elif defined(__LITTLE_ENDIAN)
 	u8 s_id[3];
 	u8 tx_max_conc_seqs_c3;
-#endif
-#if defined(__BIG_ENDIAN)
-	u8 flags;
-#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS (0x1<<0)
-#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT 0
-#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES (0x1<<1)
-#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT 1
-#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT (0x1<<2)
-#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT 2
-#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ (0x1<<3)
-#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT 3
-#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID (0x1<<4)
-#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT 4
-#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID (0x1<<5)
-#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID_SHIFT 5
-#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0 (0x1<<6)
-#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0_SHIFT 6
-#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG (0x1<<7)
-#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT 7
-	u8 d_id[3];
-#elif defined(__LITTLE_ENDIAN)
 	u8 d_id[3];
 	u8 flags;
 #define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS (0x1<<0)
@@ -491,69 +436,44 @@
 #define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0_SHIFT 6
 #define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG (0x1<<7)
 #define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT 7
-#endif
-	u32 reserved;
-	u32 confq_first_pbe_addr_lo;
-	u32 confq_first_pbe_addr_hi;
-#if defined(__BIG_ENDIAN)
-	u16 rx_max_fc_pay_len;
-	u16 tx_total_conc_seqs;
-#elif defined(__LITTLE_ENDIAN)
-	u16 tx_total_conc_seqs;
-	u16 rx_max_fc_pay_len;
-#endif
-#if defined(__BIG_ENDIAN)
-	u8 rx_open_seqs_exch_c3;
-	u8 rx_max_conc_seqs_c3;
-	u16 rx_total_conc_seqs;
-#elif defined(__LITTLE_ENDIAN)
-	u16 rx_total_conc_seqs;
+	__le32 reserved;
+	__le32 confq_first_pbe_addr_lo;
+	__le32 confq_first_pbe_addr_hi;
+	__le16 tx_total_conc_seqs;
+	__le16 rx_max_fc_pay_len;
+	__le16 rx_total_conc_seqs;
 	u8 rx_max_conc_seqs_c3;
 	u8 rx_open_seqs_exch_c3;
-#endif
 };
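
Every bitfield in this header follows one convention: the value macro is a
mask already shifted into position, and the companion _SHIFT macro is the
bit offset. A hypothetical helper composing vlan_tag from its subfields
(a real caller would store the result through cpu_to_le16(), since the
field is __le16):

	static u16 fcoe_offload3_vlan_tag(u16 vid, u8 cfi, u8 prio)
	{
		u16 tag = 0;

		tag |= (vid << FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT) &
			FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID;
		tag |= (cfi << FCOE_KWQE_CONN_OFFLOAD3_CFI_SHIFT) &
			FCOE_KWQE_CONN_OFFLOAD3_CFI;
		tag |= (prio << FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT) &
			FCOE_KWQE_CONN_OFFLOAD3_PRIORITY;
		return tag;
	}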
 
 /*
- * FCoE connection offload request 4
+ * FCoE connection offload request 4 $$KEEP_ENDIANNESS$$
  */
 struct fcoe_kwqe_conn_offload4 {
-#if defined(__BIG_ENDIAN)
-	struct fcoe_kwqe_header hdr;
-	u8 reserved2;
-	u8 e_d_tov_timer_val;
-#elif defined(__LITTLE_ENDIAN)
 	u8 e_d_tov_timer_val;
 	u8 reserved2;
 	struct fcoe_kwqe_header hdr;
-#endif
-	u8 src_mac_addr_lo32[4];
-#if defined(__BIG_ENDIAN)
-	u8 dst_mac_addr_hi16[2];
-	u8 src_mac_addr_hi16[2];
-#elif defined(__LITTLE_ENDIAN)
-	u8 src_mac_addr_hi16[2];
-	u8 dst_mac_addr_hi16[2];
-#endif
-	u8 dst_mac_addr_lo32[4];
-	u32 lcq_addr_lo;
-	u32 lcq_addr_hi;
-	u32 confq_pbl_base_addr_lo;
-	u32 confq_pbl_base_addr_hi;
+	u8 src_mac_addr_lo[2];
+	u8 src_mac_addr_mid[2];
+	u8 src_mac_addr_hi[2];
+	u8 dst_mac_addr_hi[2];
+	u8 dst_mac_addr_lo[2];
+	u8 dst_mac_addr_mid[2];
+	__le32 lcq_addr_lo;
+	__le32 lcq_addr_hi;
+	__le32 confq_pbl_base_addr_lo;
+	__le32 confq_pbl_base_addr_hi;
 };
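
The old src_mac_addr_lo32[4]/src_mac_addr_hi16[2] split becomes three
two-byte pairs (lo/mid/hi). As the bnx2fc_hwi.c hunks later in this patch
show, the driver stores the address with the bytes reversed inside each
pair; a hypothetical helper capturing that mapping:

	static void fcoe_scatter_mac(u8 hi[2], u8 mid[2], u8 lo[2],
				     const u8 mac[ETH_ALEN])
	{
		hi[0]  = mac[1];	/* mac[] indexed as port->data_src_addr */
		hi[1]  = mac[0];
		mid[0] = mac[3];
		mid[1] = mac[2];
		lo[0]  = mac[5];
		lo[1]  = mac[4];
	}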
 
 /*
- * FCoE connection enable request
+ * FCoE connection enable request $$KEEP_ENDIANNESS$$
  */
 struct fcoe_kwqe_conn_enable_disable {
-#if defined(__BIG_ENDIAN)
+	__le16 reserved0;
 	struct fcoe_kwqe_header hdr;
-	u16 reserved0;
-#elif defined(__LITTLE_ENDIAN)
-	u16 reserved0;
-	struct fcoe_kwqe_header hdr;
-#endif
-	u8 src_mac_addr_lo32[4];
-#if defined(__BIG_ENDIAN)
+	u8 src_mac_addr_lo[2];
+	u8 src_mac_addr_mid[2];
+	u8 src_mac_addr_hi[2];
 	u16 vlan_tag;
 #define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID (0xFFF<<0)
 #define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT 0
@@ -561,92 +481,52 @@
 #define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI_SHIFT 12
 #define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY (0x7<<13)
 #define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT 13
-	u8 src_mac_addr_hi16[2];
-#elif defined(__LITTLE_ENDIAN)
-	u8 src_mac_addr_hi16[2];
-	u16 vlan_tag;
-#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID (0xFFF<<0)
-#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT 0
-#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI (0x1<<12)
-#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI_SHIFT 12
-#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY (0x7<<13)
-#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT 13
-#endif
-	u8 dst_mac_addr_lo32[4];
-#if defined(__BIG_ENDIAN)
-	u16 reserved1;
-	u8 dst_mac_addr_hi16[2];
-#elif defined(__LITTLE_ENDIAN)
-	u8 dst_mac_addr_hi16[2];
-	u16 reserved1;
-#endif
-#if defined(__BIG_ENDIAN)
-	u8 vlan_flag;
-	u8 s_id[3];
-#elif defined(__LITTLE_ENDIAN)
+	u8 dst_mac_addr_lo[2];
+	u8 dst_mac_addr_mid[2];
+	u8 dst_mac_addr_hi[2];
+	__le16 reserved1;
 	u8 s_id[3];
 	u8 vlan_flag;
-#endif
-#if defined(__BIG_ENDIAN)
-	u8 reserved3;
-	u8 d_id[3];
-#elif defined(__LITTLE_ENDIAN)
 	u8 d_id[3];
 	u8 reserved3;
-#endif
-	u32 context_id;
-	u32 conn_id;
-	u32 reserved4;
+	__le32 context_id;
+	__le32 conn_id;
+	__le32 reserved4;
 };
 
 /*
- * FCoE connection destroy request
+ * FCoE connection destroy request $$KEEP_ENDIANNESS$$
  */
 struct fcoe_kwqe_conn_destroy {
-#if defined(__BIG_ENDIAN)
+	__le16 reserved0;
 	struct fcoe_kwqe_header hdr;
-	u16 reserved0;
-#elif defined(__LITTLE_ENDIAN)
-	u16 reserved0;
-	struct fcoe_kwqe_header hdr;
-#endif
-	u32 context_id;
-	u32 conn_id;
-	u32 reserved1[5];
+	__le32 context_id;
+	__le32 conn_id;
+	__le32 reserved1[5];
 };
 
 /*
- * FCoe destroy request
+ * FCoE destroy request $$KEEP_ENDIANNESS$$
  */
 struct fcoe_kwqe_destroy {
-#if defined(__BIG_ENDIAN)
+	__le16 reserved0;
 	struct fcoe_kwqe_header hdr;
-	u16 reserved0;
-#elif defined(__LITTLE_ENDIAN)
-	u16 reserved0;
-	struct fcoe_kwqe_header hdr;
-#endif
-	u32 reserved1[7];
+	__le32 reserved1[7];
 };
 
 /*
- * FCoe statistics request
+ * FCoE statistics request $$KEEP_ENDIANNESS$$
  */
 struct fcoe_kwqe_stat {
-#if defined(__BIG_ENDIAN)
+	__le16 reserved0;
 	struct fcoe_kwqe_header hdr;
-	u16 reserved0;
-#elif defined(__LITTLE_ENDIAN)
-	u16 reserved0;
-	struct fcoe_kwqe_header hdr;
-#endif
-	u32 stat_params_addr_lo;
-	u32 stat_params_addr_hi;
-	u32 reserved1[5];
+	__le32 stat_params_addr_lo;
+	__le32 stat_params_addr_hi;
+	__le32 reserved1[5];
 };
 
 /*
- * FCoE KWQ WQE
+ * FCoE KWQ WQE $$KEEP_ENDIANNESS$$
  */
 union fcoe_kwqe {
 	struct fcoe_kwqe_init1 init1;
@@ -662,19 +542,42 @@
 	struct fcoe_kwqe_stat statistics;
 };
 
-struct fcoe_mul_sges_ctx {
-	struct regpair cur_sge_addr;
-#if defined(__BIG_ENDIAN)
-	u8 sgl_size;
-	u8 cur_sge_idx;
-	u16 cur_sge_off;
-#elif defined(__LITTLE_ENDIAN)
-	u16 cur_sge_off;
-	u8 cur_sge_idx;
-	u8 sgl_size;
-#endif
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+/*
+ * TX SGL context $$KEEP_ENDIANNESS$$
+ */
+union fcoe_sgl_union_ctx {
+	struct fcoe_cached_sge_ctx cached_sge;
+	struct fcoe_ext_mul_sges_ctx sgl;
+	__le32 opaque[5];
 };
 
+/*
+ * Data-In/ELS/BLS information $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_read_flow_info {
+	union fcoe_sgl_union_ctx sgl_ctx;
+	__le32 rsrv0[3];
+};
+
+
+/*
+ * FCoE stat context $$KEEP_ENDIANNESS$$
+ */
 struct fcoe_s_stat_ctx {
 	u8 flags;
 #define FCOE_S_STAT_CTX_ACTIVE (0x1<<0)
@@ -693,51 +596,34 @@
 #define FCOE_S_STAT_CTX_RSRV1_SHIFT 6
 };
 
-struct fcoe_seq_ctx {
-#if defined(__BIG_ENDIAN)
-	u16 low_seq_cnt;
-	struct fcoe_s_stat_ctx s_stat;
-	u8 seq_id;
-#elif defined(__LITTLE_ENDIAN)
+/*
+ * FCoE rx seq context $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_rx_seq_ctx {
 	u8 seq_id;
 	struct fcoe_s_stat_ctx s_stat;
-	u16 low_seq_cnt;
-#endif
-#if defined(__BIG_ENDIAN)
-	u16 err_seq_cnt;
-	u16 high_seq_cnt;
-#elif defined(__LITTLE_ENDIAN)
-	u16 high_seq_cnt;
-	u16 err_seq_cnt;
-#endif
-	u32 low_exp_ro;
-	u32 high_exp_ro;
+	__le16 seq_cnt;
+	__le32 low_exp_ro;
+	__le32 high_exp_ro;
 };
 
 
-struct fcoe_single_sge_ctx {
-	struct regpair cur_buf_addr;
-#if defined(__BIG_ENDIAN)
-	u16 reserved0;
-	u16 cur_buf_rem;
-#elif defined(__LITTLE_ENDIAN)
-	u16 cur_buf_rem;
-	u16 reserved0;
-#endif
-};
-
-union fcoe_sgl_ctx {
-	struct fcoe_single_sge_ctx single_sge;
-	struct fcoe_mul_sges_ctx mul_sges;
+/*
+ * FCoE rx_wr union context $$KEEP_ENDIANNESS$$
+ */
+union fcoe_rx_wr_union_ctx {
+	struct fcoe_read_flow_info read_info;
+	union fcoe_comp_flow_info comp_info;
+	__le32 opaque[8];
 };
 
 
 
 /*
- * FCoE SQ element
+ * FCoE SQ element $$KEEP_ENDIANNESS$$
  */
 struct fcoe_sqe {
-	u16 wqe;
+	__le16 wqe;
 #define FCOE_SQE_TASK_ID (0x7FFF<<0)
 #define FCOE_SQE_TASK_ID_SHIFT 0
 #define FCOE_SQE_TOGGLE_BIT (0x1<<15)
@@ -746,135 +632,141 @@
 
 
 
-struct fcoe_task_ctx_entry_tx_only {
-	union fcoe_sgl_ctx sgl_ctx;
-};
-
-struct fcoe_task_ctx_entry_txwr_rxrd {
-#if defined(__BIG_ENDIAN)
-	u16 verify_tx_seq;
-	u8 init_flags;
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE (0x7<<0)
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT 0
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE (0x1<<3)
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT 3
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE (0x1<<4)
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT 4
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE (0x1<<5)
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE_SHIFT 5
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5 (0x3<<6)
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5_SHIFT 6
-	u8 tx_flags;
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE (0xF<<0)
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT 0
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4 (0xF<<4)
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4_SHIFT 4
-#elif defined(__LITTLE_ENDIAN)
-	u8 tx_flags;
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE (0xF<<0)
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT 0
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4 (0xF<<4)
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4_SHIFT 4
-	u8 init_flags;
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE (0x7<<0)
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT 0
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE (0x1<<3)
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT 3
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE (0x1<<4)
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT 4
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE (0x1<<5)
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE_SHIFT 5
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5 (0x3<<6)
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5_SHIFT 6
-	u16 verify_tx_seq;
-#endif
-};
-
 /*
- * Common section. Both TX and RX processing might write and read from it in
- * different flows
+ * 14 regs $$KEEP_ENDIANNESS$$
  */
-struct fcoe_task_ctx_entry_tx_rx_cmn {
-	u32 data_2_trns;
-	union fcoe_general_task_ctx general;
-#if defined(__BIG_ENDIAN)
-	u16 tx_low_seq_cnt;
-	struct fcoe_s_stat_ctx tx_s_stat;
-	u8 tx_seq_id;
-#elif defined(__LITTLE_ENDIAN)
-	u8 tx_seq_id;
-	struct fcoe_s_stat_ctx tx_s_stat;
-	u16 tx_low_seq_cnt;
-#endif
-	u32 common_flags;
-#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID (0xFFFFFF<<0)
-#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT 0
-#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID (0x1<<24)
-#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID_SHIFT 24
-#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_SEQ_INIT (0x1<<25)
-#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_SEQ_INIT_SHIFT 25
-#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_XFER (0x1<<26)
-#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_XFER_SHIFT 26
-#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_CONF (0x1<<27)
-#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_CONF_SHIFT 27
-#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME (0x1<<28)
-#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME_SHIFT 28
-#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_RSRV (0x7<<29)
-#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_RSRV_SHIFT 29
+struct fcoe_tce_tx_only {
+	union fcoe_sgl_union_ctx sgl_ctx;
+	__le32 rsrv0;
 };
 
-struct fcoe_task_ctx_entry_rxwr_txrd {
-#if defined(__BIG_ENDIAN)
-	u16 rx_id;
-	u16 rx_flags;
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE (0xF<<0)
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE_SHIFT 0
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE (0x7<<4)
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE_SHIFT 4
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ (0x1<<7)
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ_SHIFT 7
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME (0x1<<8)
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME_SHIFT 8
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0 (0x7F<<9)
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0_SHIFT 9
-#elif defined(__LITTLE_ENDIAN)
-	u16 rx_flags;
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE (0xF<<0)
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE_SHIFT 0
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE (0x7<<4)
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE_SHIFT 4
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ (0x1<<7)
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ_SHIFT 7
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME (0x1<<8)
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME_SHIFT 8
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0 (0x7F<<9)
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0_SHIFT 9
-	u16 rx_id;
-#endif
+/*
+ * 32 bytes (8 regs) used for TX only purposes $$KEEP_ENDIANNESS$$
+ */
+union fcoe_tx_wr_rx_rd_union_ctx {
+	struct fcoe_fc_frame tx_frame;
+	struct fcoe_fcp_cmd_payload fcp_cmd;
+	struct fcoe_ext_cleanup_info cleanup;
+	struct fcoe_ext_abts_info abts;
+	struct fcoe_ext_fw_tx_seq_ctx tx_seq;
+	__le32 opaque[8];
 };
 
-struct fcoe_task_ctx_entry_rx_only {
-	struct fcoe_seq_ctx seq_ctx;
-	struct fcoe_seq_ctx ooo_seq_ctx;
-	u32 rsrv3;
-	union fcoe_sgl_ctx sgl_ctx;
+/*
+ * tce_tx_wr_rx_rd_const $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_tce_tx_wr_rx_rd_const {
+	u8 init_flags;
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE (0x7<<0)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT 0
+#define FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE (0x1<<3)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT 3
+#define FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE (0x1<<4)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT 4
+#define FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE (0x3<<5)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT 5
+#define FCOE_TCE_TX_WR_RX_RD_CONST_SUPPORT_REC_TOV (0x1<<7)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_SUPPORT_REC_TOV_SHIFT 7
+	u8 tx_flags;
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_VALID (0x1<<0)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_VALID_SHIFT 0
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE (0xF<<1)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT 1
+#define FCOE_TCE_TX_WR_RX_RD_CONST_RSRV1 (0x1<<5)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_RSRV1_SHIFT 5
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_SEQ_INIT (0x1<<6)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_SEQ_INIT_SHIFT 6
+#define FCOE_TCE_TX_WR_RX_RD_CONST_RSRV2 (0x1<<7)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_RSRV2_SHIFT 7
+	__le16 rsrv3;
+	__le32 verify_tx_seq;
 };
 
+/*
+ * tce_tx_wr_rx_rd $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_tce_tx_wr_rx_rd {
+	union fcoe_tx_wr_rx_rd_union_ctx union_ctx;
+	struct fcoe_tce_tx_wr_rx_rd_const const_ctx;
+};
+
+/*
+ * tce_rx_wr_tx_rd_const $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_tce_rx_wr_tx_rd_const {
+	__le32 data_2_trns;
+	__le32 init_flags;
+#define FCOE_TCE_RX_WR_TX_RD_CONST_CID (0xFFFFFF<<0)
+#define FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT 0
+#define FCOE_TCE_RX_WR_TX_RD_CONST_RSRV0 (0xFF<<24)
+#define FCOE_TCE_RX_WR_TX_RD_CONST_RSRV0_SHIFT 24
+};
+
+/*
+ * tce_rx_wr_tx_rd_var $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_tce_rx_wr_tx_rd_var {
+	__le16 rx_flags;
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV1 (0xF<<0)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV1_SHIFT 0
+#define FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE (0x7<<4)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE_SHIFT 4
+#define FCOE_TCE_RX_WR_TX_RD_VAR_CONF_REQ (0x1<<7)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_CONF_REQ_SHIFT 7
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE (0xF<<8)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE_SHIFT 8
+#define FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME (0x1<<12)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT 12
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_SEQ_INIT (0x1<<13)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_SEQ_INIT_SHIFT 13
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV2 (0x1<<14)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV2_SHIFT 14
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_VALID (0x1<<15)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_VALID_SHIFT 15
+	__le16 rx_id;
+	struct fcoe_fcp_xfr_rdy_payload fcp_xfr_rdy;
+};
+
+/*
+ * tce_rx_wr_tx_rd $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_tce_rx_wr_tx_rd {
+	struct fcoe_tce_rx_wr_tx_rd_const const_ctx;
+	struct fcoe_tce_rx_wr_tx_rd_var var_ctx;
+};
+
+/*
+ * tce_rx_only $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_tce_rx_only {
+	struct fcoe_rx_seq_ctx rx_seq_ctx;
+	union fcoe_rx_wr_union_ctx union_ctx;
+};
+
+/*
+ * task_ctx_entry $$KEEP_ENDIANNESS$$
+ */
 struct fcoe_task_ctx_entry {
-	struct fcoe_task_ctx_entry_tx_only tx_wr_only;
-	struct fcoe_task_ctx_entry_txwr_rxrd tx_wr_rx_rd;
-	struct fcoe_task_ctx_entry_tx_rx_cmn cmn;
-	struct fcoe_task_ctx_entry_rxwr_txrd rx_wr_tx_rd;
-	struct fcoe_task_ctx_entry_rx_only rx_wr_only;
-	u32 reserved[4];
+	struct fcoe_tce_tx_only txwr_only;
+	struct fcoe_tce_tx_wr_rx_rd txwr_rxrd;
+	struct fcoe_tce_rx_wr_tx_rd rxwr_txrd;
+	struct fcoe_tce_rx_only rxwr_only;
 };
 
 
+
+
+
+
+
+
+
+
 /*
- * FCoE XFRQ element
+ * FCoE XFRQ element $$KEEP_ENDIANNESS$$
  */
 struct fcoe_xfrqe {
-	u16 wqe;
+	__le16 wqe;
 #define FCOE_XFRQE_TASK_ID (0x7FFF<<0)
 #define FCOE_XFRQE_TASK_ID_SHIFT 0
 #define FCOE_XFRQE_TOGGLE_BIT (0x1<<15)
@@ -883,22 +775,31 @@
 
 
 /*
- * FCoE CONFQ element
+ * FCoE rx doorbell message sent to the chip $$KEEP_ENDIANNESS$$
  */
-struct fcoe_confqe {
-#if defined(__BIG_ENDIAN)
-	u16 rx_id;
-	u16 ox_id;
-#elif defined(__LITTLE_ENDIAN)
-	u16 ox_id;
-	u16 rx_id;
-#endif
-	u32 param;
+struct b577xx_fcoe_rx_doorbell {
+	struct b577xx_doorbell_hdr hdr;
+	u8 params;
+#define B577XX_FCOE_RX_DOORBELL_NEGATIVE_ARM (0x1F<<0)
+#define B577XX_FCOE_RX_DOORBELL_NEGATIVE_ARM_SHIFT 0
+#define B577XX_FCOE_RX_DOORBELL_OPCODE (0x7<<5)
+#define B577XX_FCOE_RX_DOORBELL_OPCODE_SHIFT 5
+	__le16 doorbell_cq_cons;
 };
 
 
 /*
- * FCoE connection data base
+ * FCoE CONFQ element $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_confqe {
+	__le16 ox_id;
+	__le16 rx_id;
+	__le32 param;
+};
+
+
+/*
+ * FCoE connection data base
  */
 struct fcoe_conn_db {
 #if defined(__BIG_ENDIAN)
@@ -914,10 +815,10 @@
 
 
 /*
- * FCoE CQ element
+ * FCoE CQ element $$KEEP_ENDIANNESS$$
  */
 struct fcoe_cqe {
-	u16 wqe;
+	__le16 wqe;
 #define FCOE_CQE_CQE_INFO (0x3FFF<<0)
 #define FCOE_CQE_CQE_INFO_SHIFT 0
 #define FCOE_CQE_CQE_TYPE (0x1<<14)
@@ -928,61 +829,46 @@
 
 
 /*
- * FCoE error/warning resporting entry
+ * FCoE error/warning reporting entry $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_partial_err_report_entry {
+	__le32 err_warn_bitmap_lo;
+	__le32 err_warn_bitmap_hi;
+	__le32 tx_buf_off;
+	__le32 rx_buf_off;
+};
+
+/*
+ * FCoE error/warning reporting entry $$KEEP_ENDIANNESS$$
  */
 struct fcoe_err_report_entry {
-	u32 err_warn_bitmap_lo;
-	u32 err_warn_bitmap_hi;
-	u32 tx_buf_off;
-	u32 rx_buf_off;
+	struct fcoe_partial_err_report_entry data;
 	struct fcoe_fc_hdr fc_hdr;
 };
 
 
 /*
- * FCoE hash table entry (32 bytes)
+ * FCoE hash table entry (32 bytes) $$KEEP_ENDIANNESS$$
  */
 struct fcoe_hash_table_entry {
-#if defined(__BIG_ENDIAN)
-	u8 d_id_0;
-	u8 s_id_2;
-	u8 s_id_1;
-	u8 s_id_0;
-#elif defined(__LITTLE_ENDIAN)
 	u8 s_id_0;
 	u8 s_id_1;
 	u8 s_id_2;
 	u8 d_id_0;
-#endif
-#if defined(__BIG_ENDIAN)
-	u16 dst_mac_addr_hi;
-	u8 d_id_2;
-	u8 d_id_1;
-#elif defined(__LITTLE_ENDIAN)
 	u8 d_id_1;
 	u8 d_id_2;
-	u16 dst_mac_addr_hi;
-#endif
-	u32 dst_mac_addr_lo;
-#if defined(__BIG_ENDIAN)
-	u16 vlan_id;
-	u16 src_mac_addr_hi;
-#elif defined(__LITTLE_ENDIAN)
-	u16 src_mac_addr_hi;
-	u16 vlan_id;
-#endif
-	u32 src_mac_addr_lo;
-#if defined(__BIG_ENDIAN)
-	u16 reserved1;
-	u8 reserved0;
-	u8 vlan_flag;
-#elif defined(__LITTLE_ENDIAN)
+	__le16 dst_mac_addr_hi;
+	__le16 dst_mac_addr_mid;
+	__le16 dst_mac_addr_lo;
+	__le16 src_mac_addr_hi;
+	__le16 vlan_id;
+	__le16 src_mac_addr_lo;
+	__le16 src_mac_addr_mid;
 	u8 vlan_flag;
 	u8 reserved0;
-	u16 reserved1;
-#endif
-	u32 reserved2;
-	u32 field_id;
+	__le16 reserved1;
+	__le32 reserved2;
+	__le32 field_id;
 #define FCOE_HASH_TABLE_ENTRY_CID (0xFFFFFF<<0)
 #define FCOE_HASH_TABLE_ENTRY_CID_SHIFT 0
 #define FCOE_HASH_TABLE_ENTRY_RESERVED3 (0x7F<<24)
@@ -991,11 +877,27 @@
 #define FCOE_HASH_TABLE_ENTRY_VALID_SHIFT 31
 };
 
+
 /*
- * FCoE pending work request CQE
+ * FCoE LCQ element $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_lcqe {
+	__le32 wqe;
+#define FCOE_LCQE_TASK_ID (0xFFFF<<0)
+#define FCOE_LCQE_TASK_ID_SHIFT 0
+#define FCOE_LCQE_LCQE_TYPE (0xFF<<16)
+#define FCOE_LCQE_LCQE_TYPE_SHIFT 16
+#define FCOE_LCQE_RESERVED (0xFF<<24)
+#define FCOE_LCQE_RESERVED_SHIFT 24
+};
+
+
+
+/*
+ * FCoE pending work request CQE $$KEEP_ENDIANNESS$$
  */
 struct fcoe_pend_wq_cqe {
-	u16 wqe;
+	__le16 wqe;
 #define FCOE_PEND_WQ_CQE_TASK_ID (0x3FFF<<0)
 #define FCOE_PEND_WQ_CQE_TASK_ID_SHIFT 0
 #define FCOE_PEND_WQ_CQE_CQE_TYPE (0x1<<14)
@@ -1006,53 +908,61 @@
 
 
 /*
- * FCoE RX statistics parameters section#0
+ * FCoE RX statistics parameters section#0 $$KEEP_ENDIANNESS$$
  */
 struct fcoe_rx_stat_params_section0 {
-	u32 fcoe_ver_cnt;
-	u32 fcoe_rx_pkt_cnt;
-	u32 fcoe_rx_byte_cnt;
-	u32 fcoe_rx_drop_pkt_cnt;
+	__le32 fcoe_rx_pkt_cnt;
+	__le32 fcoe_rx_byte_cnt;
 };
 
 
 /*
- * FCoE RX statistics parameters section#1
+ * FCoE RX statistics parameters section#1 $$KEEP_ENDIANNESS$$
  */
 struct fcoe_rx_stat_params_section1 {
-	u32 fc_crc_cnt;
-	u32 eofa_del_cnt;
-	u32 miss_frame_cnt;
-	u32 seq_timeout_cnt;
-	u32 drop_seq_cnt;
-	u32 fcoe_rx_drop_pkt_cnt;
-	u32 fcp_rx_pkt_cnt;
-	u32 reserved0;
+	__le32 fcoe_ver_cnt;
+	__le32 fcoe_rx_drop_pkt_cnt;
 };
 
 
 /*
- * FCoE TX statistics parameters
+ * FCoE RX statistics parameters section#2 $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_rx_stat_params_section2 {
+	__le32 fc_crc_cnt;
+	__le32 eofa_del_cnt;
+	__le32 miss_frame_cnt;
+	__le32 seq_timeout_cnt;
+	__le32 drop_seq_cnt;
+	__le32 fcoe_rx_drop_pkt_cnt;
+	__le32 fcp_rx_pkt_cnt;
+	__le32 reserved0;
+};
+
+
+/*
+ * FCoE TX statistics parameters $$KEEP_ENDIANNESS$$
  */
 struct fcoe_tx_stat_params {
-	u32 fcoe_tx_pkt_cnt;
-	u32 fcoe_tx_byte_cnt;
-	u32 fcp_tx_pkt_cnt;
-	u32 reserved0;
+	__le32 fcoe_tx_pkt_cnt;
+	__le32 fcoe_tx_byte_cnt;
+	__le32 fcp_tx_pkt_cnt;
+	__le32 reserved0;
 };
 
 /*
- * FCoE statistics parameters
+ * FCoE statistics parameters $$KEEP_ENDIANNESS$$
  */
 struct fcoe_statistics_params {
 	struct fcoe_tx_stat_params tx_stat;
 	struct fcoe_rx_stat_params_section0 rx_stat0;
 	struct fcoe_rx_stat_params_section1 rx_stat1;
+	struct fcoe_rx_stat_params_section2 rx_stat2;
 };
 
 
 /*
- * FCoE t2 hash table entry (64 bytes)
+ * FCoE t2 hash table entry (64 bytes) $$KEEP_ENDIANNESS$$
  */
 struct fcoe_t2_hash_table_entry {
 	struct fcoe_hash_table_entry data;
@@ -1060,11 +970,13 @@
 	struct regpair reserved0[3];
 };
 
+
+
 /*
- * FCoE unsolicited CQE
+ * FCoE unsolicited CQE $$KEEP_ENDIANNESS$$
  */
 struct fcoe_unsolicited_cqe {
-	u16 wqe;
+	__le16 wqe;
 #define FCOE_UNSOLICITED_CQE_SUBTYPE (0x3<<0)
 #define FCOE_UNSOLICITED_CQE_SUBTYPE_SHIFT 0
 #define FCOE_UNSOLICITED_CQE_PKT_LEN (0xFFF<<2)
@@ -1075,6 +987,4 @@
 #define FCOE_UNSOLICITED_CQE_TOGGLE_BIT_SHIFT 15
 };
 
-
-
 #endif /* __57XX_FCOE_HSI_LINUX_LE__ */
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index 0a404bf..907672e 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -62,7 +62,7 @@
 #include "bnx2fc_constants.h"
 
 #define BNX2FC_NAME		"bnx2fc"
-#define BNX2FC_VERSION		"1.0.1"
+#define BNX2FC_VERSION		"1.0.3"
 
 #define PFX			"bnx2fc: "
 
@@ -262,9 +262,14 @@
 #define BNX2FC_FLAG_UPLD_REQ_COMPL	0x8
 #define BNX2FC_FLAG_EXPL_LOGO		0x9
 
+	u8 src_addr[ETH_ALEN];
 	u32 max_sqes;
 	u32 max_rqes;
 	u32 max_cqes;
+	atomic_t free_sqes;
+
+	struct b577xx_doorbell_set_prod sq_db;
+	struct b577xx_fcoe_rx_doorbell rx_db;
 
 	struct fcoe_sqe *sq;
 	dma_addr_t sq_dma;
@@ -274,7 +279,7 @@
 
 	struct fcoe_cqe *cq;
 	dma_addr_t cq_dma;
-	u32 cq_cons_idx;
+	u16 cq_cons_idx;
 	u8 cq_curr_toggle_bit;
 	u32 cq_mem_size;
 
@@ -505,6 +510,7 @@
 						   struct fc_frame *,
 						   void *),
 				      void *arg, u32 timeout);
+void bnx2fc_arm_cq(struct bnx2fc_rport *tgt);
 int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt);
 void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe);
 struct bnx2fc_rport *bnx2fc_tgt_lookup(struct fcoe_port *port,
diff --git a/drivers/scsi/bnx2fc/bnx2fc_constants.h b/drivers/scsi/bnx2fc/bnx2fc_constants.h
index fe77691..399cda0 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_constants.h
+++ b/drivers/scsi/bnx2fc/bnx2fc_constants.h
@@ -5,6 +5,12 @@
  * This file defines HSI constants for the FCoE flows
  */
 
+/* Current FCoE HSI version number, composed of two 8-bit fields (16 bits) */
+/* Incremented on a change that breaks the previous HSI */
+#define FCOE_HSI_MAJOR_VERSION (1)
+/* Incremented on a change that does not break the previous HSI */
+#define FCOE_HSI_MINOR_VERSION (1)
+
 /* KWQ/KCQ FCoE layer code */
 #define FCOE_KWQE_LAYER_CODE   (7)
 
@@ -40,21 +46,62 @@
 #define FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE	(0x3)
 #define FCOE_KCQE_COMPLETION_STATUS_CTX_FREE_FAILURE	(0x4)
 #define FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR			(0x5)
+#define FCOE_KCQE_COMPLETION_STATUS_WRONG_HSI_VERSION   (0x6)
+
+/* CQE type */
+#define FCOE_PENDING_CQE_TYPE			0
+#define FCOE_UNSOLIC_CQE_TYPE			1
 
 /* Unsolicited CQE type */
 #define FCOE_UNSOLICITED_FRAME_CQE_TYPE			0
 #define FCOE_ERROR_DETECTION_CQE_TYPE			1
 #define FCOE_WARNING_DETECTION_CQE_TYPE			2
 
+/* E_D_TOV timer resolution in ms */
+#define FCOE_E_D_TOV_TIMER_RESOLUTION_MS (20)
+
+/* E_D_TOV timer resolution for SDM (4 us ticks) */
+#define FCOE_E_D_TOV_SDM_TIMER_RESOLUTION				\
+	(FCOE_E_D_TOV_TIMER_RESOLUTION_MS * 1000 / 4)
+
+/* REC timer resolution in ms */
+#define FCOE_REC_TIMER_RESOLUTION_MS (20)
+
+/* REC timer resolution for SDM (4 us ticks) */
+#define FCOE_REC_SDM_TIMER_RESOLUTION (FCOE_REC_TIMER_RESOLUTION_MS * 1000 / 4)
+
+/* E_D_TOV timer default wraparound value (2 sec) in 20 ms resolution */
+#define FCOE_E_D_TOV_DEFAULT_WRAPAROUND_VAL			\
+			(2000 / FCOE_E_D_TOV_TIMER_RESOLUTION_MS)
+
+/* REC_TOV timer default wraparound value (3 sec) in 20 ms resolution */
+#define FCOE_REC_TOV_DEFAULT_WRAPAROUND_VAL			\
+			(3000 / FCOE_REC_TIMER_RESOLUTION_MS)
+
+#define FCOE_NUM_OF_TIMER_TASKS  (8 * 1024)
+
+#define FCOE_NUM_OF_CACHED_TASKS_TIMER (8)
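
Worked values for the constants above: one 20 ms resolution unit is
20 * 1000 / 4 = 5000 of the SDM's 4 us ticks, the 2 s E_D_TOV default wraps
after 2000 / 20 = 100 units, and the 3 s REC_TOV after 3000 / 20 = 150.
Hypothetical compile-time checks (BUILD_BUG_ON() must sit in function
scope):

	BUILD_BUG_ON(FCOE_E_D_TOV_SDM_TIMER_RESOLUTION != 5000);
	BUILD_BUG_ON(FCOE_E_D_TOV_DEFAULT_WRAPAROUND_VAL != 100);
	BUILD_BUG_ON(FCOE_REC_TOV_DEFAULT_WRAPAROUND_VAL != 150);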
+
 /* Task context constants */
+/******** Remove FCP_CMD write tce sleep ***********************/
+/* In case timer services are required, the state shall be updated by Xstorm
+ * after it starts processing the task. In case no timer facilities are
+ * required, the driver would initialize the state to this value
+ *
+#define	FCOE_TASK_TX_STATE_NORMAL				0
+ * After the driver has initialized the task, in case timer services are
+ * required *
+#define	FCOE_TASK_TX_STATE_INIT					1
+******** Remove FCP_CMD write tce sleep ***********************/
 /* After driver has initialize the task in case timer services required */
 #define	FCOE_TASK_TX_STATE_INIT					0
 /* In case timer services are required then shall be updated by Xstorm after
  * start processing the task. In case no timer facilities are required then the
- * driver would initialize the state to this value */
+ * driver would initialize the state to this value
+ */
 #define	FCOE_TASK_TX_STATE_NORMAL				1
 /* Task is under abort procedure. Updated in order to stop processing of
- * pending WQEs on this task */
+ * pending WQEs on this task
+ */
 #define	FCOE_TASK_TX_STATE_ABORT				2
 /* For E_D_T_TOV timer expiration in Xstorm (Class 2 only) */
 #define	FCOE_TASK_TX_STATE_ERROR				3
@@ -66,17 +113,8 @@
 #define	FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP			6
 /* For sequence cleanup request task */
 #define	FCOE_TASK_TX_STATE_SEQUENCE_CLEANUP			7
-/* Mark task as aborted and indicate that ABTS was not transmitted */
-#define	FCOE_TASK_TX_STATE_BEFORE_ABTS_TX			8
-/* Mark task as aborted and indicate that ABTS was transmitted */
-#define	FCOE_TASK_TX_STATE_AFTER_ABTS_TX			9
 /* For completion the ABTS task. */
-#define	FCOE_TASK_TX_STATE_ABTS_TX_COMPLETED			10
-/* Mark task as aborted and indicate that Exchange cleanup was not transmitted
- */
-#define	FCOE_TASK_TX_STATE_BEFORE_EXCHANGE_CLEANUP_TX		11
-/* Mark task as aborted and indicate that Exchange cleanup was transmitted */
-#define	FCOE_TASK_TX_STATE_AFTER_EXCHANGE_CLEANUP_TX		12
+#define	FCOE_TASK_TX_STATE_ABTS_TX				8
 
 #define	FCOE_TASK_RX_STATE_NORMAL				0
 #define	FCOE_TASK_RX_STATE_COMPLETED				1
@@ -86,25 +124,25 @@
 #define	FCOE_TASK_RX_STATE_WARNING				3
 /* For E_D_T_TOV timer expiration in Ustorm */
 #define	FCOE_TASK_RX_STATE_ERROR				4
-/* ABTS ACC arrived wait for local completion to finally complete the task. */
-#define	FCOE_TASK_RX_STATE_ABTS_ACC_ARRIVED			5
-/* local completion arrived wait for ABTS ACC to finally complete the task. */
-#define	FCOE_TASK_RX_STATE_ABTS_LOCAL_COMP_ARRIVED		6
+/* FW only: First visit at rx-path, part of the abts round trip */
+#define	FCOE_TASK_RX_STATE_ABTS_IN_PROCESS			5
+/* FW only: Second visit at rx-path, after ABTS frame transmitted */
+#define	FCOE_TASK_RX_STATE_ABTS_TRANSMITTED			6
 /* Special completion indication in case of task was aborted. */
 #define FCOE_TASK_RX_STATE_ABTS_COMPLETED			7
-/* Special completion indication in case of task was cleaned. */
-#define FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED		8
-/* Special completion indication (in task requested the exchange cleanup) in
- * case cleaned task is in non-valid. */
-#define FCOE_TASK_RX_STATE_ABORT_CLEANUP_COMPLETED		9
+/* FW only: First visit at rx-path, part of the cleanup round trip */
+#define	FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_IN_PROCESS		8
+/* FW only: Special completion indication in case of task was cleaned. */
+#define FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED		9
+/* Not in use: Special completion indication (the task requested the exchange
+ * cleanup) in case the cleaned task is not valid.
+ */
+#define FCOE_TASK_RX_STATE_ABORT_CLEANUP_COMPLETED		10
 /* Special completion indication (in task requested the sequence cleanup) in
- * case cleaned task was already returned to normal. */
-#define FCOE_TASK_RX_STATE_IGNORED_SEQUENCE_CLEANUP		10
-/* Exchange cleanup arrived wait until xfer will be handled to finally
- * complete the task. */
-#define	FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_ARRIVED		11
-/* Xfer handled, wait for exchange cleanup to finally complete the task. */
-#define	FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_HANDLED_XFER	12
+ * case cleaned task was already returned to normal.
+ */
+#define FCOE_TASK_RX_STATE_IGNORED_SEQUENCE_CLEANUP		11
+
 
 #define	FCOE_TASK_TYPE_WRITE			0
 #define	FCOE_TASK_TYPE_READ				1
@@ -120,11 +158,40 @@
 #define FCOE_TASK_CLASS_TYPE_3			0
 #define FCOE_TASK_CLASS_TYPE_2			1
 
+/* FCoE/FC packet fields  */
+#define	FCOE_ETH_TYPE					0x8906
+
+/* FCoE maximum elements in hash table */
+#define FCOE_MAX_ELEMENTS_IN_HASH_TABLE_ROW	8
+
+/* FCoE half of the elements in hash table */
+#define FCOE_HALF_ELEMENTS_IN_HASH_TABLE_ROW			\
+			(FCOE_MAX_ELEMENTS_IN_HASH_TABLE_ROW / 2)
+
+/* FcoE number of cached T2 entries */
+#define T_FCOE_NUMBER_OF_CACHED_T2_ENTRIES (4)
+
+/* FCoE maximum elements in hash table */
+#define FCOE_HASH_TBL_CHUNK_SIZE	16384
+
 /* Everest FCoE connection type */
 #define B577XX_FCOE_CONNECTION_TYPE		4
 
-/* Error codes for Error Reporting in fast path flows */
-/* XFER error codes */
+/* FCoE number of rows (in log). This number derives
+ * from the maximum connections supported which is 2048.
+ * TBA: Need a different constant for E2
+ */
+#define FCOE_MAX_NUM_SESSIONS_LOG		11
+
+#define FC_ABTS_REPLY_MAX_PAYLOAD_LEN	12
+
+/* Error codes for Error Reporting in slow path flows */
+#define FCOE_SLOW_PATH_ERROR_CODE_TOO_MANY_FUNCS			0
+#define FCOE_SLOW_PATH_ERROR_CODE_NO_LICENSE				1
+
+/* Error codes for Error Reporting in fast path flows
+ * XFER error codes
+ */
 #define FCOE_ERROR_CODE_XFER_OOO_RO					0
 #define FCOE_ERROR_CODE_XFER_RO_NOT_ALIGNED				1
 #define FCOE_ERROR_CODE_XFER_NULL_BURST_LEN				2
@@ -155,17 +222,17 @@
 #define FCOE_ERROR_CODE_DATA_SOFI3_SEQ_ACTIVE_SET			23
 #define FCOE_ERROR_CODE_DATA_SOFN_SEQ_ACTIVE_RESET			24
 #define FCOE_ERROR_CODE_DATA_EOFN_END_SEQ_SET				25
-#define FCOE_ERROR_CODE_DATA_EOFT_END_SEQ_RESET			26
-#define FCOE_ERROR_CODE_DATA_TASK_TYPE_NOT_READ			27
+#define FCOE_ERROR_CODE_DATA_EOFT_END_SEQ_RESET				26
+#define FCOE_ERROR_CODE_DATA_TASK_TYPE_NOT_READ				27
 #define FCOE_ERROR_CODE_DATA_FCTL					28
 
 /* Middle path error codes */
-#define FCOE_ERROR_CODE_MIDPATH_TYPE_NOT_ELS				29
+#define FCOE_ERROR_CODE_MIDPATH_INVALID_TYPE				29
 #define FCOE_ERROR_CODE_MIDPATH_SOFI3_SEQ_ACTIVE_SET			30
 #define FCOE_ERROR_CODE_MIDPATH_SOFN_SEQ_ACTIVE_RESET			31
 #define FCOE_ERROR_CODE_MIDPATH_EOFN_END_SEQ_SET			32
 #define FCOE_ERROR_CODE_MIDPATH_EOFT_END_SEQ_RESET			33
-#define FCOE_ERROR_CODE_MIDPATH_ELS_REPLY_FCTL				34
+#define FCOE_ERROR_CODE_MIDPATH_REPLY_FCTL				34
 #define FCOE_ERROR_CODE_MIDPATH_INVALID_REPLY				35
 #define FCOE_ERROR_CODE_MIDPATH_ELS_REPLY_RCTL				36
 
@@ -173,7 +240,7 @@
 #define FCOE_ERROR_CODE_ABTS_REPLY_F_CTL				37
 #define FCOE_ERROR_CODE_ABTS_REPLY_DDF_RCTL_FIELD			38
 #define FCOE_ERROR_CODE_ABTS_REPLY_INVALID_BLS_RCTL			39
-#define FCOE_ERROR_CODE_ABTS_REPLY_INVALID_RCTL			40
+#define FCOE_ERROR_CODE_ABTS_REPLY_INVALID_RCTL				40
 #define FCOE_ERROR_CODE_ABTS_REPLY_RCTL_GENERAL_MISMATCH		41
 
 /* Common error codes */
@@ -185,7 +252,7 @@
 #define FCOE_ERROR_CODE_COMMON_DATA_NO_MORE_SGES			47
 #define FCOE_ERROR_CODE_COMMON_OPTIONAL_FC_HDR				48
 #define FCOE_ERROR_CODE_COMMON_READ_TCE_OX_ID_TOO_BIG			49
-#define FCOE_ERROR_CODE_COMMON_DATA_WAS_NOT_TRANSMITTED		50
+#define FCOE_ERROR_CODE_COMMON_DATA_WAS_NOT_TRANSMITTED			50
 
 /* Unsolicited Rx error codes */
 #define FCOE_ERROR_CODE_UNSOLICITED_TYPE_NOT_ELS			51
diff --git a/drivers/scsi/bnx2fc/bnx2fc_els.c b/drivers/scsi/bnx2fc/bnx2fc_els.c
index 52c3584..7e89143 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_els.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_els.c
@@ -83,7 +83,7 @@
 	rrq.rrq_cmd = ELS_RRQ;
 	hton24(rrq.rrq_s_id, sid);
 	rrq.rrq_ox_id = htons(aborted_io_req->xid);
-	rrq.rrq_rx_id = htons(aborted_io_req->task->rx_wr_tx_rd.rx_id);
+	rrq.rrq_rx_id = htons(aborted_io_req->task->rxwr_txrd.var_ctx.rx_id);
 
 retry_rrq:
 	rc = bnx2fc_initiate_els(tgt, ELS_RRQ, &rrq, sizeof(rrq),
@@ -417,12 +417,13 @@
 
 	hdr = (u64 *)fc_hdr;
 	temp_hdr = (u64 *)
-		&task->cmn.general.cmd_info.mp_fc_frame.fc_hdr;
+		&task->rxwr_only.union_ctx.comp_info.mp_rsp.fc_hdr;
 	hdr[0] = cpu_to_be64(temp_hdr[0]);
 	hdr[1] = cpu_to_be64(temp_hdr[1]);
 	hdr[2] = cpu_to_be64(temp_hdr[2]);
 
-	mp_req->resp_len = task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_off;
+	mp_req->resp_len =
+		task->rxwr_only.union_ctx.comp_info.mp_rsp.mp_payload_len;
 
 	/* Parse ELS response */
 	if ((els_req->cb_func) && (els_req->cb_arg)) {
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index ab255fb..7a16ca1 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -21,7 +21,7 @@
 
 #define DRV_MODULE_NAME		"bnx2fc"
 #define DRV_MODULE_VERSION	BNX2FC_VERSION
-#define DRV_MODULE_RELDATE	"Mar 17, 2011"
+#define DRV_MODULE_RELDATE	"Jun 10, 2011"
 
 
 static char version[] __devinitdata =
@@ -612,7 +612,7 @@
 		BNX2FC_HBA_DBG(lport, "FW stat req timed out\n");
 		return bnx2fc_stats;
 	}
-	bnx2fc_stats->invalid_crc_count += fw_stats->rx_stat1.fc_crc_cnt;
+	bnx2fc_stats->invalid_crc_count += fw_stats->rx_stat2.fc_crc_cnt;
 	bnx2fc_stats->tx_frames += fw_stats->tx_stat.fcoe_tx_pkt_cnt;
 	bnx2fc_stats->tx_words += (fw_stats->tx_stat.fcoe_tx_byte_cnt) / 4;
 	bnx2fc_stats->rx_frames += fw_stats->rx_stat0.fcoe_rx_pkt_cnt;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index f756d5f..d8e8a82 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -100,6 +100,9 @@
 	fcoe_init2.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
 					FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
 
+	fcoe_init2.hsi_major_version = FCOE_HSI_MAJOR_VERSION;
+	fcoe_init2.hsi_minor_version = FCOE_HSI_MINOR_VERSION;
+
 	fcoe_init2.hash_tbl_pbl_addr_lo = (u32) hba->hash_tbl_pbl_dma;
 	fcoe_init2.hash_tbl_pbl_addr_hi = (u32)
 					   ((u64) hba->hash_tbl_pbl_dma >> 32);
@@ -122,6 +125,7 @@
 	fcoe_init3.error_bit_map_lo = 0xffffffff;
 	fcoe_init3.error_bit_map_hi = 0xffffffff;
 
+	fcoe_init3.perf_config = 1;
 
 	kwqe_arr[0] = (struct kwqe *) &fcoe_init1;
 	kwqe_arr[1] = (struct kwqe *) &fcoe_init2;
@@ -289,19 +293,19 @@
 	ofld_req4.e_d_tov_timer_val = lport->e_d_tov / 20;
 
 
-	ofld_req4.src_mac_addr_lo32[0] =  port->data_src_addr[5];
+	ofld_req4.src_mac_addr_lo[0] =  port->data_src_addr[5];
 							/* local mac */
-	ofld_req4.src_mac_addr_lo32[1] =  port->data_src_addr[4];
-	ofld_req4.src_mac_addr_lo32[2] =  port->data_src_addr[3];
-	ofld_req4.src_mac_addr_lo32[3] =  port->data_src_addr[2];
-	ofld_req4.src_mac_addr_hi16[0] =  port->data_src_addr[1];
-	ofld_req4.src_mac_addr_hi16[1] =  port->data_src_addr[0];
-	ofld_req4.dst_mac_addr_lo32[0] =  hba->ctlr.dest_addr[5];/* fcf mac */
-	ofld_req4.dst_mac_addr_lo32[1] =  hba->ctlr.dest_addr[4];
-	ofld_req4.dst_mac_addr_lo32[2] =  hba->ctlr.dest_addr[3];
-	ofld_req4.dst_mac_addr_lo32[3] =  hba->ctlr.dest_addr[2];
-	ofld_req4.dst_mac_addr_hi16[0] =  hba->ctlr.dest_addr[1];
-	ofld_req4.dst_mac_addr_hi16[1] =  hba->ctlr.dest_addr[0];
+	ofld_req4.src_mac_addr_lo[1] =  port->data_src_addr[4];
+	ofld_req4.src_mac_addr_mid[0] =  port->data_src_addr[3];
+	ofld_req4.src_mac_addr_mid[1] =  port->data_src_addr[2];
+	ofld_req4.src_mac_addr_hi[0] =  port->data_src_addr[1];
+	ofld_req4.src_mac_addr_hi[1] =  port->data_src_addr[0];
+	ofld_req4.dst_mac_addr_lo[0] =  hba->ctlr.dest_addr[5];/* fcf mac */
+	ofld_req4.dst_mac_addr_lo[1] =  hba->ctlr.dest_addr[4];
+	ofld_req4.dst_mac_addr_mid[0] =  hba->ctlr.dest_addr[3];
+	ofld_req4.dst_mac_addr_mid[1] =  hba->ctlr.dest_addr[2];
+	ofld_req4.dst_mac_addr_hi[0] =  hba->ctlr.dest_addr[1];
+	ofld_req4.dst_mac_addr_hi[1] =  hba->ctlr.dest_addr[0];
 
 	ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma;
 	ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32);
@@ -345,20 +349,21 @@
 	enbl_req.hdr.flags =
 		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
 
-	enbl_req.src_mac_addr_lo32[0] =  port->data_src_addr[5];
+	enbl_req.src_mac_addr_lo[0] =  port->data_src_addr[5];
 							/* local mac */
-	enbl_req.src_mac_addr_lo32[1] =  port->data_src_addr[4];
-	enbl_req.src_mac_addr_lo32[2] =  port->data_src_addr[3];
-	enbl_req.src_mac_addr_lo32[3] =  port->data_src_addr[2];
-	enbl_req.src_mac_addr_hi16[0] =  port->data_src_addr[1];
-	enbl_req.src_mac_addr_hi16[1] =  port->data_src_addr[0];
+	enbl_req.src_mac_addr_lo[1] =  port->data_src_addr[4];
+	enbl_req.src_mac_addr_mid[0] =  port->data_src_addr[3];
+	enbl_req.src_mac_addr_mid[1] =  port->data_src_addr[2];
+	enbl_req.src_mac_addr_hi[0] =  port->data_src_addr[1];
+	enbl_req.src_mac_addr_hi[1] =  port->data_src_addr[0];
+	memcpy(tgt->src_addr, port->data_src_addr, ETH_ALEN);
 
-	enbl_req.dst_mac_addr_lo32[0] =  hba->ctlr.dest_addr[5];/* fcf mac */
-	enbl_req.dst_mac_addr_lo32[1] =  hba->ctlr.dest_addr[4];
-	enbl_req.dst_mac_addr_lo32[2] =  hba->ctlr.dest_addr[3];
-	enbl_req.dst_mac_addr_lo32[3] =  hba->ctlr.dest_addr[2];
-	enbl_req.dst_mac_addr_hi16[0] =  hba->ctlr.dest_addr[1];
-	enbl_req.dst_mac_addr_hi16[1] =  hba->ctlr.dest_addr[0];
+	enbl_req.dst_mac_addr_lo[0] =  hba->ctlr.dest_addr[5];/* fcf mac */
+	enbl_req.dst_mac_addr_lo[1] =  hba->ctlr.dest_addr[4];
+	enbl_req.dst_mac_addr_mid[0] =  hba->ctlr.dest_addr[3];
+	enbl_req.dst_mac_addr_mid[1] =  hba->ctlr.dest_addr[2];
+	enbl_req.dst_mac_addr_hi[0] =  hba->ctlr.dest_addr[1];
+	enbl_req.dst_mac_addr_hi[1] =  hba->ctlr.dest_addr[0];
 
 	port_id = fc_host_port_id(lport->host);
 	if (port_id != tgt->sid) {
@@ -411,18 +416,19 @@
 	disable_req.hdr.flags =
 		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
 
-	disable_req.src_mac_addr_lo32[0] =  port->data_src_addr[5];
-	disable_req.src_mac_addr_lo32[2] =  port->data_src_addr[3];
-	disable_req.src_mac_addr_lo32[3] =  port->data_src_addr[2];
-	disable_req.src_mac_addr_hi16[0] =  port->data_src_addr[1];
-	disable_req.src_mac_addr_hi16[1] =  port->data_src_addr[0];
+	disable_req.src_mac_addr_lo[0] =  tgt->src_addr[5];
+	disable_req.src_mac_addr_lo[1] =  tgt->src_addr[4];
+	disable_req.src_mac_addr_mid[0] =  tgt->src_addr[3];
+	disable_req.src_mac_addr_mid[1] =  tgt->src_addr[2];
+	disable_req.src_mac_addr_hi[0] =  tgt->src_addr[1];
+	disable_req.src_mac_addr_hi[1] =  tgt->src_addr[0];
 
-	disable_req.dst_mac_addr_lo32[0] =  hba->ctlr.dest_addr[5];/* fcf mac */
-	disable_req.dst_mac_addr_lo32[1] =  hba->ctlr.dest_addr[4];
-	disable_req.dst_mac_addr_lo32[2] =  hba->ctlr.dest_addr[3];
-	disable_req.dst_mac_addr_lo32[3] =  hba->ctlr.dest_addr[2];
-	disable_req.dst_mac_addr_hi16[0] =  hba->ctlr.dest_addr[1];
-	disable_req.dst_mac_addr_hi16[1] =  hba->ctlr.dest_addr[0];
+	disable_req.dst_mac_addr_lo[0] =  hba->ctlr.dest_addr[5];/* fcf mac */
+	disable_req.dst_mac_addr_lo[1] =  hba->ctlr.dest_addr[4];
+	disable_req.dst_mac_addr_mid[0] =  hba->ctlr.dest_addr[3];
+	disable_req.dst_mac_addr_mid[1] =  hba->ctlr.dest_addr[2];
+	disable_req.dst_mac_addr_hi[0] =  hba->ctlr.dest_addr[1];
+	disable_req.dst_mac_addr_hi[1] =  hba->ctlr.dest_addr[0];
 
 	port_id = tgt->sid;
 	disable_req.s_id[0] = (port_id & 0x000000FF);
@@ -640,10 +646,10 @@
 		xid = err_entry->fc_hdr.ox_id;
 		BNX2FC_TGT_DBG(tgt, "Unsol Error Frame OX_ID = 0x%x\n", xid);
 		BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x\n",
-			err_entry->err_warn_bitmap_hi,
-			err_entry->err_warn_bitmap_lo);
+			err_entry->data.err_warn_bitmap_hi,
+			err_entry->data.err_warn_bitmap_lo);
 		BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x\n",
-			err_entry->tx_buf_off, err_entry->rx_buf_off);
+			err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);
 
 		bnx2fc_return_rqe(tgt, 1);
 
@@ -722,10 +728,10 @@
 		xid = cpu_to_be16(err_entry->fc_hdr.ox_id);
 		BNX2FC_TGT_DBG(tgt, "Unsol Warning Frame OX_ID = 0x%x\n", xid);
 		BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x",
-			err_entry->err_warn_bitmap_hi,
-			err_entry->err_warn_bitmap_lo);
+			err_entry->data.err_warn_bitmap_hi,
+			err_entry->data.err_warn_bitmap_lo);
 		BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x",
-			err_entry->tx_buf_off, err_entry->rx_buf_off);
+			err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);
 
 		bnx2fc_return_rqe(tgt, 1);
 		spin_unlock_bh(&tgt->tgt_lock);
@@ -762,9 +768,9 @@
 	task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx];
 	task = &(task_page[index]);
 
-	num_rq = ((task->rx_wr_tx_rd.rx_flags &
-		   FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE) >>
-		   FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE_SHIFT);
+	num_rq = ((task->rxwr_txrd.var_ctx.rx_flags &
+		   FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE) >>
+		   FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE_SHIFT);
 
 	io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
 
@@ -777,22 +783,19 @@
 	/* Timestamp IO completion time */
 	cmd_type = io_req->cmd_type;
 
-	/* optimized completion path */
-	if (cmd_type == BNX2FC_SCSI_CMD) {
-		rx_state = ((task->rx_wr_tx_rd.rx_flags &
-			    FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE) >>
-			    FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE_SHIFT);
+	rx_state = ((task->rxwr_txrd.var_ctx.rx_flags &
+		    FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE) >>
+		    FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE_SHIFT);
 
+	/* Process other IO completion types */
+	switch (cmd_type) {
+	case BNX2FC_SCSI_CMD:
 		if (rx_state == FCOE_TASK_RX_STATE_COMPLETED) {
 			bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq);
 			spin_unlock_bh(&tgt->tgt_lock);
 			return;
 		}
-	}
 
-	/* Process other IO completion types */
-	switch (cmd_type) {
-	case BNX2FC_SCSI_CMD:
 		if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED)
 			bnx2fc_process_abts_compl(io_req, task, num_rq);
 		else if (rx_state ==
@@ -819,8 +822,16 @@
 		break;
 
 	case BNX2FC_ELS:
-		BNX2FC_IO_DBG(io_req, "cq_compl - call process_els_compl\n");
-		bnx2fc_process_els_compl(io_req, task, num_rq);
+		if (rx_state == FCOE_TASK_RX_STATE_COMPLETED)
+			bnx2fc_process_els_compl(io_req, task, num_rq);
+		else if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED)
+			bnx2fc_process_abts_compl(io_req, task, num_rq);
+		else if (rx_state ==
+			 FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED)
+			bnx2fc_process_cleanup_compl(io_req, task, num_rq);
+		else
+			printk(KERN_ERR PFX "Invalid rx state =  %d\n",
+				rx_state);
 		break;
 
 	case BNX2FC_CLEANUP:
@@ -835,6 +846,20 @@
 	spin_unlock_bh(&tgt->tgt_lock);
 }
 
+void bnx2fc_arm_cq(struct bnx2fc_rport *tgt)
+{
+	struct b577xx_fcoe_rx_doorbell *rx_db = &tgt->rx_db;
+	u32 msg;
+
+	wmb();
+	rx_db->doorbell_cq_cons = tgt->cq_cons_idx | (tgt->cq_curr_toggle_bit <<
+			FCOE_CQE_TOGGLE_BIT_SHIFT);
+	msg = *((u32 *)rx_db);
+	writel(cpu_to_le32(msg), tgt->ctx_base);
+	mmiowb();
+
+}
+
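
Design note: the doorbell templates now live in the rport (the sq_db and
rx_db members added to struct bnx2fc_rport in bnx2fc.h above), so
bnx2fc_arm_cq() here and the reworked bnx2fc_ring_doorbell() below only
update the producer/consumer word and issue a single 32-bit writel(),
rather than rebuilding the doorbell with memset() on every ring.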
 struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe)
 {
 	struct bnx2fc_work *work;
@@ -853,8 +878,8 @@
 	struct fcoe_cqe *cq;
 	u32 cq_cons;
 	struct fcoe_cqe *cqe;
+	u32 num_free_sqes = 0;
 	u16 wqe;
-	bool more_cqes_found = false;
 
 	/*
 	 * cq_lock is a low contention lock used to protect
@@ -872,62 +897,51 @@
 	cq_cons = tgt->cq_cons_idx;
 	cqe = &cq[cq_cons];
 
-	do {
-		more_cqes_found ^= true;
+	while (((wqe = cqe->wqe) & FCOE_CQE_TOGGLE_BIT) ==
+	       (tgt->cq_curr_toggle_bit <<
+	       FCOE_CQE_TOGGLE_BIT_SHIFT)) {
 
-		while (((wqe = cqe->wqe) & FCOE_CQE_TOGGLE_BIT) ==
-		       (tgt->cq_curr_toggle_bit <<
-		       FCOE_CQE_TOGGLE_BIT_SHIFT)) {
+		/* new entry on the cq */
+		if (wqe & FCOE_CQE_CQE_TYPE) {
+			/* Unsolicited event notification */
+			bnx2fc_process_unsol_compl(tgt, wqe);
+		} else {
+			/* Pending work request completion */
+			struct bnx2fc_work *work = NULL;
+			struct bnx2fc_percpu_s *fps = NULL;
+			unsigned int cpu = wqe % num_possible_cpus();
 
-			/* new entry on the cq */
-			if (wqe & FCOE_CQE_CQE_TYPE) {
-				/* Unsolicited event notification */
-				bnx2fc_process_unsol_compl(tgt, wqe);
-			} else {
-				struct bnx2fc_work *work = NULL;
-				struct bnx2fc_percpu_s *fps = NULL;
-				unsigned int cpu = wqe % num_possible_cpus();
+			fps = &per_cpu(bnx2fc_percpu, cpu);
+			spin_lock_bh(&fps->fp_work_lock);
+			if (unlikely(!fps->iothread))
+				goto unlock;
 
-				fps = &per_cpu(bnx2fc_percpu, cpu);
-				spin_lock_bh(&fps->fp_work_lock);
-				if (unlikely(!fps->iothread))
-					goto unlock;
-
-				work = bnx2fc_alloc_work(tgt, wqe);
-				if (work)
-					list_add_tail(&work->list,
-							&fps->work_list);
+			work = bnx2fc_alloc_work(tgt, wqe);
+			if (work)
+				list_add_tail(&work->list,
+					      &fps->work_list);
 unlock:
-				spin_unlock_bh(&fps->fp_work_lock);
+			spin_unlock_bh(&fps->fp_work_lock);
 
-				/* Pending work request completion */
-				if (fps->iothread && work)
-					wake_up_process(fps->iothread);
-				else
-					bnx2fc_process_cq_compl(tgt, wqe);
-			}
-			cqe++;
-			tgt->cq_cons_idx++;
-
-			if (tgt->cq_cons_idx == BNX2FC_CQ_WQES_MAX) {
-				tgt->cq_cons_idx = 0;
-				cqe = cq;
-				tgt->cq_curr_toggle_bit =
-					1 - tgt->cq_curr_toggle_bit;
-			}
+			/* Pending work request completion */
+			if (fps->iothread && work)
+				wake_up_process(fps->iothread);
+			else
+				bnx2fc_process_cq_compl(tgt, wqe);
 		}
-		/* Re-arm CQ */
-		if (more_cqes_found) {
-			tgt->conn_db->cq_arm.lo = -1;
-			wmb();
+		cqe++;
+		tgt->cq_cons_idx++;
+		num_free_sqes++;
+
+		if (tgt->cq_cons_idx == BNX2FC_CQ_WQES_MAX) {
+			tgt->cq_cons_idx = 0;
+			cqe = cq;
+			tgt->cq_curr_toggle_bit =
+				1 - tgt->cq_curr_toggle_bit;
 		}
-	} while (more_cqes_found);
-
-	/*
-	 * Commit tgt->cq_cons_idx change to the memory
-	 * spin_lock implies full memory barrier, no need to smp_wmb
-	 */
-
+	}
+	bnx2fc_arm_cq(tgt);
+	atomic_add(num_free_sqes, &tgt->free_sqes);
 	spin_unlock_bh(&tgt->cq_lock);
 	return 0;
 }
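
The flattened loop keeps the CQE ownership test intact: an entry belongs to
the driver only while its toggle bit (bit 15 of wqe) matches the consumer's
current toggle, which flips on every wrap of the ring, so entries left over
from the previous lap fail the comparison. A standalone restatement, with a
hypothetical helper name, reading the raw word exactly as the loop does:

	static bool fcoe_cqe_owned_by_driver(const struct fcoe_cqe *cqe,
					     u8 curr_toggle_bit)
	{
		u16 wqe = cqe->wqe;

		return (wqe & FCOE_CQE_TOGGLE_BIT) ==
		       (curr_toggle_bit << FCOE_CQE_TOGGLE_BIT_SHIFT);
	}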
@@ -1141,7 +1155,11 @@
 	case FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR:
 		printk(KERN_ERR PFX "init_failure due to NIC error\n");
 		break;
-
+	case FCOE_KCQE_COMPLETION_STATUS_ERROR:
+		printk(KERN_ERR PFX "init failure due to compl status err\n");
+		break;
+	case FCOE_KCQE_COMPLETION_STATUS_WRONG_HSI_VERSION:
+		printk(KERN_ERR PFX "init failure due to HSI mismatch\n");
+		break;
 	default:
 		printk(KERN_ERR PFX "Unknown Error code %d\n", err_code);
 	}
@@ -1247,21 +1265,14 @@
 
 void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt)
 {
-	struct b577xx_doorbell_set_prod ev_doorbell;
+	struct b577xx_doorbell_set_prod *sq_db = &tgt->sq_db;
 	u32 msg;
 
 	wmb();
-
-	memset(&ev_doorbell, 0, sizeof(struct b577xx_doorbell_set_prod));
-	ev_doorbell.header.header = B577XX_DOORBELL_HDR_DB_TYPE;
-
-	ev_doorbell.prod = tgt->sq_prod_idx |
+	sq_db->prod = tgt->sq_prod_idx |
 				(tgt->sq_curr_toggle_bit << 15);
-	ev_doorbell.header.header |= B577XX_FCOE_CONNECTION_TYPE <<
-					B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT;
-	msg = *((u32 *)&ev_doorbell);
+	msg = *((u32 *)sq_db);
 	writel(cpu_to_le32(msg), tgt->ctx_base);
-
 	mmiowb();
 
 }
@@ -1322,18 +1333,26 @@
 	memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
 
 	/* Tx Write Rx Read */
-	task->tx_wr_rx_rd.tx_flags = FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP <<
-				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT;
-	task->tx_wr_rx_rd.init_flags = task_type <<
-				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT;
-	task->tx_wr_rx_rd.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
-				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT;
-	/* Common */
-	task->cmn.common_flags = context_id <<
-				FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT;
-	task->cmn.general.cleanup_info.task_id = orig_xid;
+	/* init flags */
+	task->txwr_rxrd.const_ctx.init_flags = task_type <<
+				FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
+	task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
+				FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
+	task->txwr_rxrd.const_ctx.init_flags |=
+				FCOE_TASK_DEV_TYPE_DISK <<
+				FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
+	task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid;
 
+	/* Tx flags */
+	task->txwr_rxrd.const_ctx.tx_flags =
+				FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP <<
+				FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
 
+	/* Rx Read Tx Write */
+	task->rxwr_txrd.const_ctx.init_flags = context_id <<
+				FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
+	task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
+				FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;
 }
 
 void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
@@ -1342,6 +1361,7 @@
 	struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
 	struct bnx2fc_rport *tgt = io_req->tgt;
 	struct fc_frame_header *fc_hdr;
+	struct fcoe_ext_mul_sges_ctx *sgl;
 	u8 task_type = 0;
 	u64 *hdr;
 	u64 temp_hdr[3];
@@ -1367,47 +1387,49 @@
 	/* Tx only */
 	if ((task_type == FCOE_TASK_TYPE_MIDPATH) ||
 	    (task_type == FCOE_TASK_TYPE_UNSOLICITED)) {
-		task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo =
+		task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
 				(u32)mp_req->mp_req_bd_dma;
-		task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi =
+		task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
 				(u32)((u64)mp_req->mp_req_bd_dma >> 32);
-		task->tx_wr_only.sgl_ctx.mul_sges.sgl_size = 1;
-		BNX2FC_IO_DBG(io_req, "init_mp_task - bd_dma = 0x%llx\n",
-			      (unsigned long long)mp_req->mp_req_bd_dma);
+		task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size = 1;
 	}
 
 	/* Tx Write Rx Read */
-	task->tx_wr_rx_rd.tx_flags = FCOE_TASK_TX_STATE_INIT <<
-				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT;
-	task->tx_wr_rx_rd.init_flags = task_type <<
-				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT;
-	task->tx_wr_rx_rd.init_flags |= FCOE_TASK_DEV_TYPE_DISK <<
-				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT;
-	task->tx_wr_rx_rd.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
-				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT;
+	/* init flags */
+	task->txwr_rxrd.const_ctx.init_flags = task_type <<
+				FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
+	task->txwr_rxrd.const_ctx.init_flags |=
+				FCOE_TASK_DEV_TYPE_DISK <<
+				FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
+	task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
+				FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
 
-	/* Common */
-	task->cmn.data_2_trns = io_req->data_xfer_len;
-	context_id = tgt->context_id;
-	task->cmn.common_flags = context_id <<
-				FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT;
-	task->cmn.common_flags |= 1 <<
-				FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID_SHIFT;
-	task->cmn.common_flags |= 1 <<
-			FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME_SHIFT;
+	/* tx flags */
+	task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_INIT <<
+				FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
 
 	/* Rx Write Tx Read */
+	task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len;
+
+	/* rx flags */
+	task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
+				FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;
+
+	context_id = tgt->context_id;
+	task->rxwr_txrd.const_ctx.init_flags = context_id <<
+				FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
+
 	fc_hdr = &(mp_req->req_fc_hdr);
 	if (task_type == FCOE_TASK_TYPE_MIDPATH) {
 		fc_hdr->fh_ox_id = cpu_to_be16(io_req->xid);
 		fc_hdr->fh_rx_id = htons(0xffff);
-		task->rx_wr_tx_rd.rx_id = 0xffff;
+		task->rxwr_txrd.var_ctx.rx_id = 0xffff;
 	} else if (task_type == FCOE_TASK_TYPE_UNSOLICITED) {
 		fc_hdr->fh_rx_id = cpu_to_be16(io_req->xid);
 	}
 
 	/* Fill FC Header into middle path buffer */
-	hdr = (u64 *) &task->cmn.general.cmd_info.mp_fc_frame.fc_hdr;
+	hdr = (u64 *) &task->txwr_rxrd.union_ctx.tx_frame.fc_hdr;
 	memcpy(temp_hdr, fc_hdr, sizeof(temp_hdr));
 	hdr[0] = cpu_to_be64(temp_hdr[0]);
 	hdr[1] = cpu_to_be64(temp_hdr[1]);
@@ -1415,12 +1437,12 @@
 
 	/* Rx Only */
 	if (task_type == FCOE_TASK_TYPE_MIDPATH) {
+		sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
 
-		task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo =
-				(u32)mp_req->mp_resp_bd_dma;
-		task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi =
+		sgl->mul_sgl.cur_sge_addr.lo = (u32)mp_req->mp_resp_bd_dma;
+		sgl->mul_sgl.cur_sge_addr.hi =
 				(u32)((u64)mp_req->mp_resp_bd_dma >> 32);
-		task->rx_wr_only.sgl_ctx.mul_sges.sgl_size = 1;
+		sgl->mul_sgl.sgl_size = 1;
 	}
 }
 
@@ -1431,6 +1453,8 @@
 	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
 	struct io_bdt *bd_tbl = io_req->bd_tbl;
 	struct bnx2fc_rport *tgt = io_req->tgt;
+	struct fcoe_cached_sge_ctx *cached_sge;
+	struct fcoe_ext_mul_sges_ctx *sgl;
 	u64 *fcp_cmnd;
 	u64 tmp_fcp_cmnd[4];
 	u32 context_id;
@@ -1449,47 +1473,33 @@
 
 	/* Tx only */
 	if (task_type == FCOE_TASK_TYPE_WRITE) {
-		task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo =
+		task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
 				(u32)bd_tbl->bd_tbl_dma;
-		task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi =
+		task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
 				(u32)((u64)bd_tbl->bd_tbl_dma >> 32);
-		task->tx_wr_only.sgl_ctx.mul_sges.sgl_size =
+		task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size =
 				bd_tbl->bd_valid;
 	}
 
 	/*Tx Write Rx Read */
 	/* Init state to NORMAL */
-	task->tx_wr_rx_rd.tx_flags = FCOE_TASK_TX_STATE_NORMAL <<
-				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT;
-	task->tx_wr_rx_rd.init_flags = task_type <<
-				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT;
-	task->tx_wr_rx_rd.init_flags |= FCOE_TASK_DEV_TYPE_DISK <<
-				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT;
-	task->tx_wr_rx_rd.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
-				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT;
-
-	/* Common */
-	task->cmn.data_2_trns = io_req->data_xfer_len;
-	context_id = tgt->context_id;
-	task->cmn.common_flags = context_id <<
-				FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT;
-	task->cmn.common_flags |= 1 <<
-				FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID_SHIFT;
-	task->cmn.common_flags |= 1 <<
-			FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME_SHIFT;
-
-	/* Set initiative ownership */
-	task->cmn.common_flags |= FCOE_TASK_CTX_ENTRY_TX_RX_CMN_SEQ_INIT;
+	task->txwr_rxrd.const_ctx.init_flags = task_type <<
+				FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
+	task->txwr_rxrd.const_ctx.init_flags |=
+				FCOE_TASK_DEV_TYPE_DISK <<
+				FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
+	task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
+				FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
+	/* tx flags */
+	task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_NORMAL <<
+				FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
 
 	/* Set initial seq counter */
-	task->cmn.tx_low_seq_cnt = 1;
-
-	/* Set state to "waiting for the first packet" */
-	task->cmn.common_flags |= FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME;
+	task->txwr_rxrd.union_ctx.tx_seq.ctx.seq_cnt = 1;
 
 	/* Fill FCP_CMND IU */
 	fcp_cmnd = (u64 *)
-		    task->cmn.general.cmd_info.fcp_cmd_payload.opaque;
+		    task->txwr_rxrd.union_ctx.fcp_cmd.opaque;
 	bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)&tmp_fcp_cmnd);
 
 	/* swap fcp_cmnd */
@@ -1501,32 +1511,54 @@
 	}
 
 	/* Rx Write Tx Read */
-	task->rx_wr_tx_rd.rx_id = 0xffff;
+	task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len;
+
+	context_id = tgt->context_id;
+	task->rxwr_txrd.const_ctx.init_flags = context_id <<
+				FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
+
+	/* rx flags */
+	/* Set state to "waiting for the first packet" */
+	task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
+				FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;
+
+	task->rxwr_txrd.var_ctx.rx_id = 0xffff;
 
 	/* Rx Only */
+	cached_sge = &task->rxwr_only.union_ctx.read_info.sgl_ctx.cached_sge;
+	sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
+	bd_count = bd_tbl->bd_valid;
 	if (task_type == FCOE_TASK_TYPE_READ) {
-
-		bd_count = bd_tbl->bd_valid;
 		if (bd_count == 1) {
 
 			struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;
 
-			task->rx_wr_only.sgl_ctx.single_sge.cur_buf_addr.lo =
-					fcoe_bd_tbl->buf_addr_lo;
-			task->rx_wr_only.sgl_ctx.single_sge.cur_buf_addr.hi =
-					fcoe_bd_tbl->buf_addr_hi;
-			task->rx_wr_only.sgl_ctx.single_sge.cur_buf_rem =
-					fcoe_bd_tbl->buf_len;
-			task->tx_wr_rx_rd.init_flags |= 1 <<
-				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE_SHIFT;
+			cached_sge->cur_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo;
+			cached_sge->cur_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi;
+			cached_sge->cur_buf_rem = fcoe_bd_tbl->buf_len;
+			task->txwr_rxrd.const_ctx.init_flags |= 1 <<
+				FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
+		} else if (bd_count == 2) {
+			struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;
+
+			cached_sge->cur_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo;
+			cached_sge->cur_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi;
+			cached_sge->cur_buf_rem = fcoe_bd_tbl->buf_len;
+
+			fcoe_bd_tbl++;
+			cached_sge->second_buf_addr.lo =
+						 fcoe_bd_tbl->buf_addr_lo;
+			cached_sge->second_buf_addr.hi =
+						fcoe_bd_tbl->buf_addr_hi;
+			cached_sge->second_buf_rem = fcoe_bd_tbl->buf_len;
+			task->txwr_rxrd.const_ctx.init_flags |= 1 <<
+				FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
 		} else {
 
-			task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo =
-					(u32)bd_tbl->bd_tbl_dma;
-			task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi =
+			sgl->mul_sgl.cur_sge_addr.lo = (u32)bd_tbl->bd_tbl_dma;
+			sgl->mul_sgl.cur_sge_addr.hi =
 					(u32)((u64)bd_tbl->bd_tbl_dma >> 32);
-			task->rx_wr_only.sgl_ctx.mul_sges.sgl_size =
-					bd_tbl->bd_valid;
+			sgl->mul_sgl.sgl_size = bd_count;
 		}
 	}
 }
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index b5b5c34..5dc4205 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -425,6 +425,7 @@
 	struct list_head *listp;
 	struct io_bdt *bd_tbl;
 	int index = RESERVE_FREE_LIST_INDEX;
+	u32 free_sqes;
 	u32 max_sqes;
 	u16 xid;
 
@@ -445,8 +446,10 @@
 	 * cmgr lock
 	 */
 	spin_lock_bh(&cmd_mgr->free_list_lock[index]);
+	free_sqes = atomic_read(&tgt->free_sqes);
 	if ((list_empty(&(cmd_mgr->free_list[index]))) ||
-	    (tgt->num_active_ios.counter  >= max_sqes)) {
+	    (tgt->num_active_ios.counter  >= max_sqes) ||
+	    (free_sqes + max_sqes <= BNX2FC_SQ_WQES_MAX)) {
 		BNX2FC_TGT_DBG(tgt, "No free els_tm cmds available "
 			"ios(%d):sqes(%d)\n",
 			tgt->num_active_ios.counter, tgt->max_sqes);
@@ -463,6 +466,7 @@
 	xid = io_req->xid;
 	cmd_mgr->cmds[xid] = io_req;
 	atomic_inc(&tgt->num_active_ios);
+	atomic_dec(&tgt->free_sqes);
 	spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
 
 	INIT_LIST_HEAD(&io_req->link);
@@ -489,6 +493,7 @@
 	struct bnx2fc_cmd *io_req;
 	struct list_head *listp;
 	struct io_bdt *bd_tbl;
+	u32 free_sqes;
 	u32 max_sqes;
 	u16 xid;
 	int index = get_cpu();
@@ -499,8 +504,10 @@
 	 * cmgr lock
 	 */
 	spin_lock_bh(&cmd_mgr->free_list_lock[index]);
+	free_sqes = atomic_read(&tgt->free_sqes);
 	if ((list_empty(&cmd_mgr->free_list[index])) ||
-	    (tgt->num_active_ios.counter  >= max_sqes)) {
+	    (tgt->num_active_ios.counter  >= max_sqes) ||
+	    (free_sqes + max_sqes <= BNX2FC_SQ_WQES_MAX)) {
 		spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
 		put_cpu();
 		return NULL;
@@ -513,6 +520,7 @@
 	xid = io_req->xid;
 	cmd_mgr->cmds[xid] = io_req;
 	atomic_inc(&tgt->num_active_ios);
+	atomic_dec(&tgt->free_sqes);
 	spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
 	put_cpu();
 
@@ -873,7 +881,7 @@
 
 	/* Obtain oxid and rxid for the original exchange to be aborted */
 	fc_hdr->fh_ox_id = htons(io_req->xid);
-	fc_hdr->fh_rx_id = htons(io_req->task->rx_wr_tx_rd.rx_id);
+	fc_hdr->fh_rx_id = htons(io_req->task->rxwr_txrd.var_ctx.rx_id);
 
 	sid = tgt->sid;
 	did = rport->port_id;
@@ -1189,7 +1197,7 @@
 			kref_put(&io_req->refcount,
 				 bnx2fc_cmd_release); /* drop timer hold */
 
-	r_ctl = task->cmn.general.rsp_info.abts_rsp.r_ctl;
+	r_ctl = (u8)task->rxwr_only.union_ctx.comp_info.abts_rsp.r_ctl;
 
 	switch (r_ctl) {
 	case FC_RCTL_BA_ACC:
@@ -1344,12 +1352,13 @@
 	fc_hdr = &(tm_req->resp_fc_hdr);
 	hdr = (u64 *)fc_hdr;
 	temp_hdr = (u64 *)
-		&task->cmn.general.cmd_info.mp_fc_frame.fc_hdr;
+		&task->rxwr_only.union_ctx.comp_info.mp_rsp.fc_hdr;
 	hdr[0] = cpu_to_be64(temp_hdr[0]);
 	hdr[1] = cpu_to_be64(temp_hdr[1]);
 	hdr[2] = cpu_to_be64(temp_hdr[2]);
 
-	tm_req->resp_len = task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_off;
+	tm_req->resp_len =
+		task->rxwr_only.union_ctx.comp_info.mp_rsp.mp_payload_len;
 
 	rsp_buf = tm_req->resp_buf;
 
@@ -1724,7 +1733,7 @@
 
 	/* Fetch fcp_rsp from task context and perform cmd completion */
 	fcp_rsp = (struct fcoe_fcp_rsp_payload *)
-		   &(task->cmn.general.rsp_info.fcp_rsp.payload);
+		   &(task->rxwr_only.union_ctx.comp_info.fcp_rsp.payload);
 
 	/* parse fcp_rsp and obtain sense data from RQ if available */
 	bnx2fc_parse_fcp_rsp(io_req, fcp_rsp, num_rq);
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index a2e3830..3e892bd 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -133,6 +133,8 @@
 		/* upload will take care of cleaning up sess resc */
 		lport->tt.rport_logoff(rdata);
 	}
+	/* Arm CQ */
+	bnx2fc_arm_cq(tgt);
 	return;
 
 ofld_err:
@@ -315,6 +317,8 @@
 
 	struct fc_rport *rport = rdata->rport;
 	struct bnx2fc_hba *hba = port->priv;
+	struct b577xx_doorbell_set_prod *sq_db = &tgt->sq_db;
+	struct b577xx_fcoe_rx_doorbell *rx_db = &tgt->rx_db;
 
 	tgt->rport = rport;
 	tgt->rdata = rdata;
@@ -335,6 +339,7 @@
 	tgt->max_sqes = BNX2FC_SQ_WQES_MAX;
 	tgt->max_rqes = BNX2FC_RQ_WQES_MAX;
 	tgt->max_cqes = BNX2FC_CQ_WQES_MAX;
+	atomic_set(&tgt->free_sqes, BNX2FC_SQ_WQES_MAX);
 
 	/* Initialize the toggle bit */
 	tgt->sq_curr_toggle_bit = 1;
@@ -345,7 +350,17 @@
 	tgt->rq_cons_idx = 0;
 	atomic_set(&tgt->num_active_ios, 0);
 
-	tgt->work_time_slice = 2;
+	/* initialize sq doorbell */
+	sq_db->header.header = B577XX_DOORBELL_HDR_DB_TYPE;
+	sq_db->header.header |= B577XX_FCOE_CONNECTION_TYPE <<
+					B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT;
+	/* initialize rx doorbell */
+	rx_db->hdr.header = ((0x1 << B577XX_DOORBELL_HDR_RX_SHIFT) |
+			  (0x1 << B577XX_DOORBELL_HDR_DB_TYPE_SHIFT) |
+			  (B577XX_FCOE_CONNECTION_TYPE <<
+				B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT));
+	rx_db->params = (0x2 << B577XX_FCOE_RX_DOORBELL_NEGATIVE_ARM_SHIFT) |
+		     (0x3 << B577XX_FCOE_RX_DOORBELL_OPCODE_SHIFT);
 
 	spin_lock_init(&tgt->tgt_lock);
 	spin_lock_init(&tgt->cq_lock);
@@ -758,8 +773,6 @@
 	}
 	memset(tgt->lcq, 0, tgt->lcq_mem_size);
 
-	/* Arm CQ */
-	tgt->conn_db->cq_arm.lo = -1;
 	tgt->conn_db->rq_prod = 0x8000;
 
 	return 0;
@@ -787,6 +800,8 @@
 		iounmap(tgt->ctx_base);
 		tgt->ctx_base = NULL;
 	}
+
+	spin_lock_bh(&tgt->cq_lock);
 	/* Free LCQ */
 	if (tgt->lcq) {
 		dma_free_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
@@ -828,17 +843,16 @@
 		tgt->rq = NULL;
 	}
 	/* Free CQ */
-	spin_lock_bh(&tgt->cq_lock);
 	if (tgt->cq) {
 		dma_free_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
 				    tgt->cq, tgt->cq_dma);
 		tgt->cq = NULL;
 	}
-	spin_unlock_bh(&tgt->cq_lock);
 	/* Free SQ */
 	if (tgt->sq) {
 		dma_free_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
 				    tgt->sq, tgt->sq_dma);
 		tgt->sq = NULL;
 	}
+	spin_unlock_bh(&tgt->cq_lock);
 }
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
index dad6c8a..71890a0 100644
--- a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
+++ b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
@@ -707,8 +707,10 @@
 #define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T_SHIFT 2
 #define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA (0x1<<3)
 #define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA_SHIFT 3
-#define ISCSI_KWQE_CONN_UPDATE_RESERVED1 (0xF<<4)
-#define ISCSI_KWQE_CONN_UPDATE_RESERVED1_SHIFT 4
+#define ISCSI_KWQE_CONN_UPDATE_OOO_SUPPORT_MODE (0x3<<4)
+#define ISCSI_KWQE_CONN_UPDATE_OOO_SUPPORT_MODE_SHIFT 4
+#define ISCSI_KWQE_CONN_UPDATE_RESERVED1 (0x3<<6)
+#define ISCSI_KWQE_CONN_UPDATE_RESERVED1_SHIFT 6
 #elif defined(__LITTLE_ENDIAN)
 	u8 conn_flags;
 #define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST (0x1<<0)
@@ -719,8 +721,10 @@
 #define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T_SHIFT 2
 #define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA (0x1<<3)
 #define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA_SHIFT 3
-#define ISCSI_KWQE_CONN_UPDATE_RESERVED1 (0xF<<4)
-#define ISCSI_KWQE_CONN_UPDATE_RESERVED1_SHIFT 4
+#define ISCSI_KWQE_CONN_UPDATE_OOO_SUPPORT_MODE (0x3<<4)
+#define ISCSI_KWQE_CONN_UPDATE_OOO_SUPPORT_MODE_SHIFT 4
+#define ISCSI_KWQE_CONN_UPDATE_RESERVED1 (0x3<<6)
+#define ISCSI_KWQE_CONN_UPDATE_RESERVED1_SHIFT 6
 	u8 reserved2;
 	u8 max_outstanding_r2ts;
 	u8 session_error_recovery_level;
diff --git a/drivers/staging/brcm80211/brcmfmac/dhd_linux.c b/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
index f356c56..09957bd 100644
--- a/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
@@ -31,6 +31,8 @@
 #include <linux/fcntl.h>
 #include <linux/fs.h>
 #include <linux/uaccess.h>
+#include <linux/interrupt.h>
+#include <linux/hardirq.h>
 #include <bcmdefs.h>
 #include <bcmutils.h>
 
diff --git a/drivers/staging/brcm80211/brcmfmac/wl_iw.c b/drivers/staging/brcm80211/brcmfmac/wl_iw.c
index 15e1b05..35eec91 100644
--- a/drivers/staging/brcm80211/brcmfmac/wl_iw.c
+++ b/drivers/staging/brcm80211/brcmfmac/wl_iw.c
@@ -18,6 +18,7 @@
 #include <linux/semaphore.h>
 #include <bcmdefs.h>
 #include <linux/netdevice.h>
+#include <linux/hardirq.h>
 #include <wlioctl.h>
 
 #include <bcmutils.h>
diff --git a/drivers/staging/brcm80211/brcmsmac/wl_mac80211.c b/drivers/staging/brcm80211/brcmsmac/wl_mac80211.c
index 6c6236c..8261229 100644
--- a/drivers/staging/brcm80211/brcmsmac/wl_mac80211.c
+++ b/drivers/staging/brcm80211/brcmsmac/wl_mac80211.c
@@ -24,6 +24,7 @@
 #include <linux/pci.h>
 #include <linux/sched.h>
 #include <linux/firmware.h>
+#include <linux/interrupt.h>
 #include <net/mac80211.h>
 
 #include <proto/802.11.h>
diff --git a/drivers/staging/brcm80211/brcmsmac/wl_mac80211.h b/drivers/staging/brcm80211/brcmsmac/wl_mac80211.h
index e703d8b..f7a58b7 100644
--- a/drivers/staging/brcm80211/brcmsmac/wl_mac80211.h
+++ b/drivers/staging/brcm80211/brcmsmac/wl_mac80211.h
@@ -17,6 +17,8 @@
 #ifndef _wl_mac80211_h_
 #define _wl_mac80211_h_
 
+#include <linux/interrupt.h>
+
 /* BMAC Note: High-only driver is no longer working in softirq context as it needs to block and
  * sleep so perimeter lock has to be a semaphore instead of spinlock. This requires timers to be
  * submitted to workqueue instead of being on kernel timer
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211.h b/drivers/staging/rtl8187se/ieee80211/ieee80211.h
index 16aa6a8..4384d93 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211.h
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211.h
@@ -32,6 +32,7 @@
 #include <linux/semaphore.h>
 #include <linux/wireless.h>
 #include <linux/ieee80211.h>
+#include <linux/interrupt.h>
 
 #define KEY_TYPE_NA		0x0
 #define KEY_TYPE_WEP40 		0x1
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
index 736a140..00ee02f 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
@@ -20,6 +20,7 @@
 #include <linux/delay.h>
 #include <linux/slab.h>
 #include <linux/version.h>
+#include <linux/interrupt.h>
 #include <asm/uaccess.h>
 
 #include "dot11d.h"
diff --git a/drivers/staging/rtl8187se/r8180.h b/drivers/staging/rtl8187se/r8180.h
index d15bdf6..a2c46ae 100644
--- a/drivers/staging/rtl8187se/r8180.h
+++ b/drivers/staging/rtl8187se/r8180.h
@@ -18,6 +18,7 @@
 #ifndef R8180H
 #define R8180H
 
+#include <linux/interrupt.h>
 
 #define RTL8180_MODULE_NAME "r8180"
 #define DMESG(x,a...) printk(KERN_INFO RTL8180_MODULE_NAME ": " x "\n", ## a)
diff --git a/drivers/staging/rtl8187se/r8180_core.c b/drivers/staging/rtl8187se/r8180_core.c
index 2155a77..bae7d85 100644
--- a/drivers/staging/rtl8187se/r8180_core.c
+++ b/drivers/staging/rtl8187se/r8180_core.c
@@ -33,6 +33,7 @@
 #include <linux/slab.h>
 #include <linux/syscalls.h>
 #include <linux/eeprom_93cx6.h>
+#include <linux/interrupt.h>
 
 #include "r8180_hw.h"
 #include "r8180.h"
diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211.h b/drivers/staging/rtl8192e/ieee80211/ieee80211.h
index dbe21ab..82bc59a 100644
--- a/drivers/staging/rtl8192e/ieee80211/ieee80211.h
+++ b/drivers/staging/rtl8192e/ieee80211/ieee80211.h
@@ -31,6 +31,7 @@
 #include <linux/timer.h>
 #include <linux/sched.h>
 #include <linux/semaphore.h>
+#include <linux/interrupt.h>
 
 #include <linux/delay.h>
 #include <linux/wireless.h>
diff --git a/drivers/staging/rtl8192e/r8192E.h b/drivers/staging/rtl8192e/r8192E.h
index 0229031..89fe8fc 100644
--- a/drivers/staging/rtl8192e/r8192E.h
+++ b/drivers/staging/rtl8192e/r8192E.h
@@ -36,6 +36,7 @@
 #include <linux/if_arp.h>
 #include <linux/random.h>
 #include <linux/version.h>
+#include <linux/interrupt.h>
 #include <asm/io.h>
 #include "ieee80211/rtl819x_HT.h"
 #include "ieee80211/ieee80211.h"
diff --git a/drivers/staging/rtl8192e/r8192E_core.c b/drivers/staging/rtl8192e/r8192E_core.c
index 58d800f..19a9a07 100644
--- a/drivers/staging/rtl8192e/r8192E_core.c
+++ b/drivers/staging/rtl8192e/r8192E_core.c
@@ -27,6 +27,8 @@
 
 #include <linux/vmalloc.h>
 #include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/hardirq.h>
 #include <asm/uaccess.h>
 #include "r8192E_hw.h"
 #include "r8192E.h"
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211.h b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
index e716f7b..2333257 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211.h
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
@@ -31,6 +31,7 @@
 #include <linux/timer.h>
 #include <linux/sched.h>
 #include <linux/semaphore.h>
+#include <linux/interrupt.h>
 
 #include <linux/delay.h>
 #include <linux/wireless.h>
diff --git a/include/linux/arcdevice.h b/include/linux/arcdevice.h
index 7d650a0..7216b0d 100644
--- a/include/linux/arcdevice.h
+++ b/include/linux/arcdevice.h
@@ -20,6 +20,7 @@
 #include <linux/if_arcnet.h>
 
 #ifdef __KERNEL__
+#include <linux/irqreturn.h>
 
 #ifndef bool
 #define bool int
diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h
index 08763e4..6ff080e 100644
--- a/include/linux/bcma/bcma.h
+++ b/include/linux/bcma/bcma.h
@@ -6,6 +6,7 @@
 
 #include <linux/bcma/bcma_driver_chipcommon.h>
 #include <linux/bcma/bcma_driver_pci.h>
+#include <linux/ssb/ssb.h> /* SPROM sharing */
 
 #include "bcma_regs.h"
 
@@ -31,6 +32,12 @@
 	void (*write8)(struct bcma_device *core, u16 offset, u8 value);
 	void (*write16)(struct bcma_device *core, u16 offset, u16 value);
 	void (*write32)(struct bcma_device *core, u16 offset, u32 value);
+#ifdef CONFIG_BCMA_BLOCKIO
+	void (*block_read)(struct bcma_device *core, void *buffer,
+			   size_t count, u16 offset, u8 reg_width);
+	void (*block_write)(struct bcma_device *core, const void *buffer,
+			    size_t count, u16 offset, u8 reg_width);
+#endif
 	/* Agent ops */
 	u32 (*aread32)(struct bcma_device *core, u16 offset);
 	void (*awrite32)(struct bcma_device *core, u16 offset, u32 value);
@@ -117,6 +124,8 @@
 	struct bcma_device_id id;
 
 	struct device dev;
+	struct device *dma_dev;
+	unsigned int irq;
 	bool dev_registered;
 
 	u8 core_index;
@@ -179,6 +188,10 @@
 
 	struct bcma_drv_cc drv_cc;
 	struct bcma_drv_pci drv_pci;
+
+	/* We decided to share SPROM struct with SSB as long as we do not need
+	 * any hacks for BCMA. This simplifies driver code. */
+	struct ssb_sprom sprom;
 };
 
 extern inline u32 bcma_read8(struct bcma_device *core, u16 offset)
@@ -208,6 +221,18 @@
 {
 	core->bus->ops->write32(core, offset, value);
 }
+#ifdef CONFIG_BCMA_BLOCKIO
+extern inline void bcma_block_read(struct bcma_device *core, void *buffer,
+				   size_t count, u16 offset, u8 reg_width)
+{
+	core->bus->ops->block_read(core, buffer, count, offset, reg_width);
+}
+extern inline void bcma_block_write(struct bcma_device *core, const void *buffer,
+				    size_t count, u16 offset, u8 reg_width)
+{
+	core->bus->ops->block_write(core, buffer, count, offset, reg_width);
+}
+#endif
 extern inline u32 bcma_aread32(struct bcma_device *core, u16 offset)
 {
 	return core->bus->ops->aread32(core, offset);
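
A minimal caller sketch for the new block I/O accessors (hypothetical names;
only compiled when CONFIG_BCMA_BLOCKIO is set, and the count argument is
assumed to be in bytes, matching how the bus host implementations consume it):

#include <linux/bcma/bcma.h>

#ifdef CONFIG_BCMA_BLOCKIO
/* DEMO_FIFO_OFFSET is a made-up register offset, for illustration only */
static void demo_drain_fifo(struct bcma_device *core, u32 *buf, size_t len)
{
	/* read len bytes as 32-bit words from the device FIFO */
	bcma_block_read(core, buf, len, DEMO_FIFO_OFFSET, sizeof(u32));
}
#endif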
diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h
index 083c3b6..9c5b69f 100644
--- a/include/linux/bcma/bcma_driver_chipcommon.h
+++ b/include/linux/bcma/bcma_driver_chipcommon.h
@@ -244,6 +244,7 @@
 #define BCMA_CC_REGCTL_DATA		0x065C
 #define BCMA_CC_PLLCTL_ADDR		0x0660
 #define BCMA_CC_PLLCTL_DATA		0x0664
+#define BCMA_CC_SPROM			0x0830 /* SPROM beginning */
 
 /* Data for the PMU, if available.
  * Check availability with ((struct bcma_chipcommon)->capabilities & BCMA_CC_CAP_PMU)
diff --git a/include/linux/cordic.h b/include/linux/cordic.h
new file mode 100644
index 0000000..f932093
--- /dev/null
+++ b/include/linux/cordic.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2011 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef __CORDIC_H_
+#define __CORDIC_H_
+
+#include <linux/types.h>
+
+/**
+ * struct cordic_iq - i/q coordinate.
+ *
+ * @i: real part of coordinate (in phase).
+ * @q: imaginary part of coordinate (quadrature).
+ */
+struct cordic_iq {
+	s32 i;
+	s32 q;
+};
+
+/**
+ * cordic_calc_iq() - calculates the i/q coordinate for given angle.
+ *
+ * @theta: angle in degrees for which i/q coordinate is to be calculated.
+ * @coord: function output parameter holding the i/q coordinate.
+ *
+ * The function calculates the i/q coordinate for a given angle using the
+ * CORDIC algorithm. The coordinate consists of a real (i) and an
+ * imaginary (q) part. The real part is essentially the cosine of the
+ * angle and the imaginary part is the sine of the angle. The returned
+ * values are scaled by 2^16 for precision. The range for theta is
+ * from -180 degrees to +180 degrees. Values passed outside this range are
+ * converted before doing the actual calculation.
+ */
+struct cordic_iq cordic_calc_iq(s32 theta);
+
+#endif /* __CORDIC_H_ */
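
A quick usage sketch for the new helper (hypothetical caller; the expected
values follow from the 2^16 scaling documented above):

#include <linux/kernel.h>
#include <linux/cordic.h>

static void demo_cordic(void)
{
	struct cordic_iq coord = cordic_calc_iq(30);

	/* cos(30) * 65536 ~= 56756, sin(30) * 65536 = 32768; the iterative
	 * algorithm may be off by a few LSBs. */
	pr_info("cordic(30): i=%d q=%d\n", coord.i, coord.q);
}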
diff --git a/include/linux/crc8.h b/include/linux/crc8.h
new file mode 100644
index 0000000..13c8dab
--- /dev/null
+++ b/include/linux/crc8.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2011 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef __CRC8_H_
+#define __CRC8_H_
+
+#include <linux/types.h>
+
+/* see usage of this value in crc8() description */
+#define CRC8_INIT_VALUE		0xFF
+
+/*
+ * Return value of crc8() indicating valid message+crc. This is true
+ * if a CRC is inverted before transmission. The CRC computed over the
+ * whole received bitstream is _table[x], where x is the bit pattern
+ * of the modification (almost always 0xff).
+ */
+#define CRC8_GOOD_VALUE(_table)	(_table[0xFF])
+
+/* required table size for crc8 algorithm */
+#define CRC8_TABLE_SIZE			256
+
+/* helper macro assuring right table size is used */
+#define DECLARE_CRC8_TABLE(_table) \
+	static u8 _table[CRC8_TABLE_SIZE]
+
+/**
+ * crc8_populate_lsb - fill crc table for given polynomial in regular bit order.
+ *
+ * @table:	table to be filled.
+ * @polynomial:	polynomial for which table is to be filled.
+ *
+ * This function fills the provided table according to the given polynomial for
+ * regular bit order (lsb first). Polynomials in CRC algorithms are typically
+ * represented as shown below.
+ *
+ *	poly = x^8 + x^7 + x^6 + x^4 + x^2 + 1
+ *
+ * For lsb first direction x^7 maps to the lsb. So the polynomial is as below.
+ *
+ * - lsb first: poly = 10101011(1) = 0xAB
+ */
+void crc8_populate_lsb(u8 table[CRC8_TABLE_SIZE], u8 polynomial);
+
+/**
+ * crc8_populate_msb - fill crc table for given polynomial in reverse bit order.
+ *
+ * @table:	table to be filled.
+ * @polynomial:	polynomial for which table is to be filled.
+ *
+ * This function fills the provided table according to the given polynomial for
+ * reverse bit order (msb first). Polynomials in CRC algorithms are typically
+ * represented as shown below.
+ *
+ *	poly = x^8 + x^7 + x^6 + x^4 + x^2 + 1
+ *
+ * For msb first direction x^7 maps to the msb. So the polynomial is as below.
+ *
+ * - msb first: poly = (1)11010101 = 0xD5
+ */
+void crc8_populate_msb(u8 table[CRC8_TABLE_SIZE], u8 polynomial);
+
+/**
+ * crc8() - calculate a crc8 over the given input data.
+ *
+ * @table:	crc table used for calculation.
+ * @pdata:	pointer to data buffer.
+ * @nbytes:	number of bytes in data buffer.
+ * @crc:	previous returned crc8 value.
+ *
+ * The CRC8 is calculated using the polynomial given in crc8_populate_msb()
+ * or crc8_populate_lsb().
+ *
+ * The caller provides the initial value (either %CRC8_INIT_VALUE
+ * or the previous returned value) to allow for processing of
+ * discontiguous blocks of data.  When generating the CRC the
+ * caller is responsible for complementing the final return value
+ * and inserting it into the byte stream.  When validating a byte
+ * stream (including CRC8), a final return value of %CRC8_GOOD_VALUE
+ * indicates the byte stream data can be considered valid.
+ *
+ * Reference:
+ * "A Painless Guide to CRC Error Detection Algorithms", ver 3, Aug 1993
+ * Williams, Ross N., ross<at>ross.net
+ * (see URL http://www.ross.net/crc/download/crc_v3.txt).
+ */
+u8 crc8(const u8 table[CRC8_TABLE_SIZE], u8 *pdata, size_t nbytes, u8 crc);
+
+#endif /* __CRC8_H_ */
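
An end-to-end sketch of the calling convention (hypothetical framing; the
complement-on-send step and the CRC8_GOOD_VALUE() check follow the kernel-doc
above):

#include <linux/crc8.h>

DECLARE_CRC8_TABLE(demo_table);

static void demo_crc8_init(void)
{
	/* msb-first table for poly = x^8 + x^7 + x^6 + x^4 + x^2 + 1 */
	crc8_populate_msb(demo_table, 0xD5);
}

/* sender: append the complemented CRC after len payload bytes;
 * msg must have room for len + 1 bytes */
static void demo_seal(u8 *msg, size_t len)
{
	msg[len] = ~crc8(demo_table, msg, len, CRC8_INIT_VALUE);
}

/* receiver: run crc8 over payload plus the CRC byte and compare */
static bool demo_check(u8 *msg, size_t len)
{
	return crc8(demo_table, msg, len + 1, CRC8_INIT_VALUE) ==
	       CRC8_GOOD_VALUE(demo_table);
}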
diff --git a/include/linux/dccp.h b/include/linux/dccp.h
index d638e85..710c043 100644
--- a/include/linux/dccp.h
+++ b/include/linux/dccp.h
@@ -236,6 +236,7 @@
 #ifdef __KERNEL__
 
 #include <linux/in.h>
+#include <linux/interrupt.h>
 #include <linux/ktime.h>
 #include <linux/list.h>
 #include <linux/uio.h>
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 439b173..048d0fa 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -287,7 +287,7 @@
 	ETH_SS_TEST		= 0,
 	ETH_SS_STATS,
 	ETH_SS_PRIV_FLAGS,
-	ETH_SS_NTUPLE_FILTERS,
+	ETH_SS_NTUPLE_FILTERS,	/* Do not use, GRXNTUPLE is now deprecated */
 	ETH_SS_FEATURES,
 };
 
@@ -714,18 +714,6 @@
 /* needed by dev_disable_lro() */
 extern int __ethtool_set_flags(struct net_device *dev, u32 flags);
 
-struct ethtool_rx_ntuple_flow_spec_container {
-	struct ethtool_rx_ntuple_flow_spec fs;
-	struct list_head list;
-};
-
-struct ethtool_rx_ntuple_list {
-#define ETHTOOL_MAX_NTUPLE_LIST_ENTRY 1024
-#define ETHTOOL_MAX_NTUPLE_STRING_PER_ENTRY 14
-	struct list_head	list;
-	unsigned int		count;
-};
-
 /**
  * enum ethtool_phys_id_state - indicator state for physical identification
  * @ETHTOOL_ID_INACTIVE: Physical ID indicator should be deactivated
@@ -758,7 +746,6 @@
 int ethtool_op_set_ufo(struct net_device *dev, u32 data);
 u32 ethtool_op_get_flags(struct net_device *dev);
 int ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported);
-void ethtool_ntuple_flush(struct net_device *dev);
 bool ethtool_invalid_flags(struct net_device *dev, u32 data, u32 supported);
 
 /**
@@ -865,7 +852,6 @@
  *	error code or zero.
  * @set_rx_ntuple: Set an RX n-tuple rule.  Returns a negative error code
  *	or zero.
- * @get_rx_ntuple: Deprecated.
  * @get_rxfh_indir: Get the contents of the RX flow hash indirection table.
  *	Returns a negative error code or zero.
  * @set_rxfh_indir: Set the contents of the RX flow hash indirection table.
@@ -944,7 +930,6 @@
 	int	(*reset)(struct net_device *, u32 *);
 	int	(*set_rx_ntuple)(struct net_device *,
 				 struct ethtool_rx_ntuple *);
-	int	(*get_rx_ntuple)(struct net_device *, u32 stringset, void *);
 	int	(*get_rxfh_indir)(struct net_device *,
 				  struct ethtool_rxfh_indir *);
 	int	(*set_rxfh_indir)(struct net_device *,
@@ -1017,7 +1002,7 @@
 #define ETHTOOL_FLASHDEV	0x00000033 /* Flash firmware to device */
 #define ETHTOOL_RESET		0x00000034 /* Reset hardware */
 #define ETHTOOL_SRXNTUPLE	0x00000035 /* Add an n-tuple filter to device */
-#define ETHTOOL_GRXNTUPLE	0x00000036 /* Get n-tuple filters from device */
+#define ETHTOOL_GRXNTUPLE	0x00000036 /* deprecated */
 #define ETHTOOL_GSSET_INFO	0x00000037 /* Get string set info */
 #define ETHTOOL_GRXFHINDIR	0x00000038 /* Get RX flow hash indir'n table */
 #define ETHTOOL_SRXFHINDIR	0x00000039 /* Set RX flow hash indir'n table */
diff --git a/include/linux/mii.h b/include/linux/mii.h
index 359fba8..103113a 100644
--- a/include/linux/mii.h
+++ b/include/linux/mii.h
@@ -128,6 +128,8 @@
 /* 1000BASE-T Control register */
 #define ADVERTISE_1000FULL      0x0200  /* Advertise 1000BASE-T full duplex */
 #define ADVERTISE_1000HALF      0x0100  /* Advertise 1000BASE-T half duplex */
+#define CTL1000_AS_MASTER	0x0800
+#define CTL1000_ENABLE_MASTER	0x1000
 
 /* 1000BASE-T Status register */
 #define LPA_1000LOCALRXOK       0x2000  /* Link partner local receiver status */
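
For illustration, a driver could use the new 1000BASE-T Control bits with the
existing MII helpers roughly as below (demo_* is a made-up name; mdio_read and
mdio_write are the usual struct mii_if_info callbacks):

#include <linux/mii.h>

/* force 1000BASE-T master mode on the attached PHY */
static void demo_force_gige_master(struct mii_if_info *mii)
{
	int ctl1000 = mii->mdio_read(mii->dev, mii->phy_id, MII_CTRL1000);

	ctl1000 |= CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER;
	mii->mdio_write(mii->dev, mii->phy_id, MII_CTRL1000, ctl1000);
}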
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 54b8b4d..22a8cec 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1348,9 +1348,6 @@
 	/* max exchange id for FCoE LRO by ddp */
 	unsigned int		fcoe_ddp_xid;
 #endif
-	/* n-tuple filter list attached to this device */
-	struct ethtool_rx_ntuple_list ethtool_ntuple_list;
-
 	/* phy device may attach itself for hardware timestamping */
 	struct phy_device *phydev;
 
@@ -1563,7 +1560,6 @@
 	struct list_head	list;
 };
 
-#include <linux/interrupt.h>
 #include <linux/notifier.h>
 
 extern rwlock_t				dev_base_lock;		/* Device list lock */
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index 5a262e3..3540c6e 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -104,6 +104,8 @@
 	IPSET_ATTR_NAMEREF,
 	IPSET_ATTR_IP2,
 	IPSET_ATTR_CIDR2,
+	IPSET_ATTR_IP2_TO,
+	IPSET_ATTR_IFACE,
 	__IPSET_ATTR_ADT_MAX,
 };
 #define IPSET_ATTR_ADT_MAX	(__IPSET_ATTR_ADT_MAX - 1)
@@ -142,12 +144,18 @@
 enum ipset_cmd_flags {
 	IPSET_FLAG_BIT_EXIST	= 0,
 	IPSET_FLAG_EXIST	= (1 << IPSET_FLAG_BIT_EXIST),
+	IPSET_FLAG_BIT_LIST_SETNAME = 1,
+	IPSET_FLAG_LIST_SETNAME	= (1 << IPSET_FLAG_BIT_LIST_SETNAME),
+	IPSET_FLAG_BIT_LIST_HEADER = 2,
+	IPSET_FLAG_LIST_HEADER	= (1 << IPSET_FLAG_BIT_LIST_HEADER),
 };
 
 /* Flags at CADT attribute level */
 enum ipset_cadt_flags {
 	IPSET_FLAG_BIT_BEFORE	= 0,
 	IPSET_FLAG_BEFORE	= (1 << IPSET_FLAG_BIT_BEFORE),
+	IPSET_FLAG_BIT_PHYSDEV	= 1,
+	IPSET_FLAG_PHYSDEV	= (1 << IPSET_FLAG_BIT_PHYSDEV),
 };
 
 /* Commands with settype-specific attributes */
@@ -165,6 +173,7 @@
 #include <linux/ipv6.h>
 #include <linux/netlink.h>
 #include <linux/netfilter.h>
+#include <linux/netfilter/x_tables.h>
 #include <linux/vmalloc.h>
 #include <net/netlink.h>
 
@@ -206,6 +215,8 @@
 	IPSET_TYPE_IP2 = (1 << IPSET_TYPE_IP2_FLAG),
 	IPSET_TYPE_NAME_FLAG = 4,
 	IPSET_TYPE_NAME = (1 << IPSET_TYPE_NAME_FLAG),
+	IPSET_TYPE_IFACE_FLAG = 5,
+	IPSET_TYPE_IFACE = (1 << IPSET_TYPE_IFACE_FLAG),
 	/* Strictly speaking not a feature, but a flag for dumping:
 	 * this settype must be dumped last */
 	IPSET_DUMP_LAST_FLAG = 7,
@@ -214,7 +225,17 @@
 
 struct ip_set;
 
-typedef int (*ipset_adtfn)(struct ip_set *set, void *value, u32 timeout);
+typedef int (*ipset_adtfn)(struct ip_set *set, void *value,
+			   u32 timeout, u32 flags);
+
+/* Kernel API function options */
+struct ip_set_adt_opt {
+	u8 family;		/* Actual protocol family */
+	u8 dim;			/* Dimension of match/target */
+	u8 flags;		/* Direction and negation flags */
+	u32 cmdflags;		/* Command-like flags */
+	u32 timeout;		/* Timeout value */
+};
 
 /* Set type, variant-specific part */
 struct ip_set_type_variant {
@@ -223,14 +244,15 @@
 	 *			zero for no match/success to add/delete
 	 *			positive for matching element */
 	int (*kadt)(struct ip_set *set, const struct sk_buff * skb,
-		    enum ipset_adt adt, u8 pf, u8 dim, u8 flags);
+		    const struct xt_action_param *par,
+		    enum ipset_adt adt, const struct ip_set_adt_opt *opt);
 
 	/* Userspace: test/add/del entries
 	 *		returns negative error code,
 	 *			zero for no match/success to add/delete
 	 *			positive for matching element */
 	int (*uadt)(struct ip_set *set, struct nlattr *tb[],
-		    enum ipset_adt adt, u32 *lineno, u32 flags);
+		    enum ipset_adt adt, u32 *lineno, u32 flags, bool retried);
 
 	/* Low level add/del/test functions */
 	ipset_adtfn adt[IPSET_ADT_MAX];
@@ -268,8 +290,8 @@
 	u8 dimension;
 	/* Supported family: may be AF_UNSPEC for both AF_INET/AF_INET6 */
 	u8 family;
-	/* Type revision */
-	u8 revision;
+	/* Type revisions */
+	u8 revision_min, revision_max;
 
 	/* Create set */
 	int (*create)(struct ip_set *set, struct nlattr *tb[], u32 flags);
@@ -300,6 +322,8 @@
 	const struct ip_set_type_variant *variant;
 	/* The actual INET family of the set */
 	u8 family;
+	/* The type revision */
+	u8 revision;
 	/* The type specific data */
 	void *data;
 };
@@ -307,21 +331,25 @@
 /* register and unregister set references */
 extern ip_set_id_t ip_set_get_byname(const char *name, struct ip_set **set);
 extern void ip_set_put_byindex(ip_set_id_t index);
-extern const char * ip_set_name_byindex(ip_set_id_t index);
+extern const char *ip_set_name_byindex(ip_set_id_t index);
 extern ip_set_id_t ip_set_nfnl_get(const char *name);
 extern ip_set_id_t ip_set_nfnl_get_byindex(ip_set_id_t index);
 extern void ip_set_nfnl_put(ip_set_id_t index);
 
 /* API for iptables set match, and SET target */
+
 extern int ip_set_add(ip_set_id_t id, const struct sk_buff *skb,
-		      u8 family, u8 dim, u8 flags);
+		      const struct xt_action_param *par,
+		      const struct ip_set_adt_opt *opt);
 extern int ip_set_del(ip_set_id_t id, const struct sk_buff *skb,
-		      u8 family, u8 dim, u8 flags);
+		      const struct xt_action_param *par,
+		      const struct ip_set_adt_opt *opt);
 extern int ip_set_test(ip_set_id_t id, const struct sk_buff *skb,
-		       u8 family, u8 dim, u8 flags);
+		       const struct xt_action_param *par,
+		       const struct ip_set_adt_opt *opt);
 
 /* Utility functions */
-extern void * ip_set_alloc(size_t size);
+extern void *ip_set_alloc(size_t size);
 extern void ip_set_free(void *members);
 extern int ip_set_get_ipaddr4(struct nlattr *nla,  __be32 *ipaddr);
 extern int ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr);
@@ -331,7 +359,7 @@
 {
 	__be32 ip;
 	int ret = ip_set_get_ipaddr4(nla, &ip);
-	
+
 	if (ret)
 		return ret;
 	*ipaddr = ntohl(ip);
diff --git a/include/linux/netfilter/ipset/ip_set_ahash.h b/include/linux/netfilter/ipset/ip_set_ahash.h
index ac3c822..c5b06aa 100644
--- a/include/linux/netfilter/ipset/ip_set_ahash.h
+++ b/include/linux/netfilter/ipset/ip_set_ahash.h
@@ -5,6 +5,11 @@
 #include <linux/jhash.h>
 #include <linux/netfilter/ipset/ip_set_timeout.h>
 
+#define CONCAT(a, b, c)		a##b##c
+#define TOKEN(a, b, c)		CONCAT(a, b, c)
+
+#define type_pf_next		TOKEN(TYPE, PF, _elem)
+
 /* Hashing which uses arrays to resolve clashing. The hash table is resized
  * (doubled) when searching becomes too long.
  * Internally jhash is used with the assumption that the size of the
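
A concrete expansion may help here: assuming the usual per-family
instantiation of this template (TYPE defined as hash_ip and PF as 4 before
this header is included), type_pf_next becomes TOKEN(hash_ip, 4, _elem),
which CONCAT pastes into hash_ip4_elem. The "struct type_pf_next next"
member added below therefore declares a struct hash_ip4_elem, used as
temporary storage for userspace adds per its field comment.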
@@ -38,7 +43,7 @@
 	struct hbucket bucket[0]; /* hashtable buckets */
 };
 
-#define hbucket(h, i)		&((h)->bucket[i])
+#define hbucket(h, i)		(&((h)->bucket[i]))
 
 /* Book-keeping of the prefixes added to the set */
 struct ip_set_hash_nets {
@@ -54,9 +59,13 @@
 	u32 initval;		/* random jhash init value */
 	u32 timeout;		/* timeout value, if enabled */
 	struct timer_list gc;	/* garbage collection when timeout enabled */
+	struct type_pf_next next; /* temporary storage for uadd */
 #ifdef IP_SET_HASH_WITH_NETMASK
 	u8 netmask;		/* netmask value for subnets to store */
 #endif
+#ifdef IP_SET_HASH_WITH_RBTREE
+	struct rb_root rbtree;
+#endif
 #ifdef IP_SET_HASH_WITH_NETS
 	struct ip_set_hash_nets nets[0]; /* book-keeping of prefixes */
 #endif
@@ -194,6 +203,9 @@
 		del_timer_sync(&h->gc);
 
 	ahash_destroy(h->table);
+#ifdef IP_SET_HASH_WITH_RBTREE
+	rbtree_destroy(&h->rbtree);
+#endif
 	kfree(h);
 
 	set->data = NULL;
@@ -217,6 +229,7 @@
 #define type_pf_data_netmask	TOKEN(TYPE, PF, _data_netmask)
 #define type_pf_data_list	TOKEN(TYPE, PF, _data_list)
 #define type_pf_data_tlist	TOKEN(TYPE, PF, _data_tlist)
+#define type_pf_data_next	TOKEN(TYPE, PF, _data_next)
 
 #define type_pf_elem		TOKEN(TYPE, PF, _elem)
 #define type_pf_telem		TOKEN(TYPE, PF, _telem)
@@ -346,10 +359,13 @@
 	return 0;
 }
 
+static void
+type_pf_data_next(struct ip_set_hash *h, const struct type_pf_elem *d);
+
 /* Add an element to a hash and update the internal counters when succeeded,
  * otherwise report the proper error code. */
 static int
-type_pf_add(struct ip_set *set, void *value, u32 timeout)
+type_pf_add(struct ip_set *set, void *value, u32 timeout, u32 flags)
 {
 	struct ip_set_hash *h = set->data;
 	struct htable *t;
@@ -372,8 +388,11 @@
 		}
 
 	ret = type_pf_elem_add(n, value);
-	if (ret != 0)
+	if (ret != 0) {
+		if (ret == -EAGAIN)
+			type_pf_data_next(h, d);
 		goto out;
+	}
 
 #ifdef IP_SET_HASH_WITH_NETS
 	add_cidr(h, d->cidr, HOST_MASK);
@@ -388,7 +407,7 @@
  * and free up space if possible.
  */
 static int
-type_pf_del(struct ip_set *set, void *value, u32 timeout)
+type_pf_del(struct ip_set *set, void *value, u32 timeout, u32 flags)
 {
 	struct ip_set_hash *h = set->data;
 	struct htable *t = h->table;
@@ -463,7 +482,7 @@
 
 /* Test whether the element is added to the set */
 static int
-type_pf_test(struct ip_set *set, void *value, u32 timeout)
+type_pf_test(struct ip_set *set, void *value, u32 timeout, u32 flags)
 {
 	struct ip_set_hash *h = set->data;
 	struct htable *t = h->table;
@@ -586,10 +605,11 @@
 
 static int
 type_pf_kadt(struct ip_set *set, const struct sk_buff * skb,
-	     enum ipset_adt adt, u8 pf, u8 dim, u8 flags);
+	     const struct xt_action_param *par,
+	     enum ipset_adt adt, const struct ip_set_adt_opt *opt);
 static int
 type_pf_uadt(struct ip_set *set, struct nlattr *tb[],
-	     enum ipset_adt adt, u32 *lineno, u32 flags);
+	     enum ipset_adt adt, u32 *lineno, u32 flags, bool retried);
 
 static const struct ip_set_type_variant type_pf_variant = {
 	.kadt	= type_pf_kadt,
@@ -776,7 +796,7 @@
 }
 
 static int
-type_pf_tadd(struct ip_set *set, void *value, u32 timeout)
+type_pf_tadd(struct ip_set *set, void *value, u32 timeout, u32 flags)
 {
 	struct ip_set_hash *h = set->data;
 	struct htable *t = h->table;
@@ -784,6 +804,7 @@
 	struct hbucket *n;
 	struct type_pf_elem *data;
 	int ret = 0, i, j = AHASH_MAX_SIZE + 1;
+	bool flag_exist = flags & IPSET_FLAG_EXIST;
 	u32 key;
 
 	if (h->elements >= h->maxelem)
@@ -799,7 +820,7 @@
 	for (i = 0; i < n->pos; i++) {
 		data = ahash_tdata(n, i);
 		if (type_pf_data_equal(data, d)) {
-			if (type_pf_data_expired(data))
+			if (type_pf_data_expired(data) || flag_exist)
 				j = i;
 			else {
 				ret = -IPSET_ERR_EXIST;
@@ -820,8 +841,11 @@
 		goto out;
 	}
 	ret = type_pf_elem_tadd(n, d, timeout);
-	if (ret != 0)
+	if (ret != 0) {
+		if (ret == -EAGAIN)
+			type_pf_data_next(h, d);
 		goto out;
+	}
 
 #ifdef IP_SET_HASH_WITH_NETS
 	add_cidr(h, d->cidr, HOST_MASK);
@@ -833,7 +857,7 @@
 }
 
 static int
-type_pf_tdel(struct ip_set *set, void *value, u32 timeout)
+type_pf_tdel(struct ip_set *set, void *value, u32 timeout, u32 flags)
 {
 	struct ip_set_hash *h = set->data;
 	struct htable *t = h->table;
@@ -905,7 +929,7 @@
 #endif
 
 static int
-type_pf_ttest(struct ip_set *set, void *value, u32 timeout)
+type_pf_ttest(struct ip_set *set, void *value, u32 timeout, u32 flags)
 {
 	struct ip_set_hash *h = set->data;
 	struct htable *t = h->table;
diff --git a/include/linux/netfilter/ipset/ip_set_hash.h b/include/linux/netfilter/ipset/ip_set_hash.h
index b86f15c..e2a9fae 100644
--- a/include/linux/netfilter/ipset/ip_set_hash.h
+++ b/include/linux/netfilter/ipset/ip_set_hash.h
@@ -11,6 +11,10 @@
 	IPSET_ERR_INVALID_PROTO,
 	/* Protocol missing but must be specified */
 	IPSET_ERR_MISSING_PROTO,
+	/* Range not supported */
+	IPSET_ERR_HASH_RANGE_UNSUPPORTED,
+	/* Invalid range */
+	IPSET_ERR_HASH_RANGE,
 };
 
 #ifdef __KERNEL__
diff --git a/include/linux/netfilter/ipset/ip_set_timeout.h b/include/linux/netfilter/ipset/ip_set_timeout.h
index bcdd40a..4792320 100644
--- a/include/linux/netfilter/ipset/ip_set_timeout.h
+++ b/include/linux/netfilter/ipset/ip_set_timeout.h
@@ -22,6 +22,9 @@
 
 #define with_timeout(timeout)	((timeout) != IPSET_NO_TIMEOUT)
 
+#define opt_timeout(opt, map)	\
+	(with_timeout((opt)->timeout) ? (opt)->timeout : (map)->timeout)
+
 static inline unsigned int
 ip_set_timeout_uget(struct nlattr *tb)
 {
@@ -75,7 +78,7 @@
 static inline u32
 ip_set_timeout_get(unsigned long timeout)
 {
-	return timeout == IPSET_ELEM_PERMANENT ? 0 : 
+	return timeout == IPSET_ELEM_PERMANENT ? 0 :
 		jiffies_to_msecs(timeout - jiffies)/1000;
 }
 
diff --git a/include/linux/netfilter/ipset/pfxlen.h b/include/linux/netfilter/ipset/pfxlen.h
index 0e1fb50..199fd11 100644
--- a/include/linux/netfilter/ipset/pfxlen.h
+++ b/include/linux/netfilter/ipset/pfxlen.h
@@ -2,7 +2,8 @@
 #define _PFXLEN_H
 
 #include <asm/byteorder.h>
-#include <linux/netfilter.h> 
+#include <linux/netfilter.h>
+#include <net/tcp.h>
 
 /* Prefixlen maps, by Jan Engelhardt  */
 extern const union nf_inet_addr ip_set_netmask_map[];
@@ -32,4 +33,12 @@
 	return &ip_set_hostmask_map[pfxlen].ip6[0];
 }
 
+extern u32 ip_set_range_to_cidr(u32 from, u32 to, u8 *cidr);
+
+#define ip_set_mask_from_to(from, to, cidr)	\
+do {						\
+	from &= ip_set_hostmask(cidr);		\
+	to = from | ~ip_set_hostmask(cidr);	\
+} while (0)
+
 #endif /*_PFXLEN_H */
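
A small sketch of the new range helper, operating on host-byte-order u32
addresses as the hash types do (in_aton() is from <linux/inet.h>; the
assumption is that ip_set_hostmask() returns the mask in host byte order, as
the name suggests):

#include <linux/inet.h>
#include <linux/netfilter/ipset/pfxlen.h>

static void demo_mask_range(void)
{
	u32 from = ntohl(in_aton("192.168.1.3"));
	u32 to;

	/* clamp the address to its /24 network and broadcast bounds */
	ip_set_mask_from_to(from, to, 24);
	/* from == 192.168.1.0, to == 192.168.1.255 (host byte order) */
}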
diff --git a/include/linux/netfilter/xt_set.h b/include/linux/netfilter/xt_set.h
index 081f1de..c0405ac 100644
--- a/include/linux/netfilter/xt_set.h
+++ b/include/linux/netfilter/xt_set.h
@@ -35,7 +35,7 @@
 	struct xt_set_info_v0 del_set;
 };
 
-/* Revision 1: current interface to netfilter/iptables */
+/* Revision 1  match and target */
 
 struct xt_set_info {
 	ip_set_id_t index;
@@ -44,13 +44,22 @@
 };
 
 /* match and target infos */
-struct xt_set_info_match {
+struct xt_set_info_match_v1 {
 	struct xt_set_info match_set;
 };
 
-struct xt_set_info_target {
+struct xt_set_info_target_v1 {
 	struct xt_set_info add_set;
 	struct xt_set_info del_set;
 };
 
+/* Revision 2 target */
+
+struct xt_set_info_target_v2 {
+	struct xt_set_info add_set;
+	struct xt_set_info del_set;
+	u32 flags;
+	u32 timeout;
+};
+
 #endif /*_XT_SET_H*/
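
A hedged sketch of how a revision-2 target user might fill in the new struct;
the flags/timeout semantics here are assumptions based on the IPSET_FLAG_*
and timeout conventions elsewhere in this patch:

#include <linux/netfilter/ipset/ip_set.h>
#include <linux/netfilter/xt_set.h>

static struct xt_set_info_target_v2 demo_target(ip_set_id_t add_idx)
{
	struct xt_set_info_target_v2 info = {
		.add_set = {
			.index = add_idx,
			.dim   = IPSET_DIM_ONE,
			.flags = IPSET_DIM_ONE_SRC,	/* match on source */
		},
		.del_set = { .index = IPSET_INVALID_ID },	/* unused */
		.flags   = 0,
		.timeout = 600,	/* per-entry timeout, seconds (assumption) */
	};

	return info;
}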
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index a9dd895..fdd0188 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -221,7 +221,8 @@
 	int			(*dump)(struct sk_buff * skb,
 					struct netlink_callback *cb);
 	int			(*done)(struct netlink_callback *cb);
-	int			family;
+	u16			family;
+	u16			min_dump_alloc;
 	long			args[6];
 };
 
@@ -259,7 +260,8 @@
 extern int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
 			      const struct nlmsghdr *nlh,
 			      int (*dump)(struct sk_buff *skb, struct netlink_callback*),
-			      int (*done)(struct netlink_callback*));
+			      int (*done)(struct netlink_callback*),
+			      u16 min_dump_alloc);
 
 
 #define NL_NONROOT_RECV 0x1
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index c0a4f3a..3e54337 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2028,8 +2028,7 @@
  * skb_tx_timestamp() - Driver hook for transmit timestamping
  *
  * Ethernet MAC Drivers should call this function in their hard_xmit()
- * function as soon as possible after giving the sk_buff to the MAC
- * hardware, but before freeing the sk_buff.
+ * function immediately before giving the sk_buff to the MAC hardware.
  *
  * @skb: A socket buffer.
  */
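
The revised wording maps onto a driver's xmit path roughly as below (demo_*
names are hypothetical; demo_hw_kick() stands in for the real descriptor
hand-off):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void demo_hw_kick(struct net_device *dev, struct sk_buff *skb);

static netdev_tx_t demo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* ... map buffers and build descriptors ... */

	skb_tx_timestamp(skb);	/* immediately before the MAC hand-off */
	demo_hw_kick(dev, skb);

	return NETDEV_TX_OK;
}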
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index e64f4c6..531ede8 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -282,6 +282,7 @@
 #endif
 	u32				rcv_isn;
 	u32				snt_isn;
+	u32				snt_synack; /* synack sent time */
 };
 
 static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req)
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index 136040b..970d5a2 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -63,6 +63,7 @@
  * specify GSO or CSUM features, you can simply ignore the header. */
 struct virtio_net_hdr {
 #define VIRTIO_NET_HDR_F_NEEDS_CSUM	1	// Use csum_start, csum_offset
+#define VIRTIO_NET_HDR_F_DATA_VALID	2	// Csum is valid
 	__u8 flags;
 #define VIRTIO_NET_HDR_GSO_NONE		0	// Not a GSO frame
 #define VIRTIO_NET_HDR_GSO_TCPV4	1	// GSO frame, IPv4 TCP (TSO)
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 6c994c0..7851c05 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -25,6 +25,7 @@
 #ifndef __HCI_CORE_H
 #define __HCI_CORE_H
 
+#include <linux/interrupt.h>
 #include <net/bluetooth/hci.h>
 
 /* HCI upper protocols */
diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h
new file mode 100644
index 0000000..c5dedd8
--- /dev/null
+++ b/include/net/caif/caif_hsi.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
+ * Author:  Daniel Martensson / daniel.martensson@stericsson.com
+ *	    Dmitry.Tarnyagin  / dmitry.tarnyagin@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef CAIF_HSI_H_
+#define CAIF_HSI_H_
+
+#include <net/caif/caif_layer.h>
+#include <net/caif/caif_device.h>
+#include <linux/atomic.h>
+
+/*
+ * Maximum number of CAIF frames that can reside in the same HSI frame.
+ */
+#define CFHSI_MAX_PKTS 15
+
+/*
+ * Maximum number of bytes used for the frame that can be embedded in the
+ * HSI descriptor.
+ */
+#define CFHSI_MAX_EMB_FRM_SZ 96
+
+/*
+ * Decides if HSI buffers should be prefilled with 0xFF pattern for easier
+ * debugging. Both TX and RX buffers will be filled before the transfer.
+ */
+#define CFHSI_DBG_PREFILL		0
+
+/* Structure describing a HSI packet descriptor. */
+#pragma pack(1) /* Byte alignment. */
+struct cfhsi_desc {
+	u8 header;
+	u8 offset;
+	u16 cffrm_len[CFHSI_MAX_PKTS];
+	u8 emb_frm[CFHSI_MAX_EMB_FRM_SZ];
+};
+#pragma pack() /* Default alignment. */
+
+/* Size of the complete HSI packet descriptor. */
+#define CFHSI_DESC_SZ (sizeof(struct cfhsi_desc))
+
+/*
+ * Size of the complete HSI packet descriptor excluding the optional embedded
+ * CAIF frame.
+ */
+#define CFHSI_DESC_SHORT_SZ (CFHSI_DESC_SZ - CFHSI_MAX_EMB_FRM_SZ)
+
+/*
+ * Maximum bytes transferred in one transfer.
+ */
+/* TODO: 4096 is temporary... */
+#define CFHSI_MAX_PAYLOAD_SZ (CFHSI_MAX_PKTS * 4096)
+
+/* Size of the complete HSI TX buffer. */
+#define CFHSI_BUF_SZ_TX (CFHSI_DESC_SZ + CFHSI_MAX_PAYLOAD_SZ)
+
+/* Size of the complete HSI RX buffer. */
+#define CFHSI_BUF_SZ_RX ((2 * CFHSI_DESC_SZ) + CFHSI_MAX_PAYLOAD_SZ)
+
+/* Bitmasks for the HSI descriptor. */
+#define CFHSI_PIGGY_DESC		(0x01 << 7)
+
+#define CFHSI_TX_STATE_IDLE			0
+#define CFHSI_TX_STATE_XFER			1
+
+#define CFHSI_RX_STATE_DESC			0
+#define CFHSI_RX_STATE_PAYLOAD			1
+
+/* Bitmasks for power management. */
+#define CFHSI_WAKE_UP				0
+#define CFHSI_WAKE_UP_ACK			1
+#define CFHSI_WAKE_DOWN_ACK			2
+#define CFHSI_AWAKE				3
+#define CFHSI_PENDING_RX			4
+#define CFHSI_SHUTDOWN				6
+#define CFHSI_FLUSH_FIFO			7
+
+#ifndef CFHSI_INACTIVITY_TOUT
+#define CFHSI_INACTIVITY_TOUT			(1 * HZ)
+#endif /* CFHSI_INACTIVITY_TOUT */
+
+#ifndef CFHSI_WAKEUP_TOUT
+#define CFHSI_WAKEUP_TOUT			(3 * HZ)
+#endif /* CFHSI_WAKEUP_TOUT */
+
+
+/* Structure implemented by the CAIF HSI driver. */
+struct cfhsi_drv {
+	void (*tx_done_cb) (struct cfhsi_drv *drv);
+	void (*rx_done_cb) (struct cfhsi_drv *drv);
+	void (*wake_up_cb) (struct cfhsi_drv *drv);
+	void (*wake_down_cb) (struct cfhsi_drv *drv);
+};
+
+/* Structure implemented by HSI device. */
+struct cfhsi_dev {
+	int (*cfhsi_up) (struct cfhsi_dev *dev);
+	int (*cfhsi_down) (struct cfhsi_dev *dev);
+	int (*cfhsi_tx) (u8 *ptr, int len, struct cfhsi_dev *dev);
+	int (*cfhsi_rx) (u8 *ptr, int len, struct cfhsi_dev *dev);
+	int (*cfhsi_wake_up) (struct cfhsi_dev *dev);
+	int (*cfhsi_wake_down) (struct cfhsi_dev *dev);
+	int (*cfhsi_fifo_occupancy)(struct cfhsi_dev *dev, size_t *occupancy);
+	int (*cfhsi_rx_cancel)(struct cfhsi_dev *dev);
+	struct cfhsi_drv *drv;
+};
+
+/* Structure implemented by CAIF HSI drivers. */
+struct cfhsi {
+	struct caif_dev_common cfdev;
+	struct net_device *ndev;
+	struct platform_device *pdev;
+	struct sk_buff_head qhead;
+	struct cfhsi_drv drv;
+	struct cfhsi_dev *dev;
+	int tx_state;
+	int rx_state;
+	int rx_len;
+	u8 *rx_ptr;
+	u8 *tx_buf;
+	u8 *rx_buf;
+	spinlock_t lock;
+	int flow_off_sent;
+	u32 q_low_mark;
+	u32 q_high_mark;
+	struct list_head list;
+	struct work_struct wake_up_work;
+	struct work_struct wake_down_work;
+	struct work_struct rx_done_work;
+	struct work_struct tx_done_work;
+	struct workqueue_struct *wq;
+	wait_queue_head_t wake_up_wait;
+	wait_queue_head_t wake_down_wait;
+	wait_queue_head_t flush_fifo_wait;
+	struct timer_list timer;
+	unsigned long bits;
+};
+
+extern struct platform_driver cfhsi_driver;
+
+#endif		/* CAIF_HSI_H_ */
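
One consequence of the #pragma pack(1) layout above is worth spelling out:
sizeof(struct cfhsi_desc) is exactly 1 (header) + 1 (offset) + 15 * 2
(cffrm_len) + 96 (emb_frm) = 128 bytes, so CFHSI_DESC_SZ evaluates to 128 and
CFHSI_DESC_SHORT_SZ to 128 - 96 = 32. A driver depending on this wire format
could pin it with a compile-time check in its init path, e.g. (sketch):

	BUILD_BUG_ON(sizeof(struct cfhsi_desc) !=
		     CFHSI_DESC_SHORT_SZ + CFHSI_MAX_EMB_FRM_SZ);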
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 0589f55..6cb2543 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -1284,6 +1284,12 @@
  *	frame on another channel
  *
  * @testmode_cmd: run a test mode command
+ * @testmode_dump: Implement a test mode dump. The cb->args[2] and up may be
+ *	used by the function, but 0 and 1 must not be touched. Additionally,
+ *	returning error codes other than -ENOBUFS and -ENOENT will terminate the
+ *	dump and return to userspace with an error, so be careful. If any data
+ *	was passed in from userspace then the data/len arguments will be present
+ *	and point to the data contained in %NL80211_ATTR_TESTDATA.
  *
  * @set_bitrate_mask: set the bitrate mask configuration
  *
@@ -1433,6 +1439,9 @@
 
 #ifdef CONFIG_NL80211_TESTMODE
 	int	(*testmode_cmd)(struct wiphy *wiphy, void *data, int len);
+	int	(*testmode_dump)(struct wiphy *wiphy, struct sk_buff *skb,
+				 struct netlink_callback *cb,
+				 void *data, int len);
 #endif
 
 	int	(*set_bitrate_mask)(struct wiphy *wiphy,
@@ -2849,8 +2858,10 @@
 void cfg80211_testmode_event(struct sk_buff *skb, gfp_t gfp);
 
 #define CFG80211_TESTMODE_CMD(cmd)	.testmode_cmd = (cmd),
+#define CFG80211_TESTMODE_DUMP(cmd)	.testmode_dump = (cmd),
 #else
 #define CFG80211_TESTMODE_CMD(cmd)
+#define CFG80211_TESTMODE_DUMP(cmd)
 #endif
 
 /**
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index 8a159cc..39d1230 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -32,13 +32,17 @@
 	struct inet_peer __rcu	*avl_left, *avl_right;
 	struct inetpeer_addr	daddr;
 	__u32			avl_height;
-	struct list_head	unused;
-	__u32			dtime;		/* the time of last use of not
-						 * referenced entries */
-	atomic_t		refcnt;
+
+	u32			metrics[RTAX_MAX];
+	u32			rate_tokens;	/* rate limiting for ICMP */
+	unsigned long		rate_last;
+	unsigned long		pmtu_expires;
+	u32			pmtu_orig;
+	u32			pmtu_learned;
+	struct inetpeer_addr_base redirect_learned;
 	/*
 	 * Once inet_peer is queued for deletion (refcnt == -1), following fields
-	 * are not available: rid, ip_id_count, tcp_ts, tcp_ts_stamp, metrics
+	 * are not available: rid, ip_id_count, tcp_ts, tcp_ts_stamp
 	 * We can share memory with rcu_head to help keep inet_peer small.
 	 */
 	union {
@@ -47,16 +51,14 @@
 			atomic_t			ip_id_count;	/* IP ID for the next packet */
 			__u32				tcp_ts;
 			__u32				tcp_ts_stamp;
-			u32				metrics[RTAX_MAX];
-			u32				rate_tokens;	/* rate limiting for ICMP */
-			unsigned long			rate_last;
-			unsigned long			pmtu_expires;
-			u32				pmtu_orig;
-			u32				pmtu_learned;
-			struct inetpeer_addr_base	redirect_learned;
 		};
 		struct rcu_head         rcu;
+		struct inet_peer	*gc_next;
 	};
+
+	/* following fields might be frequently dirtied */
+	__u32			dtime;	/* the time of last use of not referenced entries */
+	atomic_t		refcnt;
 };
 
 void			inet_initpeers(void) __init;
diff --git a/include/net/ip.h b/include/net/ip.h
index 66dd491..e9ea7c7 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -228,8 +228,6 @@
 extern int inet_peer_threshold;
 extern int inet_peer_minttl;
 extern int inet_peer_maxttl;
-extern int inet_peer_gc_mintime;
-extern int inet_peer_gc_maxtime;
 
 /* From ip_output.c */
 extern int sysctl_ip_dynaddr;
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 481f856..b1370c4 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -836,8 +836,6 @@
 	int			num_services;    /* no of virtual services */
 
 	rwlock_t		rs_lock;         /* real services table */
-	/* semaphore for IPVS sockopts. And, [gs]etsockopt may sleep. */
-	struct lock_class_key	ctl_key;	/* ctl_mutex debuging */
 	/* Trash for destinations */
 	struct list_head	dest_trash;
 	/* Service counters */
@@ -1089,19 +1087,19 @@
 /*
  * IPVS netns init & cleanup functions
  */
-extern int __ip_vs_estimator_init(struct net *net);
-extern int __ip_vs_control_init(struct net *net);
-extern int __ip_vs_protocol_init(struct net *net);
-extern int __ip_vs_app_init(struct net *net);
-extern int __ip_vs_conn_init(struct net *net);
-extern int __ip_vs_sync_init(struct net *net);
-extern void __ip_vs_conn_cleanup(struct net *net);
-extern void __ip_vs_app_cleanup(struct net *net);
-extern void __ip_vs_protocol_cleanup(struct net *net);
-extern void __ip_vs_control_cleanup(struct net *net);
-extern void __ip_vs_estimator_cleanup(struct net *net);
-extern void __ip_vs_sync_cleanup(struct net *net);
-extern void __ip_vs_service_cleanup(struct net *net);
+extern int ip_vs_estimator_net_init(struct net *net);
+extern int ip_vs_control_net_init(struct net *net);
+extern int ip_vs_protocol_net_init(struct net *net);
+extern int ip_vs_app_net_init(struct net *net);
+extern int ip_vs_conn_net_init(struct net *net);
+extern int ip_vs_sync_net_init(struct net *net);
+extern void ip_vs_conn_net_cleanup(struct net *net);
+extern void ip_vs_app_net_cleanup(struct net *net);
+extern void ip_vs_protocol_net_cleanup(struct net *net);
+extern void ip_vs_control_net_cleanup(struct net *net);
+extern void ip_vs_estimator_net_cleanup(struct net *net);
+extern void ip_vs_sync_net_cleanup(struct net *net);
+extern void ip_vs_service_net_cleanup(struct net *net);
 
 /*
  *      IPVS application functions
@@ -1119,8 +1117,6 @@
 
 extern int ip_vs_app_pkt_out(struct ip_vs_conn *, struct sk_buff *skb);
 extern int ip_vs_app_pkt_in(struct ip_vs_conn *, struct sk_buff *skb);
-extern int ip_vs_app_init(void);
-extern void ip_vs_app_cleanup(void);
 
 void ip_vs_bind_pe(struct ip_vs_service *svc, struct ip_vs_pe *pe);
 void ip_vs_unbind_pe(struct ip_vs_service *svc);
@@ -1223,15 +1219,11 @@
 			     __u8 syncid);
 extern int stop_sync_thread(struct net *net, int state);
 extern void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp);
-extern int ip_vs_sync_init(void);
-extern void ip_vs_sync_cleanup(void);
 
 
 /*
  *      IPVS rate estimator prototypes (from ip_vs_est.c)
  */
-extern int ip_vs_estimator_init(void);
-extern void ip_vs_estimator_cleanup(void);
 extern void ip_vs_start_estimator(struct net *net, struct ip_vs_stats *stats);
 extern void ip_vs_stop_estimator(struct net *net, struct ip_vs_stats *stats);
 extern void ip_vs_zero_estimator(struct ip_vs_stats *stats);
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index e6d6a66..3b31ec9 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -1816,6 +1816,7 @@
  *
  * @testmode_cmd: Implement a cfg80211 test mode command.
  *	The callback can sleep.
+ * @testmode_dump: Implement a cfg80211 test mode dump. The callback can sleep.
  *
  * @flush: Flush all pending frames from the hardware queue, making sure
  *	that the hardware queues are empty. If the parameter @drop is set
@@ -1936,6 +1937,9 @@
 	void (*set_coverage_class)(struct ieee80211_hw *hw, u8 coverage_class);
 #ifdef CONFIG_NL80211_TESTMODE
 	int (*testmode_cmd)(struct ieee80211_hw *hw, void *data, int len);
+	int (*testmode_dump)(struct ieee80211_hw *hw, struct sk_buff *skb,
+			     struct netlink_callback *cb,
+			     void *data, int len);
 #endif
 	void (*flush)(struct ieee80211_hw *hw, bool drop);
 	void (*channel_switch)(struct ieee80211_hw *hw,
@@ -2965,6 +2969,23 @@
  */
 void ieee80211_remain_on_channel_expired(struct ieee80211_hw *hw);
 
+/**
+ * ieee80211_stop_rx_ba_session - callback to stop existing BA sessions
+ *
+ * In order not to harm system performance and user experience, the device
+ * may request that no RX BA sessions be allowed, and may tear down existing
+ * RX BA sessions, based on system constraints such as periodic BT activity
+ * that needs to limit WLAN activity (e.g. SCO or A2DP).
+ * In such cases, the intention is to limit the duration of the RX PPDU and
+ * therefore prevent the peer device from using A-MPDU aggregation.
+ *
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @ba_rx_bitmap: Bit map of open RX BA sessions, per TID
+ * @addr: pointer to the BSSID MAC address
+ */
+void ieee80211_stop_rx_ba_session(struct ieee80211_vif *vif, u16 ba_rx_bitmap,
+				  const u8 *addr);
+
 /* Rate control API */
 
 /**
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index 4093ca7..678f1ff 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -6,11 +6,14 @@
 
 typedef int (*rtnl_doit_func)(struct sk_buff *, struct nlmsghdr *, void *);
 typedef int (*rtnl_dumpit_func)(struct sk_buff *, struct netlink_callback *);
+typedef u16 (*rtnl_calcit_func)(struct sk_buff *);
 
 extern int	__rtnl_register(int protocol, int msgtype,
-				rtnl_doit_func, rtnl_dumpit_func);
+				rtnl_doit_func, rtnl_dumpit_func,
+				rtnl_calcit_func);
 extern void	rtnl_register(int protocol, int msgtype,
-			      rtnl_doit_func, rtnl_dumpit_func);
+			      rtnl_doit_func, rtnl_dumpit_func,
+			      rtnl_calcit_func);
 extern int	rtnl_unregister(int protocol, int msgtype);
 extern void	rtnl_unregister_all(int protocol);
 
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index b2c2366..6a72a58 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -120,6 +120,7 @@
 				     int flags);
 extern struct sctp_pf *sctp_get_pf_specific(sa_family_t family);
 extern int sctp_register_pf(struct sctp_pf *, sa_family_t);
+extern void sctp_addr_wq_mgmt(struct sctp_sockaddr_entry *, int);
 
 /*
  * sctp/socket.c
@@ -134,6 +135,7 @@
 void sctp_copy_sock(struct sock *newsk, struct sock *sk,
 		    struct sctp_association *asoc);
 extern struct percpu_counter sctp_sockets_allocated;
+extern int sctp_asconf_mgmt(struct sctp_sock *, struct sctp_sockaddr_entry *);
 
 /*
  * sctp/primitive.c
@@ -285,20 +287,21 @@
 		pr_cont(fmt, ##args);			\
 } while (0)
 #define SCTP_DEBUG_PRINTK_IPADDR(fmt_lead, fmt_trail,			\
-				 args_lead, saddr, args_trail...)	\
+				 args_lead, addr, args_trail...)	\
 do {									\
+	const union sctp_addr *_addr = (addr);				\
 	if (sctp_debug_flag) {						\
-		if (saddr->sa.sa_family == AF_INET6) {			\
+		if (_addr->sa.sa_family == AF_INET6) {			\
 			printk(KERN_DEBUG				\
 			       pr_fmt(fmt_lead "%pI6" fmt_trail),	\
 			       args_lead,				\
-			       &saddr->v6.sin6_addr,			\
+			       &_addr->v6.sin6_addr,			\
 			       args_trail);				\
 		} else {						\
 			printk(KERN_DEBUG				\
 			       pr_fmt(fmt_lead "%pI4" fmt_trail),	\
 			       args_lead,				\
-			       &saddr->v4.sin_addr.s_addr,		\
+			       &_addr->v4.sin_addr.s_addr,		\
 			       args_trail);				\
 		}							\
 	}								\
@@ -598,7 +601,7 @@
 		return AF_INET6;
 	default:
 		return 0;
-	};
+	}
 }
 
 /* Convert from an address parameter type to an address family.  */
@@ -611,7 +614,7 @@
 		return AF_INET6;
 	default:
 		return 0;
-	};
+	}
 }
 
 /* Perform some sanity checks. */
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 7df327a..31d7ea2 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -205,6 +205,11 @@
 	 * It is a list of sctp_sockaddr_entry.
 	 */
 	struct list_head local_addr_list;
+	int default_auto_asconf;
+	struct list_head addr_waitq;
+	struct timer_list addr_wq_timer;
+	struct list_head auto_asconf_splist;
+	spinlock_t addr_wq_lock;
 
 	/* Lock that protects the local_addr_list writers */
 	spinlock_t addr_list_lock;
@@ -264,6 +269,11 @@
 #define sctp_port_hashtable		(sctp_globals.port_hashtable)
 #define sctp_local_addr_list		(sctp_globals.local_addr_list)
 #define sctp_local_addr_lock		(sctp_globals.addr_list_lock)
+#define sctp_auto_asconf_splist		(sctp_globals.auto_asconf_splist)
+#define sctp_addr_waitq			(sctp_globals.addr_waitq)
+#define sctp_addr_wq_timer		(sctp_globals.addr_wq_timer)
+#define sctp_addr_wq_lock		(sctp_globals.addr_wq_lock)
+#define sctp_default_auto_asconf	(sctp_globals.default_auto_asconf)
 #define sctp_scope_policy		(sctp_globals.ipv4_scope_policy)
 #define sctp_addip_enable		(sctp_globals.addip_enable)
 #define sctp_addip_noauth		(sctp_globals.addip_noauth_enable)
@@ -341,6 +351,8 @@
 	atomic_t pd_mode;
 	/* Receive to here while partial delivery is in effect. */
 	struct sk_buff_head pd_lobby;
+	struct list_head auto_asconf_list;
+	int do_auto_asconf;
 };
 
 static inline struct sctp_sock *sctp_sk(const struct sock *sk)
@@ -792,6 +804,8 @@
 	__u8 valid;
 };
 
+#define SCTP_ADDRESS_TICK_DELAY	500
+
 typedef struct sctp_chunk *(sctp_packet_phandler_t)(struct sctp_association *);
 
 /* This structure holds lists of chunks as we are assembling for
@@ -1236,6 +1250,7 @@
 int sctp_in_scope(const union sctp_addr *addr, const sctp_scope_t scope);
 int sctp_is_any(struct sock *sk, const union sctp_addr *addr);
 int sctp_addr_is_valid(const union sctp_addr *addr);
+int sctp_is_ep_boundall(struct sock *sk);
 
 
 /* What type of endpoint?  */
@@ -1898,6 +1913,8 @@
 	 * after reaching 4294967295.
 	 */
 	__u32 addip_serial;
+	union sctp_addr *asconf_addr_del_pending;
+	int src_out_of_asoc_ok;
 
 	/* SCTP AUTH: list of the endpoint shared keys.  These
 	 * keys are provided out of band by the user application
diff --git a/include/net/sctp/user.h b/include/net/sctp/user.h
index 32fd512..0842ef0 100644
--- a/include/net/sctp/user.h
+++ b/include/net/sctp/user.h
@@ -92,6 +92,7 @@
 #define SCTP_LOCAL_AUTH_CHUNKS	27	/* Read only */
 #define SCTP_GET_ASSOC_NUMBER	28	/* Read only */
 #define SCTP_GET_ASSOC_ID_LIST	29	/* Read only */
+#define SCTP_AUTO_ASCONF       30
 
 /* Internal Socket Options. Some of the sctp library functions are
  * implemented using these socket options.
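
From userspace, the new SCTP_AUTO_ASCONF option would be toggled like any
other one-integer SCTP socket option; a hedged sketch (headers and error
handling abbreviated):

#include <sys/socket.h>
#include <netinet/in.h>

static int enable_auto_asconf(int fd)
{
	int on = 1;

	/* SCTP_AUTO_ASCONF is the new option value 30 defined above */
	return setsockopt(fd, IPPROTO_SCTP, SCTP_AUTO_ASCONF, &on,
			  sizeof(on));
}
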
diff --git a/include/net/snmp.h b/include/net/snmp.h
index 479083a..8f0f9ac 100644
--- a/include/net/snmp.h
+++ b/include/net/snmp.h
@@ -116,57 +116,51 @@
 	unsigned long	mibs[LINUX_MIB_XFRMMAX];
 };
 
-/* 
- * FIXME: On x86 and some other CPUs the split into user and softirq parts
- * is not needed because addl $1,memory is atomic against interrupts (but 
- * atomic_inc would be overkill because of the lock cycles). Wants new 
- * nonlocked_atomic_inc() primitives -AK
- */ 
+#define SNMP_ARRAY_SZ 1
+
 #define DEFINE_SNMP_STAT(type, name)	\
-	__typeof__(type) __percpu *name[2]
+	__typeof__(type) __percpu *name[SNMP_ARRAY_SZ]
 #define DEFINE_SNMP_STAT_ATOMIC(type, name)	\
 	__typeof__(type) *name
 #define DECLARE_SNMP_STAT(type, name)	\
-	extern __typeof__(type) __percpu *name[2]
-
-#define SNMP_STAT_BHPTR(name)	(name[0])
-#define SNMP_STAT_USRPTR(name)	(name[1])
+	extern __typeof__(type) __percpu *name[SNMP_ARRAY_SZ]
 
 #define SNMP_INC_STATS_BH(mib, field)	\
 			__this_cpu_inc(mib[0]->mibs[field])
+
 #define SNMP_INC_STATS_USER(mib, field)	\
-			this_cpu_inc(mib[1]->mibs[field])
+			irqsafe_cpu_inc(mib[0]->mibs[field])
+
 #define SNMP_INC_STATS_ATOMIC_LONG(mib, field)	\
 			atomic_long_inc(&mib->mibs[field])
+
 #define SNMP_INC_STATS(mib, field)	\
-			this_cpu_inc(mib[!in_softirq()]->mibs[field])
+			irqsafe_cpu_inc(mib[0]->mibs[field])
+
 #define SNMP_DEC_STATS(mib, field)	\
-			this_cpu_dec(mib[!in_softirq()]->mibs[field])
+			irqsafe_cpu_dec(mib[0]->mibs[field])
+
 #define SNMP_ADD_STATS_BH(mib, field, addend)	\
 			__this_cpu_add(mib[0]->mibs[field], addend)
+
 #define SNMP_ADD_STATS_USER(mib, field, addend)	\
-			this_cpu_add(mib[1]->mibs[field], addend)
+			irqsafe_cpu_add(mib[0]->mibs[field], addend)
+
 #define SNMP_ADD_STATS(mib, field, addend)	\
-			this_cpu_add(mib[!in_softirq()]->mibs[field], addend)
+			irqsafe_cpu_add(mib[0]->mibs[field], addend)
 /*
  * Use "__typeof__(*mib[0]) *ptr" instead of "__typeof__(mib[0]) ptr"
  * to make @ptr a non-percpu pointer.
  */
 #define SNMP_UPD_PO_STATS(mib, basefield, addend)	\
 	do { \
-		__typeof__(*mib[0]) *ptr; \
-		preempt_disable(); \
-		ptr = this_cpu_ptr((mib)[!in_softirq()]); \
-		ptr->mibs[basefield##PKTS]++; \
-		ptr->mibs[basefield##OCTETS] += addend;\
-		preempt_enable(); \
+		irqsafe_cpu_inc(mib[0]->mibs[basefield##PKTS]);		\
+		irqsafe_cpu_add(mib[0]->mibs[basefield##OCTETS], addend);	\
 	} while (0)
 #define SNMP_UPD_PO_STATS_BH(mib, basefield, addend)	\
 	do { \
-		__typeof__(*mib[0]) *ptr = \
-			__this_cpu_ptr((mib)[0]); \
-		ptr->mibs[basefield##PKTS]++; \
-		ptr->mibs[basefield##OCTETS] += addend;\
+		__this_cpu_inc(mib[0]->mibs[basefield##PKTS]);		\
+		__this_cpu_add(mib[0]->mibs[basefield##OCTETS], addend);	\
 	} while (0)
 
 
@@ -179,40 +173,20 @@
 		ptr->mibs[field] += addend;				\
 		u64_stats_update_end(&ptr->syncp);			\
 	} while (0)
+
 #define SNMP_ADD_STATS64_USER(mib, field, addend) 			\
 	do {								\
-		__typeof__(*mib[0]) *ptr;				\
-		preempt_disable();					\
-		ptr = __this_cpu_ptr((mib)[1]);				\
-		u64_stats_update_begin(&ptr->syncp);			\
-		ptr->mibs[field] += addend;				\
-		u64_stats_update_end(&ptr->syncp);			\
-		preempt_enable();					\
+		local_bh_disable();					\
+		SNMP_ADD_STATS64_BH(mib, field, addend);		\
+		local_bh_enable();					\
 	} while (0)
+
 #define SNMP_ADD_STATS64(mib, field, addend)				\
-	do {								\
-		__typeof__(*mib[0]) *ptr;				\
-		preempt_disable();					\
-		ptr = __this_cpu_ptr((mib)[!in_softirq()]);		\
-		u64_stats_update_begin(&ptr->syncp);			\
-		ptr->mibs[field] += addend;				\
-		u64_stats_update_end(&ptr->syncp);			\
-		preempt_enable();					\
-	} while (0)
+		SNMP_ADD_STATS64_USER(mib, field, addend)
+
 #define SNMP_INC_STATS64_BH(mib, field) SNMP_ADD_STATS64_BH(mib, field, 1)
 #define SNMP_INC_STATS64_USER(mib, field) SNMP_ADD_STATS64_USER(mib, field, 1)
 #define SNMP_INC_STATS64(mib, field) SNMP_ADD_STATS64(mib, field, 1)
-#define SNMP_UPD_PO_STATS64(mib, basefield, addend)			\
-	do {								\
-		__typeof__(*mib[0]) *ptr;				\
-		preempt_disable();					\
-		ptr = __this_cpu_ptr((mib)[!in_softirq()]);		\
-		u64_stats_update_begin(&ptr->syncp);			\
-		ptr->mibs[basefield##PKTS]++;				\
-		ptr->mibs[basefield##OCTETS] += addend;			\
-		u64_stats_update_end(&ptr->syncp);			\
-		preempt_enable();					\
-	} while (0)
 #define SNMP_UPD_PO_STATS64_BH(mib, basefield, addend)			\
 	do {								\
 		__typeof__(*mib[0]) *ptr;				\
@@ -222,6 +196,12 @@
 		ptr->mibs[basefield##OCTETS] += addend;			\
 		u64_stats_update_end(&ptr->syncp);			\
 	} while (0)
+#define SNMP_UPD_PO_STATS64(mib, basefield, addend)			\
+	do {								\
+		local_bh_disable();					\
+		SNMP_UPD_PO_STATS64_BH(mib, basefield, addend);		\
+		local_bh_enable();					\
+	} while (0)
 #else
 #define SNMP_INC_STATS64_BH(mib, field)		SNMP_INC_STATS_BH(mib, field)
 #define SNMP_INC_STATS64_USER(mib, field)	SNMP_INC_STATS_USER(mib, field)
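
The practical effect of SNMP_ARRAY_SZ dropping from 2 to 1: each MIB now
keeps a single per-cpu copy, updated with irq-safe per-cpu ops, instead of
being split into softirq/user halves. A rough before/after sketch, editorial
and not part of this patch:

DEFINE_SNMP_STAT(struct ipstats_mib, ip_statistics);
/* before: struct ipstats_mib __percpu *ip_statistics[2];  two copies  */
/* after:  struct ipstats_mib __percpu *ip_statistics[1];  one copy    */

/* before: this_cpu_inc(mib[!in_softirq()]->mibs[field]);              */
/* after:  irqsafe_cpu_inc(mib[0]->mibs[field]);  safe in any context  */
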
diff --git a/include/net/sock.h b/include/net/sock.h
index f2046e4..ebbc0ba 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -40,6 +40,7 @@
 #ifndef _SOCK_H
 #define _SOCK_H
 
+#include <linux/hardirq.h>
 #include <linux/kernel.h>
 #include <linux/list.h>
 #include <linux/list_nulls.h>
diff --git a/include/net/tcp.h b/include/net/tcp.h
index cda30ea..149a415 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -122,7 +122,13 @@
 #endif
 #define TCP_RTO_MAX	((unsigned)(120*HZ))
 #define TCP_RTO_MIN	((unsigned)(HZ/5))
-#define TCP_TIMEOUT_INIT ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value	*/
+#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ))	/* RFC2988bis initial RTO value	*/
+#define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value, now
+						 * used as a fallback RTO for the
+						 * initial data transmission if no
+						 * valid RTT sample has been acquired,
+						 * most likely due to retrans in 3WHS.
+						 */
 
 #define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
 					                 * for local resources.
@@ -295,7 +301,7 @@
 static inline int tcp_synq_no_recent_overflow(const struct sock *sk)
 {
 	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
-	return time_after(jiffies, last_overflow + TCP_TIMEOUT_INIT);
+	return time_after(jiffies, last_overflow + TCP_TIMEOUT_FALLBACK);
 }
 
 extern struct proto tcp_prot;
@@ -508,6 +514,7 @@
 extern int tcp_mtu_to_mss(struct sock *sk, int pmtu);
 extern int tcp_mss_to_mtu(struct sock *sk, int mss);
 extern void tcp_mtup_init(struct sock *sk);
+extern void tcp_valid_rtt_meas(struct sock *sk, u32 seq_rtt);
 
 static inline void tcp_bound_rto(const struct sock *sk)
 {
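
Worked numbers for the RTO change above, assuming HZ == 1000 (editorial, not
part of this patch):

/* TCP_TIMEOUT_INIT     == (unsigned)(1*HZ) == 1000 jiffies == 1 s (RFC2988bis) */
/* TCP_TIMEOUT_FALLBACK == (unsigned)(3*HZ) == 3000 jiffies == 3 s (RFC 1122)   */
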
diff --git a/lib/Kconfig b/lib/Kconfig
index 830181c..32f3e5a 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -79,6 +79,13 @@
 	  require M here.  See Castagnoli93.
 	  Module will be libcrc32c.
 
+config CRC8
+	tristate "CRC8 function"
+	help
+	  This option provides the CRC8 function. Drivers may select this
+	  when they need to perform cyclic redundancy checks according to
+	  the CRC8 algorithm. The module will be called crc8.
+
 config AUDIT_GENERIC
 	bool
 	depends on AUDIT && !AUDIT_ARCH
@@ -262,4 +269,11 @@
 
 	  If unsure, say N.
 
+config CORDIC
+	tristate "Cordic function"
+	help
+	  This option provides an arithmetic function implemented with the
+	  CORDIC algorithm, so its calculations are done in fixed point.
+	  Modules can select this when they require this function. The
+	  module will be called cordic.
+
 endmenu
diff --git a/lib/Makefile b/lib/Makefile
index 6b597fd..892f4e2 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -61,6 +61,7 @@
 obj-$(CONFIG_CRC32)	+= crc32.o
 obj-$(CONFIG_CRC7)	+= crc7.o
 obj-$(CONFIG_LIBCRC32C)	+= libcrc32c.o
+obj-$(CONFIG_CRC8)	+= crc8.o
 obj-$(CONFIG_GENERIC_ALLOCATOR) += genalloc.o
 
 obj-$(CONFIG_ZLIB_INFLATE) += zlib_inflate/
@@ -112,6 +113,8 @@
 
 obj-$(CONFIG_CPU_RMAP) += cpu_rmap.o
 
+obj-$(CONFIG_CORDIC) += cordic.o
+
 hostprogs-y	:= gen_crc32table
 clean-files	:= crc32table.h
 
diff --git a/lib/cordic.c b/lib/cordic.c
new file mode 100644
index 0000000..aa27a88
--- /dev/null
+++ b/lib/cordic.c
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2011 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include <linux/module.h>
+#include <linux/cordic.h>
+
+#define CORDIC_ANGLE_GEN	39797
+#define CORDIC_PRECISION_SHIFT	16
+#define	CORDIC_NUM_ITER		(CORDIC_PRECISION_SHIFT + 2)
+
+#define	FIXED(X)	((s32)((X) << CORDIC_PRECISION_SHIFT))
+#define	FLOAT(X)	(((X) >= 0) \
+		? ((((X) >> (CORDIC_PRECISION_SHIFT - 1)) + 1) >> 1) \
+		: -((((-(X)) >> (CORDIC_PRECISION_SHIFT - 1)) + 1) >> 1))
+
+static const s32 arctan_table[] = {
+	2949120,
+	1740967,
+	919879,
+	466945,
+	234379,
+	117304,
+	58666,
+	29335,
+	14668,
+	7334,
+	3667,
+	1833,
+	917,
+	458,
+	229,
+	115,
+	57,
+	29
+};
+
+/*
+ * cordic_calc_iq() - calculates the i/q coordinate for a given angle
+ *
+ * theta: angle in degrees for which the i/q coordinate is to be calculated
+ * coord: function return value holding the i/q coordinate
+ */
+struct cordic_iq cordic_calc_iq(s32 theta)
+{
+	struct cordic_iq coord;
+	s32 angle, valtmp;
+	unsigned iter;
+	int signx = 1;
+	int signtheta;
+
+	coord.i = CORDIC_ANGLE_GEN;
+	coord.q = 0;
+	angle = 0;
+
+	theta = FIXED(theta);
+	signtheta = (theta < 0) ? -1 : 1;
+	theta = ((theta + FIXED(180) * signtheta) % FIXED(360)) -
+		FIXED(180) * signtheta;
+
+	if (FLOAT(theta) > 90) {
+		theta -= FIXED(180);
+		signx = -1;
+	} else if (FLOAT(theta) < -90) {
+		theta += FIXED(180);
+		signx = -1;
+	}
+
+	for (iter = 0; iter < CORDIC_NUM_ITER; iter++) {
+		if (theta > angle) {
+			valtmp = coord.i - (coord.q >> iter);
+			coord.q += (coord.i >> iter);
+			angle += arctan_table[iter];
+		} else {
+			valtmp = coord.i + (coord.q >> iter);
+			coord.q -= (coord.i >> iter);
+			angle -= arctan_table[iter];
+		}
+		coord.i = valtmp;
+	}
+
+	coord.i *= signx;
+	coord.q *= signx;
+	return coord;
+}
+EXPORT_SYMBOL(cordic_calc_iq);
+
+MODULE_DESCRIPTION("Cordic functions");
+MODULE_AUTHOR("Broadcom Corporation");
+MODULE_LICENSE("Dual BSD/GPL");
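
A hedged usage sketch for the new helper: theta is passed in plain degrees
(the FIXED() scaling happens inside cordic_calc_iq()), and the returned pair
is in the library's fixed-point representation. The caller below is
hypothetical:

#include <linux/cordic.h>

static void cordic_example(void)
{
	/* i/q pair for a 45 degree angle; roughly the scaled cosine and
	 * sine of theta */
	struct cordic_iq coord = cordic_calc_iq(45);

	(void)coord.i;
	(void)coord.q;
}
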
diff --git a/lib/crc8.c b/lib/crc8.c
new file mode 100644
index 0000000..87b59ca
--- /dev/null
+++ b/lib/crc8.c
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2011 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define pr_fmt(fmt)		KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/crc8.h>
+#include <linux/printk.h>
+
+/*
+ * crc8_populate_msb - fill crc table for given polynomial in reverse bit order.
+ *
+ * table:	table to be filled.
+ * polynomial:	polynomial for which table is to be filled.
+ */
+void crc8_populate_msb(u8 table[CRC8_TABLE_SIZE], u8 polynomial)
+{
+	int i, j;
+	const u8 msbit = 0x80;
+	u8 t = msbit;
+
+	table[0] = 0;
+
+	for (i = 1; i < CRC8_TABLE_SIZE; i *= 2) {
+		t = (t << 1) ^ (t & msbit ? polynomial : 0);
+		for (j = 0; j < i; j++)
+			table[i+j] = table[j] ^ t;
+	}
+}
+EXPORT_SYMBOL(crc8_populate_msb);
+
+/*
+ * crc8_populate_lsb - fill crc table for given polynomial in regular bit order.
+ *
+ * table:	table to be filled.
+ * polynomial:	polynomial for which table is to be filled.
+ */
+void crc8_populate_lsb(u8 table[CRC8_TABLE_SIZE], u8 polynomial)
+{
+	int i, j;
+	u8 t = 1;
+
+	table[0] = 0;
+
+	for (i = (CRC8_TABLE_SIZE >> 1); i; i >>= 1) {
+		t = (t >> 1) ^ (t & 1 ? polynomial : 0);
+		for (j = 0; j < CRC8_TABLE_SIZE; j += 2*i)
+			table[i+j] = table[j] ^ t;
+	}
+}
+EXPORT_SYMBOL(crc8_populate_lsb);
+
+/*
+ * crc8 - calculate a crc8 over the given input data.
+ *
+ * table: crc table used for calculation.
+ * pdata: pointer to data buffer.
+ * nbytes: number of bytes in data buffer.
+ * crc:	previously returned crc8 value.
+ */
+u8 crc8(const u8 table[CRC8_TABLE_SIZE], u8 *pdata, size_t nbytes, u8 crc)
+{
+	/* loop over the buffer data */
+	while (nbytes-- > 0)
+		crc = table[(crc ^ *pdata++) & 0xff];
+
+	return crc;
+}
+EXPORT_SYMBOL(crc8);
+
+MODULE_DESCRIPTION("CRC8 (by Williams, Ross N.) function");
+MODULE_AUTHOR("Broadcom Corporation");
+MODULE_LICENSE("Dual BSD/GPL");
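
Typical usage, sketched with an illustrative polynomial and seed; only
CRC8_TABLE_SIZE and the three exported functions above are assumed:

#include <linux/crc8.h>

static u8 my_crc_table[CRC8_TABLE_SIZE];

static u8 crc8_example(u8 *buf, size_t len)
{
	/* fill the table once, here for x^8 + x^2 + x + 1 (0x07, msb-first) */
	crc8_populate_msb(my_crc_table, 0x07);

	/* 0xff is a common initial seed; pass the previous return value
	 * instead when checksumming a buffer in chunks */
	return crc8(my_crc_table, buf, len, 0xff);
}
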
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 917ecb9..d24c464 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -18,6 +18,8 @@
  *		2 of the License, or (at your option) any later version.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/capability.h>
 #include <linux/module.h>
 #include <linux/netdevice.h>
@@ -149,13 +151,13 @@
 	const struct net_device_ops *ops = real_dev->netdev_ops;
 
 	if (real_dev->features & NETIF_F_VLAN_CHALLENGED) {
-		pr_info("8021q: VLANs not supported on %s\n", name);
+		pr_info("VLANs not supported on %s\n", name);
 		return -EOPNOTSUPP;
 	}
 
 	if ((real_dev->features & NETIF_F_HW_VLAN_FILTER) &&
 	    (!ops->ndo_vlan_rx_add_vid || !ops->ndo_vlan_rx_kill_vid)) {
-		pr_info("8021q: Device %s has buggy VLAN hw accel\n", name);
+		pr_info("Device %s has buggy VLAN hw accel\n", name);
 		return -EOPNOTSUPP;
 	}
 
@@ -344,13 +346,12 @@
 	case NETDEV_CHANGENAME:
 		vlan_proc_rem_dev(dev);
 		if (vlan_proc_add_dev(dev) < 0)
-			pr_warning("8021q: failed to change proc name for %s\n",
-					dev->name);
+			pr_warn("failed to change proc name for %s\n",
+				dev->name);
 		break;
 	case NETDEV_REGISTER:
 		if (vlan_proc_add_dev(dev) < 0)
-			pr_warning("8021q: failed to add proc entry for %s\n",
-					dev->name);
+			pr_warn("failed to add proc entry for %s\n", dev->name);
 		break;
 	case NETDEV_UNREGISTER:
 		vlan_proc_rem_dev(dev);
@@ -374,7 +375,7 @@
 	if ((event == NETDEV_UP) &&
 	    (dev->features & NETIF_F_HW_VLAN_FILTER) &&
 	    dev->netdev_ops->ndo_vlan_rx_add_vid) {
-		pr_info("8021q: adding VLAN 0 to HW filter on device %s\n",
+		pr_info("adding VLAN 0 to HW filter on device %s\n",
 			dev->name);
 		dev->netdev_ops->ndo_vlan_rx_add_vid(dev, 0);
 	}
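
The pr_fmt definition added above is what allows the hard-coded "8021q: "
prefixes to be dropped from the call sites: KBUILD_MODNAME is prepended
centrally. Roughly (editorial sketch, device name illustrative):

pr_info("VLANs not supported on %s\n", name);
/* expands to approximately:
 * printk(KERN_INFO KBUILD_MODNAME ": " "VLANs not supported on %s\n", name);
 * and prints e.g. "8021q: VLANs not supported on eth0"
 */
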
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 7ea5cf9..1c9aa8c 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -20,6 +20,8 @@
  *		2 of the License, or (at your option) any later version.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/skbuff.h>
@@ -55,7 +57,7 @@
 		return arp_find(veth->h_dest, skb);
 #endif
 	default:
-		pr_debug("%s: unable to resolve type %X addresses.\n",
+		pr_debug("%s: unable to resolve type %X addresses\n",
 			 dev->name, ntohs(veth->h_vlan_encapsulated_proto));
 
 		memcpy(veth->h_source, dev->dev_addr, ETH_ALEN);
diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c
index d940c49..d34b6da 100644
--- a/net/8021q/vlanproc.c
+++ b/net/8021q/vlanproc.c
@@ -17,6 +17,8 @@
  * Jan 20, 1998        Ben Greear     Initial Version
  *****************************************************************************/
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/errno.h>
 #include <linux/kernel.h>
@@ -155,7 +157,7 @@
 	return 0;
 
 err:
-	pr_err("%s: can't create entry in proc filesystem!\n", __func__);
+	pr_err("can't create entry in proc filesystem!\n");
 	vlan_proc_cleanup(net);
 	return -ENOBUFS;
 }
@@ -229,7 +231,7 @@
 
 	++*pos;
 
-	dev = (struct net_device *)v;
+	dev = v;
 	if (v == SEQ_START_TOKEN)
 		dev = net_device_entry(&net->dev_base_head);
 
diff --git a/net/atm/mpc.c b/net/atm/mpc.c
index 3ccca42..aa97240 100644
--- a/net/atm/mpc.c
+++ b/net/atm/mpc.c
@@ -1005,7 +1005,7 @@
 	struct mpoa_client *mpc;
 	struct lec_priv *priv;
 
-	dev = (struct net_device *)dev_ptr;
+	dev = dev_ptr;
 
 	if (!net_eq(dev_net(dev), &init_net))
 		return NOTIFY_DONE;
diff --git a/net/atm/pppoatm.c b/net/atm/pppoatm.c
index e9aced0..db4a11c 100644
--- a/net/atm/pppoatm.c
+++ b/net/atm/pppoatm.c
@@ -37,6 +37,7 @@
 
 #include <linux/module.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/skbuff.h>
 #include <linux/slab.h>
 #include <linux/atm.h>
diff --git a/net/batman-adv/Kconfig b/net/batman-adv/Kconfig
index 6c051ad..2b68d06 100644
--- a/net/batman-adv/Kconfig
+++ b/net/batman-adv/Kconfig
@@ -5,6 +5,7 @@
 config BATMAN_ADV
 	tristate "B.A.T.M.A.N. Advanced Meshing Protocol"
 	depends on NET
+	select CRC16
         default n
 	---help---
 
diff --git a/net/batman-adv/aggregation.c b/net/batman-adv/aggregation.c
index a8c3203..c583e04 100644
--- a/net/batman-adv/aggregation.c
+++ b/net/batman-adv/aggregation.c
@@ -20,24 +20,19 @@
  */
 
 #include "main.h"
+#include "translation-table.h"
 #include "aggregation.h"
 #include "send.h"
 #include "routing.h"
 #include "hard-interface.h"
 
-/* calculate the size of the tt information for a given packet */
-static int tt_len(struct batman_packet *batman_packet)
-{
-	return batman_packet->num_tt * ETH_ALEN;
-}
-
 /* return true if new_packet can be aggregated with forw_packet */
-static bool can_aggregate_with(struct batman_packet *new_batman_packet,
+static bool can_aggregate_with(const struct batman_packet *new_batman_packet,
 			       int packet_len,
 			       unsigned long send_time,
 			       bool directlink,
-			       struct hard_iface *if_incoming,
-			       struct forw_packet *forw_packet)
+			       const struct hard_iface *if_incoming,
+			       const struct forw_packet *forw_packet)
 {
 	struct batman_packet *batman_packet =
 		(struct batman_packet *)forw_packet->skb->data;
@@ -97,8 +92,9 @@
 }
 
 /* create a new aggregated packet and add this packet to it */
-static void new_aggregated_packet(unsigned char *packet_buff, int packet_len,
-				  unsigned long send_time, bool direct_link,
+static void new_aggregated_packet(const unsigned char *packet_buff,
+				  int packet_len, unsigned long send_time,
+				  bool direct_link,
 				  struct hard_iface *if_incoming,
 				  int own_packet)
 {
@@ -118,7 +114,7 @@
 		}
 	}
 
-	forw_packet_aggr = kmalloc(sizeof(struct forw_packet), GFP_ATOMIC);
+	forw_packet_aggr = kmalloc(sizeof(*forw_packet_aggr), GFP_ATOMIC);
 	if (!forw_packet_aggr) {
 		if (!own_packet)
 			atomic_inc(&bat_priv->batman_queue_left);
@@ -150,7 +146,7 @@
 	forw_packet_aggr->own = own_packet;
 	forw_packet_aggr->if_incoming = if_incoming;
 	forw_packet_aggr->num_packets = 0;
-	forw_packet_aggr->direct_link_flags = 0;
+	forw_packet_aggr->direct_link_flags = NO_FLAGS;
 	forw_packet_aggr->send_time = send_time;
 
 	/* save packet direct link flag status */
@@ -176,8 +172,7 @@
 
 /* aggregate a new packet into the existing aggregation */
 static void aggregate(struct forw_packet *forw_packet_aggr,
-		      unsigned char *packet_buff,
-		      int packet_len,
+		      const unsigned char *packet_buff, int packet_len,
 		      bool direct_link)
 {
 	unsigned char *skb_buff;
@@ -195,7 +190,7 @@
 
 void add_bat_packet_to_list(struct bat_priv *bat_priv,
 			    unsigned char *packet_buff, int packet_len,
-			    struct hard_iface *if_incoming, char own_packet,
+			    struct hard_iface *if_incoming, int own_packet,
 			    unsigned long send_time)
 {
 	/**
@@ -253,8 +248,9 @@
 }
 
 /* unpack the aggregated packets and process them one by one */
-void receive_aggr_bat_packet(struct ethhdr *ethhdr, unsigned char *packet_buff,
-			     int packet_len, struct hard_iface *if_incoming)
+void receive_aggr_bat_packet(const struct ethhdr *ethhdr,
+			     unsigned char *packet_buff, int packet_len,
+			     struct hard_iface *if_incoming)
 {
 	struct batman_packet *batman_packet;
 	int buff_pos = 0;
@@ -263,18 +259,20 @@
 	batman_packet = (struct batman_packet *)packet_buff;
 
 	do {
-		/* network to host order for our 32bit seqno, and the
-		   orig_interval. */
+		/* network to host order for our 32bit seqno and the
+		   orig_interval */
 		batman_packet->seqno = ntohl(batman_packet->seqno);
+		batman_packet->tt_crc = ntohs(batman_packet->tt_crc);
 
 		tt_buff = packet_buff + buff_pos + BAT_PACKET_LEN;
-		receive_bat_packet(ethhdr, batman_packet,
-				   tt_buff, tt_len(batman_packet),
-				   if_incoming);
 
-		buff_pos += BAT_PACKET_LEN + tt_len(batman_packet);
+		receive_bat_packet(ethhdr, batman_packet, tt_buff, if_incoming);
+
+		buff_pos += BAT_PACKET_LEN +
+			tt_len(batman_packet->tt_num_changes);
+
 		batman_packet = (struct batman_packet *)
 			(packet_buff + buff_pos);
 	} while (aggregated_packet(buff_pos, packet_len,
-				   batman_packet->num_tt));
+				   batman_packet->tt_num_changes));
 }
diff --git a/net/batman-adv/aggregation.h b/net/batman-adv/aggregation.h
index 7e6d72f..216337b 100644
--- a/net/batman-adv/aggregation.h
+++ b/net/batman-adv/aggregation.h
@@ -25,9 +25,11 @@
 #include "main.h"
 
 /* is there another aggregated packet here? */
-static inline int aggregated_packet(int buff_pos, int packet_len, int num_tt)
+static inline int aggregated_packet(int buff_pos, int packet_len,
+				    int tt_num_changes)
 {
-	int next_buff_pos = buff_pos + BAT_PACKET_LEN + (num_tt * ETH_ALEN);
+	int next_buff_pos = buff_pos + BAT_PACKET_LEN + (tt_num_changes *
+						sizeof(struct tt_change));
 
 	return (next_buff_pos <= packet_len) &&
 		(next_buff_pos <= MAX_AGGREGATION_BYTES);
@@ -35,9 +37,10 @@
 
 void add_bat_packet_to_list(struct bat_priv *bat_priv,
 			    unsigned char *packet_buff, int packet_len,
-			    struct hard_iface *if_incoming, char own_packet,
+			    struct hard_iface *if_incoming, int own_packet,
 			    unsigned long send_time);
-void receive_aggr_bat_packet(struct ethhdr *ethhdr, unsigned char *packet_buff,
-			     int packet_len, struct hard_iface *if_incoming);
+void receive_aggr_bat_packet(const struct ethhdr *ethhdr,
+			     unsigned char *packet_buff, int packet_len,
+			     struct hard_iface *if_incoming);
 
 #endif /* _NET_BATMAN_ADV_AGGREGATION_H_ */
diff --git a/net/batman-adv/bat_debugfs.c b/net/batman-adv/bat_debugfs.c
index abaeec5..d0af9bf 100644
--- a/net/batman-adv/bat_debugfs.c
+++ b/net/batman-adv/bat_debugfs.c
@@ -50,7 +50,8 @@
 		debug_log->log_start = debug_log->log_end - log_buff_len;
 }
 
-static int fdebug_log(struct debug_log *debug_log, char *fmt, ...)
+__printf(2, 3)
+static int fdebug_log(struct debug_log *debug_log, const char *fmt, ...)
 {
 	va_list args;
 	static char debug_log_buf[256];
@@ -74,14 +75,14 @@
 	return 0;
 }
 
-int debug_log(struct bat_priv *bat_priv, char *fmt, ...)
+int debug_log(struct bat_priv *bat_priv, const char *fmt, ...)
 {
 	va_list args;
 	char tmp_log_buf[256];
 
 	va_start(args, fmt);
 	vscnprintf(tmp_log_buf, sizeof(tmp_log_buf), fmt, args);
-	fdebug_log(bat_priv->debug_log, "[%10u] %s",
+	fdebug_log(bat_priv->debug_log, "[%10lu] %s",
 		   (jiffies / HZ), tmp_log_buf);
 	va_end(args);
 
@@ -114,7 +115,7 @@
 	    !(debug_log->log_end - debug_log->log_start))
 		return -EAGAIN;
 
-	if ((!buf) || (count < 0))
+	if (!buf)
 		return -EINVAL;
 
 	if (count == 0)
@@ -184,7 +185,7 @@
 	if (!bat_priv->debug_dir)
 		goto err;
 
-	bat_priv->debug_log = kzalloc(sizeof(struct debug_log), GFP_ATOMIC);
+	bat_priv->debug_log = kzalloc(sizeof(*bat_priv->debug_log), GFP_ATOMIC);
 	if (!bat_priv->debug_log)
 		goto err;
 
diff --git a/net/batman-adv/bat_sysfs.c b/net/batman-adv/bat_sysfs.c
index 497a070..cd15deb 100644
--- a/net/batman-adv/bat_sysfs.c
+++ b/net/batman-adv/bat_sysfs.c
@@ -28,9 +28,31 @@
 #include "gateway_client.h"
 #include "vis.h"
 
-#define to_dev(obj)		container_of(obj, struct device, kobj)
-#define kobj_to_netdev(obj)	to_net_dev(to_dev(obj->parent))
-#define kobj_to_batpriv(obj)	netdev_priv(kobj_to_netdev(obj))
+static struct net_device *kobj_to_netdev(struct kobject *obj)
+{
+	struct device *dev = container_of(obj->parent, struct device, kobj);
+	return to_net_dev(dev);
+}
+
+static struct bat_priv *kobj_to_batpriv(struct kobject *obj)
+{
+	struct net_device *net_dev = kobj_to_netdev(obj);
+	return netdev_priv(net_dev);
+}
+
+#define UEV_TYPE_VAR	"BATTYPE="
+#define UEV_ACTION_VAR	"BATACTION="
+#define UEV_DATA_VAR	"BATDATA="
+
+static char *uev_action_str[] = {
+	"add",
+	"del",
+	"change"
+};
+
+static char *uev_type_str[] = {
+	"gw"
+};
 
 /* Use this, if you have customized show and store functions */
 #define BAT_ATTR(_name, _mode, _show, _store)	\
@@ -96,7 +118,7 @@
 
 static int store_bool_attr(char *buff, size_t count,
 			   struct net_device *net_dev,
-			   char *attr_name, atomic_t *attr)
+			   const char *attr_name, atomic_t *attr)
 {
 	int enabled = -1;
 
@@ -138,16 +160,15 @@
 {
 	int ret;
 
-	ret = store_bool_attr(buff, count, net_dev, (char *)attr->name,
-			      attr_store);
+	ret = store_bool_attr(buff, count, net_dev, attr->name, attr_store);
 	if (post_func && ret)
 		post_func(net_dev);
 
 	return ret;
 }
 
-static int store_uint_attr(char *buff, size_t count,
-			   struct net_device *net_dev, char *attr_name,
+static int store_uint_attr(const char *buff, size_t count,
+			   struct net_device *net_dev, const char *attr_name,
 			   unsigned int min, unsigned int max, atomic_t *attr)
 {
 	unsigned long uint_val;
@@ -183,15 +204,15 @@
 	return count;
 }
 
-static inline ssize_t __store_uint_attr(char *buff, size_t count,
+static inline ssize_t __store_uint_attr(const char *buff, size_t count,
 			int min, int max,
 			void (*post_func)(struct net_device *),
-			struct attribute *attr,
+			const struct attribute *attr,
 			atomic_t *attr_store, struct net_device *net_dev)
 {
 	int ret;
 
-	ret = store_uint_attr(buff, count, net_dev, (char *)attr->name,
+	ret = store_uint_attr(buff, count, net_dev, attr->name,
 			      min, max, attr_store);
 	if (post_func && ret)
 		post_func(net_dev);
@@ -368,7 +389,7 @@
 static BAT_ATTR(gw_bandwidth, S_IRUGO | S_IWUSR, show_gw_bwidth,
 		store_gw_bwidth);
 #ifdef CONFIG_BATMAN_ADV_DEBUG
-BAT_ATTR_UINT(log_level, S_IRUGO | S_IWUSR, 0, 3, NULL);
+BAT_ATTR_UINT(log_level, S_IRUGO | S_IWUSR, 0, 7, NULL);
 #endif
 
 static struct bat_attribute *mesh_attrs[] = {
@@ -594,3 +615,60 @@
 	kobject_put(*hardif_obj);
 	*hardif_obj = NULL;
 }
+
+int throw_uevent(struct bat_priv *bat_priv, enum uev_type type,
+		 enum uev_action action, const char *data)
+{
+	int ret = -1;
+	struct hard_iface *primary_if = NULL;
+	struct kobject *bat_kobj;
+	char *uevent_env[4] = { NULL, NULL, NULL, NULL };
+
+	primary_if = primary_if_get_selected(bat_priv);
+	if (!primary_if)
+		goto out;
+
+	bat_kobj = &primary_if->soft_iface->dev.kobj;
+
+	uevent_env[0] = kmalloc(strlen(UEV_TYPE_VAR) +
+				strlen(uev_type_str[type]) + 1,
+				GFP_ATOMIC);
+	if (!uevent_env[0])
+		goto out;
+
+	sprintf(uevent_env[0], "%s%s", UEV_TYPE_VAR, uev_type_str[type]);
+
+	uevent_env[1] = kmalloc(strlen(UEV_ACTION_VAR) +
+				strlen(uev_action_str[action]) + 1,
+				GFP_ATOMIC);
+	if (!uevent_env[1])
+		goto out;
+
+	sprintf(uevent_env[1], "%s%s", UEV_ACTION_VAR, uev_action_str[action]);
+
+	/* If the event is DEL, ignore the data field */
+	if (action != UEV_DEL) {
+		uevent_env[2] = kmalloc(strlen(UEV_DATA_VAR) +
+					strlen(data) + 1, GFP_ATOMIC);
+		if (!uevent_env[2])
+			goto out;
+
+		sprintf(uevent_env[2], "%s%s", UEV_DATA_VAR, data);
+	}
+
+	ret = kobject_uevent_env(bat_kobj, KOBJ_CHANGE, uevent_env);
+out:
+	kfree(uevent_env[0]);
+	kfree(uevent_env[1]);
+	kfree(uevent_env[2]);
+
+	if (primary_if)
+		hardif_free_ref(primary_if);
+
+	if (ret)
+		bat_dbg(DBG_BATMAN, bat_priv, "Failed to send "
+			"uevent for (%s,%s,%s) event (err: %d)\n",
+			uev_type_str[type], uev_action_str[action],
+			(action == UEV_DEL ? "NULL" : data), ret);
+	return ret;
+}
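
For reference, a gateway "add" event built by throw_uevent() above carries an
environment like the following (MAC address illustrative):

BATTYPE=gw
BATACTION=add
BATDATA=00:11:22:33:44:55
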
diff --git a/net/batman-adv/bat_sysfs.h b/net/batman-adv/bat_sysfs.h
index 02f1fa7..a3f75a7 100644
--- a/net/batman-adv/bat_sysfs.h
+++ b/net/batman-adv/bat_sysfs.h
@@ -38,5 +38,7 @@
 void sysfs_del_meshif(struct net_device *dev);
 int sysfs_add_hardif(struct kobject **hardif_obj, struct net_device *dev);
 void sysfs_del_hardif(struct kobject **hardif_obj);
+int throw_uevent(struct bat_priv *bat_priv, enum uev_type type,
+		 enum uev_action action, const char *data);
 
 #endif /* _NET_BATMAN_ADV_SYSFS_H_ */
diff --git a/net/batman-adv/bitarray.c b/net/batman-adv/bitarray.c
index ad2ca92..c1f4bfc 100644
--- a/net/batman-adv/bitarray.c
+++ b/net/batman-adv/bitarray.c
@@ -26,8 +26,8 @@
 
 /* returns true if the corresponding bit in the given seq_bits indicates true
  * and curr_seqno is within range of last_seqno */
-uint8_t get_bit_status(unsigned long *seq_bits, uint32_t last_seqno,
-		       uint32_t curr_seqno)
+int get_bit_status(const unsigned long *seq_bits, uint32_t last_seqno,
+		   uint32_t curr_seqno)
 {
 	int32_t diff, word_offset, word_num;
 
@@ -127,10 +127,10 @@
  *  1 if the window was moved (either new or very old)
  *  0 if the window was not moved/shifted.
  */
-char bit_get_packet(void *priv, unsigned long *seq_bits,
-		    int32_t seq_num_diff, int8_t set_mark)
+int bit_get_packet(void *priv, unsigned long *seq_bits,
+		    int32_t seq_num_diff, int set_mark)
 {
-	struct bat_priv *bat_priv = (struct bat_priv *)priv;
+	struct bat_priv *bat_priv = priv;
 
 	/* sequence number is slightly older. We already got a sequence number
 	 * higher than this one, so we just mark it. */
@@ -190,7 +190,7 @@
 /* count the hamming weight, how many good packets did we receive? just count
  * the 1's.
  */
-int bit_packet_count(unsigned long *seq_bits)
+int bit_packet_count(const unsigned long *seq_bits)
 {
 	int i, hamming = 0;
 
diff --git a/net/batman-adv/bitarray.h b/net/batman-adv/bitarray.h
index 769c246..9c04422 100644
--- a/net/batman-adv/bitarray.h
+++ b/net/batman-adv/bitarray.h
@@ -26,8 +26,8 @@
 
 /* returns true if the corresponding bit in the given seq_bits indicates true
  * and curr_seqno is within range of last_seqno */
-uint8_t get_bit_status(unsigned long *seq_bits, uint32_t last_seqno,
-					   uint32_t curr_seqno);
+int get_bit_status(const unsigned long *seq_bits, uint32_t last_seqno,
+		   uint32_t curr_seqno);
 
 /* turn corresponding bit on, so we can remember that we got the packet */
 void bit_mark(unsigned long *seq_bits, int32_t n);
@@ -35,10 +35,10 @@
 
 /* receive and process one packet, returns 1 if received seq_num is considered
  * new, 0 if old  */
-char bit_get_packet(void *priv, unsigned long *seq_bits,
-		    int32_t seq_num_diff, int8_t set_mark);
+int bit_get_packet(void *priv, unsigned long *seq_bits,
+		   int32_t seq_num_diff, int set_mark);
 
 /* count the hamming weight, how many good packets did we receive? */
-int  bit_packet_count(unsigned long *seq_bits);
+int bit_packet_count(const unsigned long *seq_bits);
 
 #endif /* _NET_BATMAN_ADV_BITARRAY_H_ */
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 61605a0..8b25b52 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -20,15 +20,22 @@
  */
 
 #include "main.h"
+#include "bat_sysfs.h"
 #include "gateway_client.h"
 #include "gateway_common.h"
 #include "hard-interface.h"
 #include "originator.h"
+#include "routing.h"
 #include <linux/ip.h>
 #include <linux/ipv6.h>
 #include <linux/udp.h>
 #include <linux/if_vlan.h>
 
+/* This is the offset of the options field in a dhcp packet starting at
+ * the beginning of the dhcp header */
+#define DHCP_OPTIONS_OFFSET 240
+#define DHCP_REQUEST 3
+
 static void gw_node_free_ref(struct gw_node *gw_node)
 {
 	if (atomic_dec_and_test(&gw_node->refcount))
@@ -86,7 +93,7 @@
 	if (new_gw_node && !atomic_inc_not_zero(&new_gw_node->refcount))
 		new_gw_node = NULL;
 
-	curr_gw_node = bat_priv->curr_gw;
+	curr_gw_node = rcu_dereference_protected(bat_priv->curr_gw, 1);
 	rcu_assign_pointer(bat_priv->curr_gw, new_gw_node);
 
 	if (curr_gw_node)
@@ -97,40 +104,19 @@
 
 void gw_deselect(struct bat_priv *bat_priv)
 {
-	gw_select(bat_priv, NULL);
+	atomic_set(&bat_priv->gw_reselect, 1);
 }
 
-void gw_election(struct bat_priv *bat_priv)
+static struct gw_node *gw_get_best_gw_node(struct bat_priv *bat_priv)
 {
-	struct hlist_node *node;
-	struct gw_node *gw_node, *curr_gw = NULL, *curr_gw_tmp = NULL;
 	struct neigh_node *router;
-	uint8_t max_tq = 0;
+	struct hlist_node *node;
+	struct gw_node *gw_node, *curr_gw = NULL;
 	uint32_t max_gw_factor = 0, tmp_gw_factor = 0;
+	uint8_t max_tq = 0;
 	int down, up;
 
-	/**
-	 * The batman daemon checks here if we already passed a full originator
-	 * cycle in order to make sure we don't choose the first gateway we
-	 * hear about. This check is based on the daemon's uptime which we
-	 * don't have.
-	 **/
-	if (atomic_read(&bat_priv->gw_mode) != GW_MODE_CLIENT)
-		return;
-
-	curr_gw = gw_get_selected_gw_node(bat_priv);
-	if (curr_gw)
-		goto out;
-
 	rcu_read_lock();
-	if (hlist_empty(&bat_priv->gw_list)) {
-		bat_dbg(DBG_BATMAN, bat_priv,
-			"Removing selected gateway - "
-			"no gateway in range\n");
-		gw_deselect(bat_priv);
-		goto unlock;
-	}
-
 	hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
 		if (gw_node->deleted)
 			continue;
@@ -139,6 +125,9 @@
 		if (!router)
 			continue;
 
+		if (!atomic_inc_not_zero(&gw_node->refcount))
+			goto next;
+
 		switch (atomic_read(&bat_priv->gw_sel_class)) {
 		case 1: /* fast connection */
 			gw_bandwidth_to_kbit(gw_node->orig_node->gw_flags,
@@ -151,8 +140,12 @@
 
 			if ((tmp_gw_factor > max_gw_factor) ||
 			    ((tmp_gw_factor == max_gw_factor) &&
-			     (router->tq_avg > max_tq)))
-				curr_gw_tmp = gw_node;
+			     (router->tq_avg > max_tq))) {
+				if (curr_gw)
+					gw_node_free_ref(curr_gw);
+				curr_gw = gw_node;
+				atomic_inc(&curr_gw->refcount);
+			}
 			break;
 
 		default: /**
@@ -163,8 +156,12 @@
 			  *     soon as a better gateway appears which has
 			  *     $routing_class more tq points)
 			  **/
-			if (router->tq_avg > max_tq)
-				curr_gw_tmp = gw_node;
+			if (router->tq_avg > max_tq) {
+				if (curr_gw)
+					gw_node_free_ref(curr_gw);
+				curr_gw = gw_node;
+				atomic_inc(&curr_gw->refcount);
+			}
 			break;
 		}
 
@@ -174,42 +171,81 @@
 		if (tmp_gw_factor > max_gw_factor)
 			max_gw_factor = tmp_gw_factor;
 
+		gw_node_free_ref(gw_node);
+
+next:
 		neigh_node_free_ref(router);
 	}
-
-	if (curr_gw != curr_gw_tmp) {
-		router = orig_node_get_router(curr_gw_tmp->orig_node);
-		if (!router)
-			goto unlock;
-
-		if ((curr_gw) && (!curr_gw_tmp))
-			bat_dbg(DBG_BATMAN, bat_priv,
-				"Removing selected gateway - "
-				"no gateway in range\n");
-		else if ((!curr_gw) && (curr_gw_tmp))
-			bat_dbg(DBG_BATMAN, bat_priv,
-				"Adding route to gateway %pM "
-				"(gw_flags: %i, tq: %i)\n",
-				curr_gw_tmp->orig_node->orig,
-				curr_gw_tmp->orig_node->gw_flags,
-				router->tq_avg);
-		else
-			bat_dbg(DBG_BATMAN, bat_priv,
-				"Changing route to gateway %pM "
-				"(gw_flags: %i, tq: %i)\n",
-				curr_gw_tmp->orig_node->orig,
-				curr_gw_tmp->orig_node->gw_flags,
-				router->tq_avg);
-
-		neigh_node_free_ref(router);
-		gw_select(bat_priv, curr_gw_tmp);
-	}
-
-unlock:
 	rcu_read_unlock();
+
+	return curr_gw;
+}
+
+void gw_election(struct bat_priv *bat_priv)
+{
+	struct gw_node *curr_gw = NULL, *next_gw = NULL;
+	struct neigh_node *router = NULL;
+	char gw_addr[18] = { '\0' };
+
+	/**
+	 * The batman daemon checks here if we already passed a full originator
+	 * cycle in order to make sure we don't choose the first gateway we
+	 * hear about. This check is based on the daemon's uptime which we
+	 * don't have.
+	 **/
+	if (atomic_read(&bat_priv->gw_mode) != GW_MODE_CLIENT)
+		goto out;
+
+	if (!atomic_dec_not_zero(&bat_priv->gw_reselect))
+		goto out;
+
+	curr_gw = gw_get_selected_gw_node(bat_priv);
+
+	next_gw = gw_get_best_gw_node(bat_priv);
+
+	if (curr_gw == next_gw)
+		goto out;
+
+	if (next_gw) {
+		sprintf(gw_addr, "%pM", next_gw->orig_node->orig);
+
+		router = orig_node_get_router(next_gw->orig_node);
+		if (!router) {
+			gw_deselect(bat_priv);
+			goto out;
+		}
+	}
+
+	if ((curr_gw) && (!next_gw)) {
+		bat_dbg(DBG_BATMAN, bat_priv,
+			"Removing selected gateway - no gateway in range\n");
+		throw_uevent(bat_priv, UEV_GW, UEV_DEL, NULL);
+	} else if ((!curr_gw) && (next_gw)) {
+		bat_dbg(DBG_BATMAN, bat_priv,
+			"Adding route to gateway %pM (gw_flags: %i, tq: %i)\n",
+			next_gw->orig_node->orig,
+			next_gw->orig_node->gw_flags,
+			router->tq_avg);
+		throw_uevent(bat_priv, UEV_GW, UEV_ADD, gw_addr);
+	} else {
+		bat_dbg(DBG_BATMAN, bat_priv,
+			"Changing route to gateway %pM "
+			"(gw_flags: %i, tq: %i)\n",
+			next_gw->orig_node->orig,
+			next_gw->orig_node->gw_flags,
+			router->tq_avg);
+		throw_uevent(bat_priv, UEV_GW, UEV_CHANGE, gw_addr);
+	}
+
+	gw_select(bat_priv, next_gw);
+
 out:
 	if (curr_gw)
 		gw_node_free_ref(curr_gw);
+	if (next_gw)
+		gw_node_free_ref(next_gw);
+	if (router)
+		neigh_node_free_ref(router);
 }
 
 void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node)
@@ -273,11 +309,10 @@
 	struct gw_node *gw_node;
 	int down, up;
 
-	gw_node = kmalloc(sizeof(struct gw_node), GFP_ATOMIC);
+	gw_node = kzalloc(sizeof(*gw_node), GFP_ATOMIC);
 	if (!gw_node)
 		return;
 
-	memset(gw_node, 0, sizeof(struct gw_node));
 	INIT_HLIST_NODE(&gw_node->list);
 	gw_node->orig_node = orig_node;
 	atomic_set(&gw_node->refcount, 1);
@@ -323,7 +358,7 @@
 
 		gw_node->deleted = 0;
 
-		if (new_gwflags == 0) {
+		if (new_gwflags == NO_FLAGS) {
 			gw_node->deleted = jiffies;
 			bat_dbg(DBG_BATMAN, bat_priv,
 				"Gateway %pM removed from gateway list\n",
@@ -336,7 +371,7 @@
 		goto unlock;
 	}
 
-	if (new_gwflags == 0)
+	if (new_gwflags == NO_FLAGS)
 		goto unlock;
 
 	gw_node_add(bat_priv, orig_node, new_gwflags);
@@ -353,7 +388,7 @@
 
 void gw_node_delete(struct bat_priv *bat_priv, struct orig_node *orig_node)
 {
-	return gw_node_update(bat_priv, orig_node, 0);
+	gw_node_update(bat_priv, orig_node, 0);
 }
 
 void gw_node_purge(struct bat_priv *bat_priv)
@@ -361,7 +396,7 @@
 	struct gw_node *gw_node, *curr_gw;
 	struct hlist_node *node, *node_tmp;
 	unsigned long timeout = 2 * PURGE_TIMEOUT * HZ;
-	char do_deselect = 0;
+	int do_deselect = 0;
 
 	curr_gw = gw_get_selected_gw_node(bat_priv);
 
@@ -394,8 +429,8 @@
 /**
  * fails if orig_node has no router
  */
-static int _write_buffer_text(struct bat_priv *bat_priv,
-			      struct seq_file *seq, struct gw_node *gw_node)
+static int _write_buffer_text(struct bat_priv *bat_priv, struct seq_file *seq,
+			      const struct gw_node *gw_node)
 {
 	struct gw_node *curr_gw;
 	struct neigh_node *router;
@@ -480,14 +515,75 @@
 	return ret;
 }
 
-int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb)
+static bool is_type_dhcprequest(struct sk_buff *skb, int header_len)
+{
+	int ret = false;
+	unsigned char *p;
+	int pkt_len;
+
+	if (skb_linearize(skb) < 0)
+		goto out;
+
+	pkt_len = skb_headlen(skb);
+
+	if (pkt_len < header_len + DHCP_OPTIONS_OFFSET + 1)
+		goto out;
+
+	p = skb->data + header_len + DHCP_OPTIONS_OFFSET;
+	pkt_len -= header_len + DHCP_OPTIONS_OFFSET + 1;
+
+	/* Access the dhcp option list. Each entry is made up of:
+	 * - octet 1: option type
+	 * - octet 2: option data len (only if type != 255 and 0)
+	 * - octet 3: option data */
+	while (*p != 255 && !ret) {
+		/* p now points to the first octet: option type */
+		if (*p == 53) {
+			/* type 53 is the message type option.
+			 * Skip the len octet and go to the data octet */
+			if (pkt_len < 2)
+				goto out;
+			p += 2;
+
+			/* check if the message type is what we need */
+			if (*p == DHCP_REQUEST)
+				ret = true;
+			break;
+		} else if (*p == 0) {
+			/* option type 0 (padding), just go forward */
+			if (pkt_len < 1)
+				goto out;
+			pkt_len--;
+			p++;
+		} else {
+			/* This is any other option. So we get the length... */
+			if (pkt_len < 1)
+				goto out;
+			pkt_len--;
+			p++;
+
+			/* ...and then we jump over the data */
+			if (pkt_len < *p)
+				goto out;
+			pkt_len -= *p;
+			p += (*p);
+		}
+	}
+out:
+	return ret;
+}
+
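/* Editorial sketch of the option stream is_type_dhcprequest() walks; values
 * are decimal and illustrative, not part of this patch:
 *
 *   53 1 3    option 53 (message type), len 1, value 3 == DHCP_REQUEST
 *    0        padding octet, skipped one byte at a time
 *   255       end-of-options marker, terminates the loop
 */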
+int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb,
+		 struct orig_node *old_gw)
 {
 	struct ethhdr *ethhdr;
 	struct iphdr *iphdr;
 	struct ipv6hdr *ipv6hdr;
 	struct udphdr *udphdr;
 	struct gw_node *curr_gw;
+	struct neigh_node *neigh_curr = NULL, *neigh_old = NULL;
 	unsigned int header_len = 0;
+	int ret = 1;
 
 	if (atomic_read(&bat_priv->gw_mode) == GW_MODE_OFF)
 		return 0;
@@ -509,7 +605,7 @@
 	/* check for ip header */
 	switch (ntohs(ethhdr->h_proto)) {
 	case ETH_P_IP:
-		if (!pskb_may_pull(skb, header_len + sizeof(struct iphdr)))
+		if (!pskb_may_pull(skb, header_len + sizeof(*iphdr)))
 			return 0;
 		iphdr = (struct iphdr *)(skb->data + header_len);
 		header_len += iphdr->ihl * 4;
@@ -520,10 +616,10 @@
 
 		break;
 	case ETH_P_IPV6:
-		if (!pskb_may_pull(skb, header_len + sizeof(struct ipv6hdr)))
+		if (!pskb_may_pull(skb, header_len + sizeof(*ipv6hdr)))
 			return 0;
 		ipv6hdr = (struct ipv6hdr *)(skb->data + header_len);
-		header_len += sizeof(struct ipv6hdr);
+		header_len += sizeof(*ipv6hdr);
 
 		/* check for udp header */
 		if (ipv6hdr->nexthdr != IPPROTO_UDP)
@@ -534,10 +630,10 @@
 		return 0;
 	}
 
-	if (!pskb_may_pull(skb, header_len + sizeof(struct udphdr)))
+	if (!pskb_may_pull(skb, header_len + sizeof(*udphdr)))
 		return 0;
 	udphdr = (struct udphdr *)(skb->data + header_len);
-	header_len += sizeof(struct udphdr);
+	header_len += sizeof(*udphdr);
 
 	/* check for bootp port */
 	if ((ntohs(ethhdr->h_proto) == ETH_P_IP) &&
@@ -555,7 +651,30 @@
 	if (!curr_gw)
 		return 0;
 
+	/* If old_gw != NULL then this packet is unicast.
+	 * So, at this point we have to check the message type: if it is a
+	 * DHCPREQUEST we have to decide whether to drop it or not */
+	if (old_gw && curr_gw->orig_node != old_gw) {
+		if (is_type_dhcprequest(skb, header_len)) {
+			/* If the dhcp packet has been sent to a different gw,
+			 * we have to evaluate whether the old gw is still
+			 * reliable enough */
+			neigh_curr = find_router(bat_priv, curr_gw->orig_node,
+						 NULL);
+			neigh_old = find_router(bat_priv, old_gw, NULL);
+			if (!neigh_curr || !neigh_old)
+				goto free_neigh;
+			if (neigh_curr->tq_avg - neigh_old->tq_avg <
+								GW_THRESHOLD)
+				ret = -1;
+		}
+	}
+free_neigh:
+	if (neigh_old)
+		neigh_node_free_ref(neigh_old);
+	if (neigh_curr)
+		neigh_node_free_ref(neigh_curr);
 	if (curr_gw)
 		gw_node_free_ref(curr_gw);
-	return 1;
+	return ret;
 }
diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h
index 1ce8c60..b9b983c 100644
--- a/net/batman-adv/gateway_client.h
+++ b/net/batman-adv/gateway_client.h
@@ -31,6 +31,7 @@
 void gw_node_delete(struct bat_priv *bat_priv, struct orig_node *orig_node);
 void gw_node_purge(struct bat_priv *bat_priv);
 int gw_client_seq_print_text(struct seq_file *seq, void *offset);
-int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb);
+int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb,
+		 struct orig_node *old_gw);
 
 #endif /* _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ */
diff --git a/net/batman-adv/gateway_common.c b/net/batman-adv/gateway_common.c
index 50d3a59..18661af 100644
--- a/net/batman-adv/gateway_common.c
+++ b/net/batman-adv/gateway_common.c
@@ -61,9 +61,9 @@
 /* returns the up and downspeeds in kbit, calculated from the class */
 void gw_bandwidth_to_kbit(uint8_t gw_srv_class, int *down, int *up)
 {
-	char sbit = (gw_srv_class & 0x80) >> 7;
-	char dpart = (gw_srv_class & 0x78) >> 3;
-	char upart = (gw_srv_class & 0x07);
+	int sbit = (gw_srv_class & 0x80) >> 7;
+	int dpart = (gw_srv_class & 0x78) >> 3;
+	int upart = (gw_srv_class & 0x07);
 
 	if (!gw_srv_class) {
 		*down = 0;
@@ -76,10 +76,11 @@
 }
 
 static bool parse_gw_bandwidth(struct net_device *net_dev, char *buff,
-			       long *up, long *down)
+			       int *up, int *down)
 {
 	int ret, multi = 1;
 	char *slash_ptr, *tmp_ptr;
+	long ldown, lup;
 
 	slash_ptr = strchr(buff, '/');
 	if (slash_ptr)
@@ -96,7 +97,7 @@
 			*tmp_ptr = '\0';
 	}
 
-	ret = strict_strtoul(buff, 10, down);
+	ret = strict_strtol(buff, 10, &ldown);
 	if (ret) {
 		bat_err(net_dev,
 			"Download speed of gateway mode invalid: %s\n",
@@ -104,7 +105,7 @@
 		return false;
 	}
 
-	*down *= multi;
+	*down = ldown * multi;
 
 	/* we also got some upload info */
 	if (slash_ptr) {
@@ -121,7 +122,7 @@
 				*tmp_ptr = '\0';
 		}
 
-		ret = strict_strtoul(slash_ptr + 1, 10, up);
+		ret = strict_strtol(slash_ptr + 1, 10, &lup);
 		if (ret) {
 			bat_err(net_dev,
 				"Upload speed of gateway mode invalid: "
@@ -129,7 +130,7 @@
 			return false;
 		}
 
-		*up *= multi;
+		*up = lup * multi;
 	}
 
 	return true;
@@ -138,7 +139,8 @@
 ssize_t gw_bandwidth_set(struct net_device *net_dev, char *buff, size_t count)
 {
 	struct bat_priv *bat_priv = netdev_priv(net_dev);
-	long gw_bandwidth_tmp = 0, up = 0, down = 0;
+	long gw_bandwidth_tmp = 0;
+	int up = 0, down = 0;
 	bool ret;
 
 	ret = parse_gw_bandwidth(net_dev, buff, &up, &down);
@@ -158,12 +160,11 @@
 	 * speeds, hence we need to calculate it back to show the number
 	 * that is going to be propagated
 	 **/
-	gw_bandwidth_to_kbit((uint8_t)gw_bandwidth_tmp,
-			     (int *)&down, (int *)&up);
+	gw_bandwidth_to_kbit((uint8_t)gw_bandwidth_tmp, &down, &up);
 
 	gw_deselect(bat_priv);
 	bat_info(net_dev, "Changing gateway bandwidth from: '%i' to: '%ld' "
-		 "(propagating: %ld%s/%ld%s)\n",
+		 "(propagating: %d%s/%d%s)\n",
 		 atomic_read(&bat_priv->gw_bandwidth), gw_bandwidth_tmp,
 		 (down > 2048 ? down / 1024 : down),
 		 (down > 2048 ? "MBit" : "KBit"),
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index dfbfccc..db7aacf 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -46,7 +46,7 @@
 	kfree(hard_iface);
 }
 
-struct hard_iface *hardif_get_by_netdev(struct net_device *net_dev)
+struct hard_iface *hardif_get_by_netdev(const struct net_device *net_dev)
 {
 	struct hard_iface *hard_iface;
 
@@ -64,7 +64,7 @@
 	return hard_iface;
 }
 
-static int is_valid_iface(struct net_device *net_dev)
+static int is_valid_iface(const struct net_device *net_dev)
 {
 	if (net_dev->flags & IFF_LOOPBACK)
 		return 0;
@@ -86,7 +86,7 @@
 	return 1;
 }
 
-static struct hard_iface *hardif_get_active(struct net_device *soft_iface)
+static struct hard_iface *hardif_get_active(const struct net_device *soft_iface)
 {
 	struct hard_iface *hard_iface;
 
@@ -138,7 +138,7 @@
 	if (new_hard_iface && !atomic_inc_not_zero(&new_hard_iface->refcount))
 		new_hard_iface = NULL;
 
-	curr_hard_iface = bat_priv->primary_if;
+	curr_hard_iface = rcu_dereference_protected(bat_priv->primary_if, 1);
 	rcu_assign_pointer(bat_priv->primary_if, new_hard_iface);
 
 	if (curr_hard_iface)
@@ -152,15 +152,9 @@
 	batman_packet->ttl = TTL;
 
 	primary_if_update_addr(bat_priv);
-
-	/***
-	 * hacky trick to make sure that we send the TT information via
-	 * our new primary interface
-	 */
-	atomic_set(&bat_priv->tt_local_changed, 1);
 }
 
-static bool hardif_is_iface_up(struct hard_iface *hard_iface)
+static bool hardif_is_iface_up(const struct hard_iface *hard_iface)
 {
 	if (hard_iface->net_dev->flags & IFF_UP)
 		return true;
@@ -176,9 +170,9 @@
 	       hard_iface->net_dev->dev_addr, ETH_ALEN);
 }
 
-static void check_known_mac_addr(struct net_device *net_dev)
+static void check_known_mac_addr(const struct net_device *net_dev)
 {
-	struct hard_iface *hard_iface;
+	const struct hard_iface *hard_iface;
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
@@ -204,8 +198,8 @@
 
 int hardif_min_mtu(struct net_device *soft_iface)
 {
-	struct bat_priv *bat_priv = netdev_priv(soft_iface);
-	struct hard_iface *hard_iface;
+	const struct bat_priv *bat_priv = netdev_priv(soft_iface);
+	const struct hard_iface *hard_iface;
 	/* allow big frames if all devices are capable to do so
 	 * (have MTU > 1500 + BAT_HEADER_LEN) */
 	int min_mtu = ETH_DATA_LEN;
@@ -285,7 +279,8 @@
 	update_min_mtu(hard_iface->soft_iface);
 }
 
-int hardif_enable_interface(struct hard_iface *hard_iface, char *iface_name)
+int hardif_enable_interface(struct hard_iface *hard_iface,
+			    const char *iface_name)
 {
 	struct bat_priv *bat_priv;
 	struct batman_packet *batman_packet;
@@ -336,10 +331,11 @@
 	batman_packet = (struct batman_packet *)(hard_iface->packet_buff);
 	batman_packet->packet_type = BAT_PACKET;
 	batman_packet->version = COMPAT_VERSION;
-	batman_packet->flags = 0;
+	batman_packet->flags = NO_FLAGS;
 	batman_packet->ttl = 2;
 	batman_packet->tq = TQ_MAX_VALUE;
-	batman_packet->num_tt = 0;
+	batman_packet->tt_num_changes = 0;
+	batman_packet->ttvn = 0;
 
 	hard_iface->if_num = bat_priv->num_ifaces;
 	bat_priv->num_ifaces++;
@@ -458,7 +454,7 @@
 
 	dev_hold(net_dev);
 
-	hard_iface = kmalloc(sizeof(struct hard_iface), GFP_ATOMIC);
+	hard_iface = kmalloc(sizeof(*hard_iface), GFP_ATOMIC);
 	if (!hard_iface) {
 		pr_err("Can't add interface (%s): out of memory\n",
 		       net_dev->name);
@@ -522,7 +518,7 @@
 static int hard_if_event(struct notifier_block *this,
 			 unsigned long event, void *ptr)
 {
-	struct net_device *net_dev = (struct net_device *)ptr;
+	struct net_device *net_dev = ptr;
 	struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev);
 	struct hard_iface *primary_if = NULL;
 	struct bat_priv *bat_priv;
@@ -567,7 +563,7 @@
 		break;
 	default:
 		break;
-	};
+	}
 
 hardif_put:
 	hardif_free_ref(hard_iface);
@@ -658,6 +654,14 @@
 	case BAT_VIS:
 		ret = recv_vis_packet(skb, hard_iface);
 		break;
+		/* Translation table query (request or response) */
+	case BAT_TT_QUERY:
+		ret = recv_tt_query(skb, hard_iface);
+		break;
+		/* Roaming advertisement */
+	case BAT_ROAM_ADV:
+		ret = recv_roam_adv(skb, hard_iface);
+		break;
 	default:
 		ret = NET_RX_DROP;
 	}
diff --git a/net/batman-adv/hard-interface.h b/net/batman-adv/hard-interface.h
index 6426599..442eacb 100644
--- a/net/batman-adv/hard-interface.h
+++ b/net/batman-adv/hard-interface.h
@@ -22,17 +22,21 @@
 #ifndef _NET_BATMAN_ADV_HARD_INTERFACE_H_
 #define _NET_BATMAN_ADV_HARD_INTERFACE_H_
 
-#define IF_NOT_IN_USE 0
-#define IF_TO_BE_REMOVED 1
-#define IF_INACTIVE 2
-#define IF_ACTIVE 3
-#define IF_TO_BE_ACTIVATED 4
-#define IF_I_WANT_YOU 5
+enum hard_if_state {
+	IF_NOT_IN_USE,
+	IF_TO_BE_REMOVED,
+	IF_INACTIVE,
+	IF_ACTIVE,
+	IF_TO_BE_ACTIVATED,
+	IF_I_WANT_YOU
+};
 
 extern struct notifier_block hard_if_notifier;
 
-struct hard_iface *hardif_get_by_netdev(struct net_device *net_dev);
-int hardif_enable_interface(struct hard_iface *hard_iface, char *iface_name);
+struct hard_iface *hardif_get_by_netdev(const struct net_device *net_dev);
+int hardif_enable_interface(struct hard_iface *hard_iface,
+			    const char *iface_name);
 void hardif_disable_interface(struct hard_iface *hard_iface);
 void hardif_remove_interfaces(void);
 int hardif_min_mtu(struct net_device *soft_iface);
diff --git a/net/batman-adv/hash.c b/net/batman-adv/hash.c
index c5213d8..2a17250 100644
--- a/net/batman-adv/hash.c
+++ b/net/batman-adv/hash.c
@@ -46,15 +46,16 @@
 {
 	struct hashtable_t *hash;
 
-	hash = kmalloc(sizeof(struct hashtable_t), GFP_ATOMIC);
+	hash = kmalloc(sizeof(*hash), GFP_ATOMIC);
 	if (!hash)
 		return NULL;
 
-	hash->table = kmalloc(sizeof(struct element_t *) * size, GFP_ATOMIC);
+	hash->table = kmalloc(sizeof(*hash->table) * size, GFP_ATOMIC);
 	if (!hash->table)
 		goto free_hash;
 
-	hash->list_locks = kmalloc(sizeof(spinlock_t) * size, GFP_ATOMIC);
+	hash->list_locks = kmalloc(sizeof(*hash->list_locks) * size,
+				   GFP_ATOMIC);
 	if (!hash->list_locks)
 		goto free_table;
 
diff --git a/net/batman-adv/hash.h b/net/batman-adv/hash.h
index 434822b..dd5c9fd 100644
--- a/net/batman-adv/hash.h
+++ b/net/batman-adv/hash.h
@@ -28,12 +28,12 @@
  * compare the data of 2 elements by their keys,
  * return 0 if they are the same and not 0 if
  * they differ */
-typedef int (*hashdata_compare_cb)(struct hlist_node *, void *);
+typedef int (*hashdata_compare_cb)(const struct hlist_node *, const void *);
 
 /* the hashfunction, should return an index
  * based on the key in the data of the first
  * argument and the size the second */
-typedef int (*hashdata_choose_cb)(void *, int);
+typedef int (*hashdata_choose_cb)(const void *, int);
 typedef void (*hashdata_free_cb)(struct hlist_node *, void *);
 
 struct hashtable_t {
@@ -80,7 +80,7 @@
 static inline int hash_add(struct hashtable_t *hash,
 			   hashdata_compare_cb compare,
 			   hashdata_choose_cb choose,
-			   void *data, struct hlist_node *data_node)
+			   const void *data, struct hlist_node *data_node)
 {
 	int index;
 	struct hlist_head *head;
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
index fa22ba2..ac3520e 100644
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@ -46,7 +46,7 @@
 
 	nonseekable_open(inode, file);
 
-	socket_client = kmalloc(sizeof(struct socket_client), GFP_KERNEL);
+	socket_client = kmalloc(sizeof(*socket_client), GFP_KERNEL);
 
 	if (!socket_client)
 		return -ENOMEM;
@@ -310,7 +310,7 @@
 {
 	struct socket_packet *socket_packet;
 
-	socket_packet = kmalloc(sizeof(struct socket_packet), GFP_ATOMIC);
+	socket_packet = kmalloc(sizeof(*socket_packet), GFP_ATOMIC);
 
 	if (!socket_packet)
 		return;
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index 0a7cee0..e367e69 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -84,8 +84,10 @@
 
 	spin_lock_init(&bat_priv->forw_bat_list_lock);
 	spin_lock_init(&bat_priv->forw_bcast_list_lock);
-	spin_lock_init(&bat_priv->tt_lhash_lock);
-	spin_lock_init(&bat_priv->tt_ghash_lock);
+	spin_lock_init(&bat_priv->tt_changes_list_lock);
+	spin_lock_init(&bat_priv->tt_req_list_lock);
+	spin_lock_init(&bat_priv->tt_roam_list_lock);
+	spin_lock_init(&bat_priv->tt_buff_lock);
 	spin_lock_init(&bat_priv->gw_list_lock);
 	spin_lock_init(&bat_priv->vis_hash_lock);
 	spin_lock_init(&bat_priv->vis_list_lock);
@@ -96,14 +98,14 @@
 	INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
 	INIT_HLIST_HEAD(&bat_priv->gw_list);
 	INIT_HLIST_HEAD(&bat_priv->softif_neigh_vids);
+	INIT_LIST_HEAD(&bat_priv->tt_changes_list);
+	INIT_LIST_HEAD(&bat_priv->tt_req_list);
+	INIT_LIST_HEAD(&bat_priv->tt_roam_list);
 
 	if (originator_init(bat_priv) < 1)
 		goto err;
 
-	if (tt_local_init(bat_priv) < 1)
-		goto err;
-
-	if (tt_global_init(bat_priv) < 1)
+	if (tt_init(bat_priv) < 1)
 		goto err;
 
 	tt_local_add(soft_iface, soft_iface->dev_addr);
@@ -111,6 +113,7 @@
 	if (vis_init(bat_priv) < 1)
 		goto err;
 
+	atomic_set(&bat_priv->gw_reselect, 0);
 	atomic_set(&bat_priv->mesh_state, MESH_ACTIVE);
 	goto end;
 
@@ -137,8 +140,7 @@
 	gw_node_purge(bat_priv);
 	originator_free(bat_priv);
 
-	tt_local_free(bat_priv);
-	tt_global_free(bat_priv);
+	tt_free(bat_priv);
 
 	softif_neigh_purge(bat_priv);
 
@@ -155,9 +157,9 @@
 	module_put(THIS_MODULE);
 }
 
-int is_my_mac(uint8_t *addr)
+int is_my_mac(const uint8_t *addr)
 {
-	struct hard_iface *hard_iface;
+	const struct hard_iface *hard_iface;
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index 148b49e..4f293b5 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -42,15 +42,25 @@
  * -> TODO: check influence on TQ_LOCAL_WINDOW_SIZE */
 #define PURGE_TIMEOUT 200
 #define TT_LOCAL_TIMEOUT 3600 /* in seconds */
-
+#define TT_CLIENT_ROAM_TIMEOUT 600
 /* sliding packet range of received originator messages in sequence numbers
  * (should be a multiple of our word size) */
 #define TQ_LOCAL_WINDOW_SIZE 64
+#define TT_REQUEST_TIMEOUT 3 /* seconds we have to keep pending tt_req */
+
 #define TQ_GLOBAL_WINDOW_SIZE 5
 #define TQ_LOCAL_BIDRECT_SEND_MINIMUM 1
 #define TQ_LOCAL_BIDRECT_RECV_MINIMUM 1
 #define TQ_TOTAL_BIDRECT_LIMIT 1
 
+#define TT_OGM_APPEND_MAX 3 /* number of OGMs sent with the last tt diff */
+
+#define ROAMING_MAX_TIME 20 /* Time in which a client can roam at most
+			     * ROAMING_MAX_COUNT times */
+#define ROAMING_MAX_COUNT 5
+
+#define NO_FLAGS 0
+
 #define NUM_WORDS (TQ_LOCAL_WINDOW_SIZE / WORD_BIT_SIZE)
 
 #define LOG_BUF_LEN 8192	  /* has to be a power of 2 */
@@ -72,13 +82,27 @@
 #define RESET_PROTECTION_MS 30000
 #define EXPECTED_SEQNO_RANGE	65536
 
-#define MESH_INACTIVE 0
-#define MESH_ACTIVE 1
-#define MESH_DEACTIVATING 2
+enum mesh_state {
+	MESH_INACTIVE,
+	MESH_ACTIVE,
+	MESH_DEACTIVATING
+};
 
 #define BCAST_QUEUE_LEN		256
 #define BATMAN_QUEUE_LEN	256
 
+enum uev_action {
+	UEV_ADD = 0,
+	UEV_DEL,
+	UEV_CHANGE
+};
+
+enum uev_type {
+	UEV_GW = 0
+};
+
+#define GW_THRESHOLD	50
+
 /*
  * Debug Messages
  */
@@ -89,10 +113,12 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 /* all messages related to routing / flooding / broadcasting / etc */
-#define DBG_BATMAN 1
-/* route or tt entry added / changed / deleted */
-#define DBG_ROUTES 2
-#define DBG_ALL 3
+enum dbg_level {
+	DBG_BATMAN = 1 << 0,
+	DBG_ROUTES = 1 << 1, /* route added / changed / deleted */
+	DBG_TT	   = 1 << 2, /* translation table operations */
+	DBG_ALL    = 7
+};
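
Since the debug levels are now individual bits they can be OR-ed together in
the log-level mask; DBG_ALL == 7 is exactly the union of the three. A
hypothetical equivalent spelling that makes the bit layout explicit:

	DBG_ALL = DBG_BATMAN | DBG_ROUTES | DBG_TT /* == (1 << 0) | (1 << 1) | (1 << 2) */
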
 
 
 /*
@@ -133,10 +159,10 @@
 void mesh_free(struct net_device *soft_iface);
 void inc_module_count(void);
 void dec_module_count(void);
-int is_my_mac(uint8_t *addr);
+int is_my_mac(const uint8_t *addr);
 
 #ifdef CONFIG_BATMAN_ADV_DEBUG
-int debug_log(struct bat_priv *bat_priv, char *fmt, ...);
+int debug_log(struct bat_priv *bat_priv, const char *fmt, ...) __printf(2, 3);
 
 #define bat_dbg(type, bat_priv, fmt, arg...)			\
 	do {							\
@@ -145,9 +171,10 @@
 	}							\
 	while (0)
 #else /* !CONFIG_BATMAN_ADV_DEBUG */
-static inline void bat_dbg(char type __always_unused,
+__printf(3, 4)
+static inline void bat_dbg(int type __always_unused,
 			   struct bat_priv *bat_priv __always_unused,
-			   char *fmt __always_unused, ...)
+			   const char *fmt __always_unused, ...)
 {
 }
 #endif
@@ -172,11 +199,32 @@
  *
  * note: can't use compare_ether_addr() as it requires aligned memory
  */
-static inline int compare_eth(void *data1, void *data2)
+
+static inline int compare_eth(const void *data1, const void *data2)
 {
 	return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
 }
 
+
 #define atomic_dec_not_zero(v)	atomic_add_unless((v), -1, 0)
 
+/* Returns the smallest signed integer in two's complement with the size of x */
+#define smallest_signed_int(x) (1u << (7u + 8u * (sizeof(x) - 1u)))
+
+/* Checks if a sequence number x is a predecessor/successor of y.
+ * These macros handle overflows/underflows and can correctly check for a
+ * predecessor/successor unless the variable sequence number has grown by
+ * more than 2**(bitwidth(x)-1)-1.
+ * This means that for a uint8_t with the maximum value 255, it would think:
+ *  - when adding nothing - it is neither a predecessor nor a successor
+ *  - before adding more than 127 to the starting value - it is a predecessor,
+ *  - when adding 128 - it is neither a predecessor nor a successor,
+ *  - after adding more than 127 to the starting value - it is a successor */
+#define seq_before(x, y) ({typeof(x) _d1 = (x); \
+			  typeof(y) _d2 = (y); \
+			  typeof(x) _dummy = (_d1 - _d2); \
+			  (void) (&_d1 == &_d2); \
+			  _dummy > smallest_signed_int(_dummy); })
+#define seq_after(x, y) seq_before(y, x)
+
 #endif /* _NET_BATMAN_ADV_MAIN_H_ */
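
The two macros above are self-contained, so their wraparound semantics can be
checked outside the kernel. A minimal userspace sketch (GCC is assumed for
typeof and statement expressions; main() and the sample values are
illustrative additions):

#include <stdint.h>
#include <stdio.h>

#define smallest_signed_int(x) (1u << (7u + 8u * (sizeof(x) - 1u)))
#define seq_before(x, y) ({typeof(x) _d1 = (x); \
			  typeof(y) _d2 = (y); \
			  typeof(x) _dummy = (_d1 - _d2); \
			  (void) (&_d1 == &_d2); \
			  _dummy > smallest_signed_int(_dummy); })
#define seq_after(x, y) seq_before(y, x)

int main(void)
{
	uint8_t older = 250, newer = 4; /* newer wrapped past 255 */

	/* prints "before=1 after=0": 250 still precedes 4 after the wrap,
	 * since (uint8_t)(250 - 4) == 246 > 128 == smallest_signed_int */
	printf("before=%d after=%d\n",
	       seq_before(older, newer), seq_after(older, newer));
	return 0;
}
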
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 40a30bb..338b3c5 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -37,6 +37,14 @@
 	queue_delayed_work(bat_event_workqueue, &bat_priv->orig_work, 1 * HZ);
 }
 
+/* returns 1 if they are the same originator */
+static int compare_orig(const struct hlist_node *node, const void *data2)
+{
+	const void *data1 = container_of(node, struct orig_node, hash_entry);
+
+	return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
+}
+
 int originator_init(struct bat_priv *bat_priv)
 {
 	if (bat_priv->orig_hash)
@@ -77,7 +85,7 @@
 
 struct neigh_node *create_neighbor(struct orig_node *orig_node,
 				   struct orig_node *orig_neigh_node,
-				   uint8_t *neigh,
+				   const uint8_t *neigh,
 				   struct hard_iface *if_incoming)
 {
 	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
@@ -86,7 +94,7 @@
 	bat_dbg(DBG_BATMAN, bat_priv,
 		"Creating new last-hop neighbor of originator\n");
 
-	neigh_node = kzalloc(sizeof(struct neigh_node), GFP_ATOMIC);
+	neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
 	if (!neigh_node)
 		return NULL;
 
@@ -137,6 +145,7 @@
 	tt_global_del_orig(orig_node->bat_priv, orig_node,
 			    "originator timed out");
 
+	kfree(orig_node->tt_buff);
 	kfree(orig_node->bcast_own);
 	kfree(orig_node->bcast_own_sum);
 	kfree(orig_node);
@@ -183,7 +192,7 @@
 
 /* this function finds or creates an originator entry for the given
  * address if it does not exist */
-struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr)
+struct orig_node *get_orig_node(struct bat_priv *bat_priv, const uint8_t *addr)
 {
 	struct orig_node *orig_node;
 	int size;
@@ -196,7 +205,7 @@
 	bat_dbg(DBG_BATMAN, bat_priv,
 		"Creating new originator: %pM\n", addr);
 
-	orig_node = kzalloc(sizeof(struct orig_node), GFP_ATOMIC);
+	orig_node = kzalloc(sizeof(*orig_node), GFP_ATOMIC);
 	if (!orig_node)
 		return NULL;
 
@@ -205,14 +214,18 @@
 	spin_lock_init(&orig_node->ogm_cnt_lock);
 	spin_lock_init(&orig_node->bcast_seqno_lock);
 	spin_lock_init(&orig_node->neigh_list_lock);
+	spin_lock_init(&orig_node->tt_buff_lock);
 
 	/* extra reference for return */
 	atomic_set(&orig_node->refcount, 2);
 
+	orig_node->tt_poss_change = false;
 	orig_node->bat_priv = bat_priv;
 	memcpy(orig_node->orig, addr, ETH_ALEN);
 	orig_node->router = NULL;
 	orig_node->tt_buff = NULL;
+	orig_node->tt_buff_len = 0;
+	atomic_set(&orig_node->tt_size, 0);
 	orig_node->bcast_seqno_reset = jiffies - 1
 					- msecs_to_jiffies(RESET_PROTECTION_MS);
 	orig_node->batman_seqno_reset = jiffies - 1
@@ -322,9 +335,7 @@
 		if (purge_orig_neighbors(bat_priv, orig_node,
 							&best_neigh_node)) {
 			update_routes(bat_priv, orig_node,
-				      best_neigh_node,
-				      orig_node->tt_buff,
-				      orig_node->tt_buff_len);
+				      best_neigh_node);
 		}
 	}
 
@@ -559,7 +570,7 @@
 	memcpy(data_ptr, orig_node->bcast_own, del_if_num * chunk_size);
 
 	/* copy second part */
-	memcpy(data_ptr + del_if_num * chunk_size,
+	memcpy((char *)data_ptr + del_if_num * chunk_size,
 	       orig_node->bcast_own + ((del_if_num + 1) * chunk_size),
 	       (max_if_num - del_if_num) * chunk_size);
 
@@ -579,7 +590,7 @@
 	memcpy(data_ptr, orig_node->bcast_own_sum,
 	       del_if_num * sizeof(uint8_t));
 
-	memcpy(data_ptr + del_if_num * sizeof(uint8_t),
+	memcpy((char *)data_ptr + del_if_num * sizeof(uint8_t),
 	       orig_node->bcast_own_sum + ((del_if_num + 1) * sizeof(uint8_t)),
 	       (max_if_num - del_if_num) * sizeof(uint8_t));
 
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
index e1d641f..cfc1f60 100644
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@ -28,10 +28,10 @@
 void originator_free(struct bat_priv *bat_priv);
 void purge_orig_ref(struct bat_priv *bat_priv);
 void orig_node_free_ref(struct orig_node *orig_node);
-struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr);
+struct orig_node *get_orig_node(struct bat_priv *bat_priv, const uint8_t *addr);
 struct neigh_node *create_neighbor(struct orig_node *orig_node,
 				   struct orig_node *orig_neigh_node,
-				   uint8_t *neigh,
+				   const uint8_t *neigh,
 				   struct hard_iface *if_incoming);
 void neigh_node_free_ref(struct neigh_node *neigh_node);
 struct neigh_node *orig_node_get_router(struct orig_node *orig_node);
@@ -40,19 +40,11 @@
 int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num);
 
 
-/* returns 1 if they are the same originator */
-static inline int compare_orig(struct hlist_node *node, void *data2)
-{
-	void *data1 = container_of(node, struct orig_node, hash_entry);
-
-	return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
-}
-
 /* hashfunction to choose an entry in a hash table of given size */
 /* hash algorithm from http://en.wikipedia.org/wiki/Hash_table */
-static inline int choose_orig(void *data, int32_t size)
+static inline int choose_orig(const void *data, int32_t size)
 {
-	unsigned char *key = data;
+	const unsigned char *key = data;
 	uint32_t hash = 0;
 	size_t i;
 
@@ -70,7 +62,7 @@
 }
 
 static inline struct orig_node *orig_hash_find(struct bat_priv *bat_priv,
-					       void *data)
+					       const void *data)
 {
 	struct hashtable_t *hash = bat_priv->orig_hash;
 	struct hlist_head *head;
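
The body of choose_orig() is elided by this hunk; judging from the variables
kept above, it is the usual one-at-a-time hash over the 6-byte MAC address. A
standalone sketch under that assumption (the name and the ETH_ALEN define are
added here for self-containment):

#include <stdint.h>
#include <stddef.h>

#define ETH_ALEN 6

/* one-at-a-time hash of a MAC address into a bucket index [0, size) */
static int choose_orig_sketch(const void *data, int32_t size)
{
	const unsigned char *key = data;
	uint32_t hash = 0;
	size_t i;

	for (i = 0; i < ETH_ALEN; i++) {
		hash += key[i];
		hash += (hash << 10);
		hash ^= (hash >> 6);
	}

	hash += (hash << 3);
	hash ^= (hash >> 11);
	hash += (hash << 15);

	return hash % size;
}
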
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h
index eda9965..c5f081d 100644
--- a/net/batman-adv/packet.h
+++ b/net/batman-adv/packet.h
@@ -24,46 +24,79 @@
 
 #define ETH_P_BATMAN  0x4305	/* unofficial/not registered Ethertype */
 
-#define BAT_PACKET       0x01
-#define BAT_ICMP         0x02
-#define BAT_UNICAST      0x03
-#define BAT_BCAST        0x04
-#define BAT_VIS          0x05
-#define BAT_UNICAST_FRAG 0x06
+enum bat_packettype {
+	BAT_PACKET       = 0x01,
+	BAT_ICMP         = 0x02,
+	BAT_UNICAST      = 0x03,
+	BAT_BCAST        = 0x04,
+	BAT_VIS          = 0x05,
+	BAT_UNICAST_FRAG = 0x06,
+	BAT_TT_QUERY     = 0x07,
+	BAT_ROAM_ADV     = 0x08
+};
 
 /* this file is included by batctl which needs these defines */
-#define COMPAT_VERSION 12
-#define DIRECTLINK 0x40
-#define VIS_SERVER 0x20
-#define PRIMARIES_FIRST_HOP 0x10
+#define COMPAT_VERSION 14
+
+enum batman_flags {
+	PRIMARIES_FIRST_HOP = 1 << 4,
+	VIS_SERVER	    = 1 << 5,
+	DIRECTLINK	    = 1 << 6
+};
 
 /* ICMP message types */
-#define ECHO_REPLY 0
-#define DESTINATION_UNREACHABLE 3
-#define ECHO_REQUEST 8
-#define TTL_EXCEEDED 11
-#define PARAMETER_PROBLEM 12
+enum icmp_packettype {
+	ECHO_REPLY		= 0,
+	DESTINATION_UNREACHABLE = 3,
+	ECHO_REQUEST		= 8,
+	TTL_EXCEEDED		= 11,
+	PARAMETER_PROBLEM	= 12
+};
 
 /* vis defines */
-#define VIS_TYPE_SERVER_SYNC		0
-#define VIS_TYPE_CLIENT_UPDATE		1
+enum vis_packettype {
+	VIS_TYPE_SERVER_SYNC   = 0,
+	VIS_TYPE_CLIENT_UPDATE = 1
+};
 
 /* fragmentation defines */
-#define UNI_FRAG_HEAD 0x01
-#define UNI_FRAG_LARGETAIL 0x02
+enum unicast_frag_flags {
+	UNI_FRAG_HEAD	   = 1 << 0,
+	UNI_FRAG_LARGETAIL = 1 << 1
+};
+
+/* TT_QUERY subtypes */
+#define TT_QUERY_TYPE_MASK 0x3
+
+enum tt_query_packettype {
+	TT_REQUEST    = 0,
+	TT_RESPONSE   = 1
+};
+
+/* TT_QUERY flags */
+enum tt_query_flags {
+	TT_FULL_TABLE = 1 << 2
+};
+
+/* TT_CHANGE flags */
+enum tt_change_flags {
+	TT_CHANGE_DEL  = 0x01,
+	TT_CLIENT_ROAM = 0x02
+};
 
 struct batman_packet {
 	uint8_t  packet_type;
 	uint8_t  version;  /* batman version field */
+	uint8_t  ttl;
 	uint8_t  flags;    /* 0x40: DIRECTLINK flag, 0x20 VIS_SERVER flag... */
-	uint8_t  tq;
 	uint32_t seqno;
 	uint8_t  orig[6];
 	uint8_t  prev_sender[6];
-	uint8_t  ttl;
-	uint8_t  num_tt;
 	uint8_t  gw_flags;  /* flags related to gateway class */
-	uint8_t  align;
+	uint8_t  tq;
+	uint8_t  tt_num_changes;
+	uint8_t  ttvn; /* translation table version number */
+	uint16_t tt_crc;
 } __packed;
 
 #define BAT_PACKET_LEN sizeof(struct batman_packet)
@@ -71,12 +104,13 @@
 struct icmp_packet {
 	uint8_t  packet_type;
 	uint8_t  version;  /* batman version field */
-	uint8_t  msg_type; /* see ICMP message types above */
 	uint8_t  ttl;
+	uint8_t  msg_type; /* see ICMP message types above */
 	uint8_t  dst[6];
 	uint8_t  orig[6];
 	uint16_t seqno;
 	uint8_t  uid;
+	uint8_t  reserved;
 } __packed;
 
 #define BAT_RR_LEN 16
@@ -86,8 +120,8 @@
 struct icmp_packet_rr {
 	uint8_t  packet_type;
 	uint8_t  version;  /* batman version field */
-	uint8_t  msg_type; /* see ICMP message types above */
 	uint8_t  ttl;
+	uint8_t  msg_type; /* see ICMP message types above */
 	uint8_t  dst[6];
 	uint8_t  orig[6];
 	uint16_t seqno;
@@ -99,16 +133,19 @@
 struct unicast_packet {
 	uint8_t  packet_type;
 	uint8_t  version;  /* batman version field */
-	uint8_t  dest[6];
 	uint8_t  ttl;
+	uint8_t  ttvn; /* destination translation table version number */
+	uint8_t  dest[6];
 } __packed;
 
 struct unicast_frag_packet {
 	uint8_t  packet_type;
 	uint8_t  version;  /* batman version field */
-	uint8_t  dest[6];
 	uint8_t  ttl;
+	uint8_t  ttvn; /* destination translation table version number */
+	uint8_t  dest[6];
 	uint8_t  flags;
+	uint8_t  align;
 	uint8_t  orig[6];
 	uint16_t seqno;
 } __packed;
@@ -116,21 +153,61 @@
 struct bcast_packet {
 	uint8_t  packet_type;
 	uint8_t  version;  /* batman version field */
-	uint8_t  orig[6];
 	uint8_t  ttl;
+	uint8_t  reserved;
 	uint32_t seqno;
+	uint8_t  orig[6];
 } __packed;
 
 struct vis_packet {
 	uint8_t  packet_type;
 	uint8_t  version;        /* batman version field */
-	uint8_t  vis_type;	 /* which type of vis-participant sent this? */
-	uint8_t  entries;	 /* number of entries behind this struct */
-	uint32_t seqno;		 /* sequence number */
 	uint8_t  ttl;		 /* TTL */
+	uint8_t  vis_type;	 /* which type of vis-participant sent this? */
+	uint32_t seqno;		 /* sequence number */
+	uint8_t  entries;	 /* number of entries behind this struct */
+	uint8_t  reserved;
 	uint8_t  vis_orig[6];	 /* originator that announces its neighbors */
 	uint8_t  target_orig[6]; /* who should receive this packet */
 	uint8_t  sender_orig[6]; /* who sent or rebroadcasted this packet */
 } __packed;
 
+struct tt_query_packet {
+	uint8_t  packet_type;
+	uint8_t  version;  /* batman version field */
+	uint8_t  ttl;
+	/* the flag field is a combination of:
+	 * - TT_REQUEST or TT_RESPONSE
+	 * - TT_FULL_TABLE */
+	uint8_t  flags;
+	uint8_t  dst[ETH_ALEN];
+	uint8_t  src[ETH_ALEN];
+	/* the ttvn field is:
+	 * if TT_REQUEST: ttvn that triggered the
+	 *		  request
+	 * if TT_RESPONSE: new ttvn for the src
+	 *		   orig_node */
+	uint8_t  ttvn;
+	/* tt_data field is:
+	 * if TT_REQUEST: crc associated with the
+	 *		  ttvn
+	 * if TT_RESPONSE: table_size */
+	uint16_t tt_data;
+} __packed;
+
+struct roam_adv_packet {
+	uint8_t  packet_type;
+	uint8_t  version;
+	uint8_t  ttl;
+	uint8_t  reserved;
+	uint8_t  dst[ETH_ALEN];
+	uint8_t  src[ETH_ALEN];
+	uint8_t  client[ETH_ALEN];
+} __packed;
+
+struct tt_change {
+	uint8_t flags;
+	uint8_t addr[ETH_ALEN];
+} __packed;
+
 #endif /* _NET_BATMAN_ADV_PACKET_H_ */
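
The tt_query flags byte packs the query type into the low two bits (masked by
TT_QUERY_TYPE_MASK) and the TT_FULL_TABLE modifier into bit 2; this is how
recv_tt_query() dispatches later in this patch. A small standalone sketch of
composing and decoding the byte (main() and the sample value are
illustrative):

#include <stdint.h>
#include <stdio.h>

#define TT_QUERY_TYPE_MASK 0x3
enum tt_query_packettype { TT_REQUEST = 0, TT_RESPONSE = 1 };
enum tt_query_flags { TT_FULL_TABLE = 1 << 2 };

int main(void)
{
	uint8_t flags = TT_RESPONSE | TT_FULL_TABLE;

	/* prints "type=1 full=F": a full-table response */
	printf("type=%d full=%c\n",
	       flags & TT_QUERY_TYPE_MASK,
	       (flags & TT_FULL_TABLE) ? 'F' : '.');
	return 0;
}
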
diff --git a/net/batman-adv/ring_buffer.c b/net/batman-adv/ring_buffer.c
index 5bb6a61..f1ccfa7 100644
--- a/net/batman-adv/ring_buffer.c
+++ b/net/batman-adv/ring_buffer.c
@@ -28,9 +28,9 @@
 	*lq_index = (*lq_index + 1) % TQ_GLOBAL_WINDOW_SIZE;
 }
 
-uint8_t ring_buffer_avg(uint8_t lq_recv[])
+uint8_t ring_buffer_avg(const uint8_t lq_recv[])
 {
-	uint8_t *ptr;
+	const uint8_t *ptr;
 	uint16_t count = 0, i = 0, sum = 0;
 
 	ptr = lq_recv;
diff --git a/net/batman-adv/ring_buffer.h b/net/batman-adv/ring_buffer.h
index 0395b27..7cdfe62 100644
--- a/net/batman-adv/ring_buffer.h
+++ b/net/batman-adv/ring_buffer.h
@@ -23,6 +23,6 @@
 #define _NET_BATMAN_ADV_RING_BUFFER_H_
 
 void ring_buffer_set(uint8_t lq_recv[], uint8_t *lq_index, uint8_t value);
-uint8_t ring_buffer_avg(uint8_t lq_recv[]);
+uint8_t ring_buffer_avg(const uint8_t lq_recv[]);
 
 #endif /* _NET_BATMAN_ADV_RING_BUFFER_H_ */
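
ring_buffer_avg() averages only the slots of the TQ ring buffer that actually
hold a sample, so empty slots do not drag the link quality down. The loop
body is elided by the hunk above; a standalone sketch under the assumption
that it follows the usual batman-adv implementation (TQ_GLOBAL_WINDOW_SIZE
copied from main.h):

#include <stdint.h>

#define TQ_GLOBAL_WINDOW_SIZE 5 /* from main.h */

static uint8_t ring_buffer_avg_sketch(const uint8_t lq_recv[])
{
	const uint8_t *ptr = lq_recv;
	uint16_t count = 0, i = 0, sum = 0;

	for (i = 0; i < TQ_GLOBAL_WINDOW_SIZE; i++, ptr++) {
		if (*ptr != 0) {
			count++;
			sum += *ptr;
		}
	}

	/* e.g. {200, 0, 180, 0, 0} averages to 190, not 76 */
	return (count == 0 ? 0 : (uint8_t)(sum / count));
}
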
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index bb1c3ec..0ce090c 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -64,28 +64,57 @@
 	}
 }
 
-static void update_TT(struct bat_priv *bat_priv, struct orig_node *orig_node,
-		       unsigned char *tt_buff, int tt_buff_len)
+static void update_transtable(struct bat_priv *bat_priv,
+			      struct orig_node *orig_node,
+			      const unsigned char *tt_buff,
+			      uint8_t tt_num_changes, uint8_t ttvn,
+			      uint16_t tt_crc)
 {
-	if ((tt_buff_len != orig_node->tt_buff_len) ||
-	    ((tt_buff_len > 0) &&
-	     (orig_node->tt_buff_len > 0) &&
-	     (memcmp(orig_node->tt_buff, tt_buff, tt_buff_len) != 0))) {
+	uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
+	bool full_table = true;
 
-		if (orig_node->tt_buff_len > 0)
-			tt_global_del_orig(bat_priv, orig_node,
-					    "originator changed tt");
+	/* the ttvn increased by one -> we can apply the attached changes */
+	if (ttvn - orig_ttvn == 1) {
+		/* the OGM could not contain the changes because they were too
+		 * many to fit in one frame or because they have already been
+		 * sent TT_OGM_APPEND_MAX times. In this case send a tt
+		 * request */
+		if (!tt_num_changes) {
+			full_table = false;
+			goto request_table;
+		}
 
-		if ((tt_buff_len > 0) && (tt_buff))
-			tt_global_add_orig(bat_priv, orig_node,
-					    tt_buff, tt_buff_len);
+		tt_update_changes(bat_priv, orig_node, tt_num_changes, ttvn,
+				  (struct tt_change *)tt_buff);
+
+		/* Even if we received the crc in the OGM, we prefer
+		 * to recompute it to spot any possible inconsistency
+		 * in the global table */
+		orig_node->tt_crc = tt_global_crc(bat_priv, orig_node);
+		/* Roaming phase is over: tables are in sync again. I can
+		 * unset the flag */
+		orig_node->tt_poss_change = false;
+	} else {
+		/* if we missed more than one change or our tables are not
+		 * in sync anymore -> request fresh tt data */
+		if (ttvn != orig_ttvn || orig_node->tt_crc != tt_crc) {
+request_table:
+			bat_dbg(DBG_TT, bat_priv, "TT inconsistency for %pM. "
+				"Need to retrieve the correct information "
+				"(ttvn: %u last_ttvn: %u crc: %u last_crc: "
+				"%u num_changes: %u)\n", orig_node->orig, ttvn,
+				orig_ttvn, tt_crc, orig_node->tt_crc,
+				tt_num_changes);
+			send_tt_request(bat_priv, orig_node, ttvn, tt_crc,
+					full_table);
+			return;
+		}
 	}
 }
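
Reduced to its decision logic, update_transtable() distinguishes three cases:
the OGM is exactly one ttvn ahead and carries the diff (apply it), it is one
ahead but the changes were too many to attach (request the table), or the
ttvn/CRC disagree in any other way (request the table). A hypothetical
pure-logic helper capturing the same branches:

#include <stdint.h>

enum tt_action { TT_APPLY_DIFF, TT_NOOP, TT_REQUEST_TABLE };

static enum tt_action tt_decide(uint8_t orig_ttvn, uint8_t ttvn,
				uint16_t orig_crc, uint16_t tt_crc,
				uint8_t tt_num_changes)
{
	/* exactly one version ahead: an attached diff is usable */
	if ((uint8_t)(ttvn - orig_ttvn) == 1)
		return tt_num_changes ? TT_APPLY_DIFF : TT_REQUEST_TABLE;

	/* missed more than one change, or tables out of sync */
	if (ttvn != orig_ttvn || orig_crc != tt_crc)
		return TT_REQUEST_TABLE;

	return TT_NOOP;
}
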
 
 static void update_route(struct bat_priv *bat_priv,
 			 struct orig_node *orig_node,
-			 struct neigh_node *neigh_node,
-			 unsigned char *tt_buff, int tt_buff_len)
+			 struct neigh_node *neigh_node)
 {
 	struct neigh_node *curr_router;
 
@@ -93,11 +122,10 @@
 
 	/* route deleted */
 	if ((curr_router) && (!neigh_node)) {
-
 		bat_dbg(DBG_ROUTES, bat_priv, "Deleting route towards: %pM\n",
 			orig_node->orig);
 		tt_global_del_orig(bat_priv, orig_node,
-				    "originator timed out");
+				    "Deleted route towards originator");
 
 	/* route added */
 	} else if ((!curr_router) && (neigh_node)) {
@@ -105,11 +133,8 @@
 		bat_dbg(DBG_ROUTES, bat_priv,
 			"Adding route towards: %pM (via %pM)\n",
 			orig_node->orig, neigh_node->addr);
-		tt_global_add_orig(bat_priv, orig_node,
-				    tt_buff, tt_buff_len);
-
 	/* route changed */
-	} else {
+	} else if (neigh_node && curr_router) {
 		bat_dbg(DBG_ROUTES, bat_priv,
 			"Changing route towards: %pM "
 			"(now via %pM - was via %pM)\n",
@@ -133,10 +158,8 @@
 		neigh_node_free_ref(curr_router);
 }
 
-
 void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
-		   struct neigh_node *neigh_node, unsigned char *tt_buff,
-		   int tt_buff_len)
+		   struct neigh_node *neigh_node)
 {
 	struct neigh_node *router = NULL;
 
@@ -146,11 +169,7 @@
 	router = orig_node_get_router(orig_node);
 
 	if (router != neigh_node)
-		update_route(bat_priv, orig_node, neigh_node,
-			     tt_buff, tt_buff_len);
-	/* may be just TT changed */
-	else
-		update_TT(bat_priv, orig_node, tt_buff, tt_buff_len);
+		update_route(bat_priv, orig_node, neigh_node);
 
 out:
 	if (router)
@@ -165,7 +184,7 @@
 	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
 	struct neigh_node *neigh_node = NULL, *tmp_neigh_node;
 	struct hlist_node *node;
-	unsigned char total_count;
+	uint8_t total_count;
 	uint8_t orig_eq_count, neigh_rq_count, tq_own;
 	int tq_asym_penalty, ret = 0;
 
@@ -348,9 +367,9 @@
 }
 
 /* copy primary address for bonding */
-static void bonding_save_primary(struct orig_node *orig_node,
+static void bonding_save_primary(const struct orig_node *orig_node,
 				 struct orig_node *orig_neigh_node,
-				 struct batman_packet *batman_packet)
+				 const struct batman_packet *batman_packet)
 {
 	if (!(batman_packet->flags & PRIMARIES_FIRST_HOP))
 		return;
@@ -358,19 +377,16 @@
 	memcpy(orig_neigh_node->primary_addr, orig_node->orig, ETH_ALEN);
 }
 
-static void update_orig(struct bat_priv *bat_priv,
-			struct orig_node *orig_node,
-			struct ethhdr *ethhdr,
-			struct batman_packet *batman_packet,
+static void update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
+			const struct ethhdr *ethhdr,
+			const struct batman_packet *batman_packet,
 			struct hard_iface *if_incoming,
-			unsigned char *tt_buff, int tt_buff_len,
-			char is_duplicate)
+			const unsigned char *tt_buff, int is_duplicate)
 {
 	struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
 	struct neigh_node *router = NULL;
 	struct orig_node *orig_node_tmp;
 	struct hlist_node *node;
-	int tmp_tt_buff_len;
 	uint8_t bcast_own_sum_orig, bcast_own_sum_neigh;
 
 	bat_dbg(DBG_BATMAN, bat_priv, "update_originator(): "
@@ -435,9 +451,6 @@
 
 	bonding_candidate_add(orig_node, neigh_node);
 
-	tmp_tt_buff_len = (tt_buff_len > batman_packet->num_tt * ETH_ALEN ?
-			    batman_packet->num_tt * ETH_ALEN : tt_buff_len);
-
 	/* if this neighbor already is our next hop there is nothing
 	 * to change */
 	router = orig_node_get_router(orig_node);
@@ -467,15 +480,19 @@
 			goto update_tt;
 	}
 
-	update_routes(bat_priv, orig_node, neigh_node,
-		      tt_buff, tmp_tt_buff_len);
-	goto update_gw;
+	update_routes(bat_priv, orig_node, neigh_node);
 
 update_tt:
-	update_routes(bat_priv, orig_node, router,
-		      tt_buff, tmp_tt_buff_len);
+	/* transtable changes have to be checked only if the OGM has been
+	 * sent through a primary interface */
+	if (((batman_packet->orig != ethhdr->h_source) &&
+				(batman_packet->ttl > 2)) ||
+				(batman_packet->flags & PRIMARIES_FIRST_HOP))
+		update_transtable(bat_priv, orig_node, tt_buff,
+				  batman_packet->tt_num_changes,
+				  batman_packet->ttvn,
+				  batman_packet->tt_crc);
 
-update_gw:
 	if (orig_node->gw_flags != batman_packet->gw_flags)
 		gw_node_update(bat_priv, orig_node, batman_packet->gw_flags);
 
@@ -531,15 +548,15 @@
  *  -1 the packet is old and has been received while the seqno window
  *     was protected. Caller should drop it.
  */
-static char count_real_packets(struct ethhdr *ethhdr,
-			       struct batman_packet *batman_packet,
-			       struct hard_iface *if_incoming)
+static int count_real_packets(const struct ethhdr *ethhdr,
+			       const struct batman_packet *batman_packet,
+			       const struct hard_iface *if_incoming)
 {
 	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
 	struct orig_node *orig_node;
 	struct neigh_node *tmp_neigh_node;
 	struct hlist_node *node;
-	char is_duplicate = 0;
+	int is_duplicate = 0;
 	int32_t seq_diff;
 	int need_update = 0;
 	int set_mark, ret = -1;
@@ -595,9 +612,9 @@
 	return ret;
 }
 
-void receive_bat_packet(struct ethhdr *ethhdr,
+void receive_bat_packet(const struct ethhdr *ethhdr,
 			struct batman_packet *batman_packet,
-			unsigned char *tt_buff, int tt_buff_len,
+			const unsigned char *tt_buff,
 			struct hard_iface *if_incoming)
 {
 	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
@@ -605,10 +622,10 @@
 	struct orig_node *orig_neigh_node, *orig_node;
 	struct neigh_node *router = NULL, *router_router = NULL;
 	struct neigh_node *orig_neigh_router = NULL;
-	char has_directlink_flag;
-	char is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
-	char is_broadcast = 0, is_bidirectional, is_single_hop_neigh;
-	char is_duplicate;
+	int has_directlink_flag;
+	int is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
+	int is_broadcast = 0, is_bidirectional, is_single_hop_neigh;
+	int is_duplicate;
 	uint32_t if_incoming_seqno;
 
 	/* Silently drop when the batman packet is actually not a
@@ -636,12 +653,14 @@
 
 	bat_dbg(DBG_BATMAN, bat_priv,
 		"Received BATMAN packet via NB: %pM, IF: %s [%pM] "
-		"(from OG: %pM, via prev OG: %pM, seqno %d, tq %d, "
-		"TTL %d, V %d, IDF %d)\n",
+		"(from OG: %pM, via prev OG: %pM, seqno %d, ttvn %u, "
+		"crc %u, changes %u, tq %d, TTL %d, V %d, IDF %d)\n",
 		ethhdr->h_source, if_incoming->net_dev->name,
 		if_incoming->net_dev->dev_addr, batman_packet->orig,
 		batman_packet->prev_sender, batman_packet->seqno,
-		batman_packet->tq, batman_packet->ttl, batman_packet->version,
+		batman_packet->ttvn, batman_packet->tt_crc,
+		batman_packet->tt_num_changes, batman_packet->tq,
+		batman_packet->ttl, batman_packet->version,
 		has_directlink_flag);
 
 	rcu_read_lock();
@@ -664,7 +683,7 @@
 				hard_iface->net_dev->dev_addr))
 			is_my_oldorig = 1;
 
-		if (compare_eth(ethhdr->h_source, broadcast_addr))
+		if (is_broadcast_ether_addr(ethhdr->h_source))
 			is_broadcast = 1;
 	}
 	rcu_read_unlock();
@@ -701,17 +720,16 @@
 
 		/* neighbor has to indicate direct link and it has to
 		 * come via the corresponding interface */
-		/* if received seqno equals last send seqno save new
-		 * seqno for bidirectional check */
+		/* save packet seqno for bidirectional check */
 		if (has_directlink_flag &&
 		    compare_eth(if_incoming->net_dev->dev_addr,
-				batman_packet->orig) &&
-		    (batman_packet->seqno - if_incoming_seqno + 2 == 0)) {
+				batman_packet->orig)) {
 			offset = if_incoming->if_num * NUM_WORDS;
 
 			spin_lock_bh(&orig_neigh_node->ogm_cnt_lock);
 			word = &(orig_neigh_node->bcast_own[offset]);
-			bit_mark(word, 0);
+			bit_mark(word,
+				 if_incoming_seqno - batman_packet->seqno - 2);
 			orig_neigh_node->bcast_own_sum[if_incoming->if_num] =
 				bit_packet_count(word);
 			spin_unlock_bh(&orig_neigh_node->ogm_cnt_lock);
@@ -794,14 +812,14 @@
 	     ((orig_node->last_real_seqno == batman_packet->seqno) &&
 	      (orig_node->last_ttl - 3 <= batman_packet->ttl))))
 		update_orig(bat_priv, orig_node, ethhdr, batman_packet,
-			    if_incoming, tt_buff, tt_buff_len, is_duplicate);
+			    if_incoming, tt_buff, is_duplicate);
 
 	/* is single hop (direct) neighbor */
 	if (is_single_hop_neigh) {
 
 		/* mark direct link on incoming interface */
 		schedule_forward_packet(orig_node, ethhdr, batman_packet,
-					1, tt_buff_len, if_incoming);
+					1, if_incoming);
 
 		bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: "
 			"rebroadcast neighbor packet with direct link flag\n");
@@ -824,7 +842,7 @@
 	bat_dbg(DBG_BATMAN, bat_priv,
 		"Forwarding packet: rebroadcast originator packet\n");
 	schedule_forward_packet(orig_node, ethhdr, batman_packet,
-				0, tt_buff_len, if_incoming);
+				0, if_incoming);
 
 out_neigh:
 	if ((orig_neigh_node) && (!is_single_hop_neigh))
@@ -1077,7 +1095,7 @@
  * This method rotates the bonding list and increases the
  * returned router's refcount. */
 static struct neigh_node *find_bond_router(struct orig_node *primary_orig,
-					   struct hard_iface *recv_if)
+					   const struct hard_iface *recv_if)
 {
 	struct neigh_node *tmp_neigh_node;
 	struct neigh_node *router = NULL, *first_candidate = NULL;
@@ -1128,7 +1146,7 @@
  *
  * Increases the returned router's refcount */
 static struct neigh_node *find_ifalter_router(struct orig_node *primary_orig,
-					      struct hard_iface *recv_if)
+					      const struct hard_iface *recv_if)
 {
 	struct neigh_node *tmp_neigh_node;
 	struct neigh_node *router = NULL, *first_candidate = NULL;
@@ -1171,12 +1189,124 @@
 	return router;
 }
 
+int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if)
+{
+	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
+	struct tt_query_packet *tt_query;
+	struct ethhdr *ethhdr;
+
+	/* drop the packet if it does not have the necessary minimum size */
+	if (unlikely(!pskb_may_pull(skb, sizeof(struct tt_query_packet))))
+		goto out;
+
+	/* the packet may need to be modified */
+	if (skb_cow(skb, sizeof(struct tt_query_packet)) < 0)
+		goto out;
+
+	ethhdr = (struct ethhdr *)skb_mac_header(skb);
+
+	/* packet with unicast indication but broadcast recipient */
+	if (is_broadcast_ether_addr(ethhdr->h_dest))
+		goto out;
+
+	/* packet with broadcast sender address */
+	if (is_broadcast_ether_addr(ethhdr->h_source))
+		goto out;
+
+	tt_query = (struct tt_query_packet *)skb->data;
+
+	tt_query->tt_data = ntohs(tt_query->tt_data);
+
+	switch (tt_query->flags & TT_QUERY_TYPE_MASK) {
+	case TT_REQUEST:
+		/* If we cannot provide an answer the tt_request is
+		 * forwarded */
+		if (!send_tt_response(bat_priv, tt_query)) {
+			bat_dbg(DBG_TT, bat_priv,
+				"Routing TT_REQUEST to %pM [%c]\n",
+				tt_query->dst,
+				(tt_query->flags & TT_FULL_TABLE ? 'F' : '.'));
+			tt_query->tt_data = htons(tt_query->tt_data);
+			return route_unicast_packet(skb, recv_if);
+		}
+		break;
+	case TT_RESPONSE:
+		/* packet needs to be linearised to access the TT changes */
+		if (skb_linearize(skb) < 0)
+			goto out;
+
+		if (is_my_mac(tt_query->dst))
+			handle_tt_response(bat_priv, tt_query);
+		else {
+			bat_dbg(DBG_TT, bat_priv,
+				"Routing TT_RESPONSE to %pM [%c]\n",
+				tt_query->dst,
+				(tt_query->flags & TT_FULL_TABLE ? 'F' : '.'));
+			tt_query->tt_data = htons(tt_query->tt_data);
+			return route_unicast_packet(skb, recv_if);
+		}
+		break;
+	}
+
+out:
+	/* returning NET_RX_DROP will make the calling function kfree the skb */
+	return NET_RX_DROP;
+}
+
+int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if)
+{
+	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
+	struct roam_adv_packet *roam_adv_packet;
+	struct orig_node *orig_node;
+	struct ethhdr *ethhdr;
+
+	/* drop the packet if it does not have the necessary minimum size */
+	if (unlikely(!pskb_may_pull(skb, sizeof(struct roam_adv_packet))))
+		goto out;
+
+	ethhdr = (struct ethhdr *)skb_mac_header(skb);
+
+	/* packet with unicast indication but broadcast recipient */
+	if (is_broadcast_ether_addr(ethhdr->h_dest))
+		goto out;
+
+	/* packet with broadcast sender address */
+	if (is_broadcast_ether_addr(ethhdr->h_source))
+		goto out;
+
+	roam_adv_packet = (struct roam_adv_packet *)skb->data;
+
+	if (!is_my_mac(roam_adv_packet->dst))
+		return route_unicast_packet(skb, recv_if);
+
+	orig_node = orig_hash_find(bat_priv, roam_adv_packet->src);
+	if (!orig_node)
+		goto out;
+
+	bat_dbg(DBG_TT, bat_priv, "Received ROAMING_ADV from %pM "
+		"(client %pM)\n", roam_adv_packet->src,
+		roam_adv_packet->client);
+
+	tt_global_add(bat_priv, orig_node, roam_adv_packet->client,
+		      atomic_read(&orig_node->last_ttvn) + 1, true);
+
+	/* Roaming phase starts: I have new information but the ttvn has not
+	 * been incremented yet. This flag will make me check all the incoming
+	 * packets for the correct destination. */
+	bat_priv->tt_poss_change = true;
+
+	orig_node_free_ref(orig_node);
+out:
+	/* returning NET_RX_DROP will make the calling function kfree the skb */
+	return NET_RX_DROP;
+}
+
 /* find a suitable router for this originator, and use
  * bonding if possible. increases the found neighbors
  * refcount.*/
 struct neigh_node *find_router(struct bat_priv *bat_priv,
 			       struct orig_node *orig_node,
-			       struct hard_iface *recv_if)
+			       const struct hard_iface *recv_if)
 {
 	struct orig_node *primary_orig_node;
 	struct orig_node *router_orig;
@@ -1240,6 +1370,9 @@
 		router = find_ifalter_router(primary_orig_node, recv_if);
 
 return_router:
+	if (router && router->if_incoming->if_status != IF_ACTIVE)
+		goto err_unlock;
+
 	rcu_read_unlock();
 	return router;
 err_unlock:
@@ -1354,14 +1487,84 @@
 	return ret;
 }
 
+static int check_unicast_ttvn(struct bat_priv *bat_priv,
+			       struct sk_buff *skb)
+{
+	uint8_t curr_ttvn;
+	struct orig_node *orig_node;
+	struct ethhdr *ethhdr;
+	struct hard_iface *primary_if;
+	struct unicast_packet *unicast_packet;
+	bool tt_poss_change;
+
+	/* the packet may need to be modified */
+	if (skb_cow(skb, sizeof(struct unicast_packet)) < 0)
+		return 0;
+
+	unicast_packet = (struct unicast_packet *)skb->data;
+
+	if (is_my_mac(unicast_packet->dest)) {
+		tt_poss_change = bat_priv->tt_poss_change;
+		curr_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
+	} else {
+		orig_node = orig_hash_find(bat_priv, unicast_packet->dest);
+
+		if (!orig_node)
+			return 0;
+
+		curr_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
+		tt_poss_change = orig_node->tt_poss_change;
+		orig_node_free_ref(orig_node);
+	}
+
+	/* Check whether I have to reroute the packet */
+	if (seq_before(unicast_packet->ttvn, curr_ttvn) || tt_poss_change) {
+		/* Linearize the skb before accessing it */
+		if (skb_linearize(skb) < 0)
+			return 0;
+
+		ethhdr = (struct ethhdr *)(skb->data +
+			sizeof(struct unicast_packet));
+		orig_node = transtable_search(bat_priv, ethhdr->h_dest);
+
+		if (!orig_node) {
+			if (!is_my_client(bat_priv, ethhdr->h_dest))
+				return 0;
+			primary_if = primary_if_get_selected(bat_priv);
+			if (!primary_if)
+				return 0;
+			memcpy(unicast_packet->dest,
+			       primary_if->net_dev->dev_addr, ETH_ALEN);
+			hardif_free_ref(primary_if);
+		} else {
+			memcpy(unicast_packet->dest, orig_node->orig,
+			       ETH_ALEN);
+			curr_ttvn = (uint8_t)
+				atomic_read(&orig_node->last_ttvn);
+			orig_node_free_ref(orig_node);
+		}
+
+		bat_dbg(DBG_ROUTES, bat_priv, "TTVN mismatch (old_ttvn %u "
+			"new_ttvn %u)! Rerouting unicast packet (for %pM) to "
+			"%pM\n", unicast_packet->ttvn, curr_ttvn,
+			ethhdr->h_dest, unicast_packet->dest);
+
+		unicast_packet->ttvn = curr_ttvn;
+	}
+	return 1;
+}
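
In short: a unicast frame records the ttvn under which its sender resolved
the inner destination; if that ttvn is stale for the destination node, or a
roaming event may have occurred (tt_poss_change), the frame is re-addressed
via the global translation table. The trigger condition alone, as a
hypothetical standalone predicate (seq_before() copied from main.h):

#include <stdbool.h>
#include <stdint.h>

#define smallest_signed_int(x) (1u << (7u + 8u * (sizeof(x) - 1u)))
#define seq_before(x, y) ({typeof(x) _d1 = (x); \
			  typeof(y) _d2 = (y); \
			  typeof(x) _dummy = (_d1 - _d2); \
			  (void) (&_d1 == &_d2); \
			  _dummy > smallest_signed_int(_dummy); })

static bool needs_reroute(uint8_t pkt_ttvn, uint8_t curr_ttvn,
			  bool tt_poss_change)
{
	return tt_poss_change || seq_before(pkt_ttvn, curr_ttvn);
}
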
+
 int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
 {
+	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
 	struct unicast_packet *unicast_packet;
-	int hdr_size = sizeof(struct unicast_packet);
+	int hdr_size = sizeof(*unicast_packet);
 
 	if (check_unicast_packet(skb, hdr_size) < 0)
 		return NET_RX_DROP;
 
+	if (!check_unicast_ttvn(bat_priv, skb))
+		return NET_RX_DROP;
+
 	unicast_packet = (struct unicast_packet *)skb->data;
 
 	/* packet for me */
@@ -1377,13 +1580,16 @@
 {
 	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
 	struct unicast_frag_packet *unicast_packet;
-	int hdr_size = sizeof(struct unicast_frag_packet);
+	int hdr_size = sizeof(*unicast_packet);
 	struct sk_buff *new_skb = NULL;
 	int ret;
 
 	if (check_unicast_packet(skb, hdr_size) < 0)
 		return NET_RX_DROP;
 
+	if (!check_unicast_ttvn(bat_priv, skb))
+		return NET_RX_DROP;
+
 	unicast_packet = (struct unicast_frag_packet *)skb->data;
 
 	/* packet for me */
@@ -1413,7 +1619,7 @@
 	struct orig_node *orig_node = NULL;
 	struct bcast_packet *bcast_packet;
 	struct ethhdr *ethhdr;
-	int hdr_size = sizeof(struct bcast_packet);
+	int hdr_size = sizeof(*bcast_packet);
 	int ret = NET_RX_DROP;
 	int32_t seq_diff;
 
@@ -1491,7 +1697,7 @@
 	struct vis_packet *vis_packet;
 	struct ethhdr *ethhdr;
 	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
-	int hdr_size = sizeof(struct vis_packet);
+	int hdr_size = sizeof(*vis_packet);
 
 	/* keep skb linear */
 	if (skb_linearize(skb) < 0)
diff --git a/net/batman-adv/routing.h b/net/batman-adv/routing.h
index 870f298..fb14e95 100644
--- a/net/batman-adv/routing.h
+++ b/net/batman-adv/routing.h
@@ -23,13 +23,12 @@
 #define _NET_BATMAN_ADV_ROUTING_H_
 
 void slide_own_bcast_window(struct hard_iface *hard_iface);
-void receive_bat_packet(struct ethhdr *ethhdr,
-				struct batman_packet *batman_packet,
-				unsigned char *tt_buff, int tt_buff_len,
-				struct hard_iface *if_incoming);
+void receive_bat_packet(const struct ethhdr *ethhdr,
+			struct batman_packet *batman_packet,
+			const unsigned char *tt_buff,
+			struct hard_iface *if_incoming);
 void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
-		   struct neigh_node *neigh_node, unsigned char *tt_buff,
-		   int tt_buff_len);
+		   struct neigh_node *neigh_node);
 int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
 int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if);
 int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
@@ -37,9 +36,11 @@
 int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
 int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if);
 int recv_bat_packet(struct sk_buff *skb, struct hard_iface *recv_if);
+int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if);
+int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if);
 struct neigh_node *find_router(struct bat_priv *bat_priv,
 			       struct orig_node *orig_node,
-			       struct hard_iface *recv_if);
+			       const struct hard_iface *recv_if);
 void bonding_candidate_del(struct orig_node *orig_node,
 			   struct neigh_node *neigh_node);
 
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index 3377927..7a2f082 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -33,14 +33,14 @@
 static void send_outstanding_bcast_packet(struct work_struct *work);
 
 /* apply hop penalty for a normal link */
-static uint8_t hop_penalty(const uint8_t tq, struct bat_priv *bat_priv)
+static uint8_t hop_penalty(uint8_t tq, const struct bat_priv *bat_priv)
 {
 	int hop_penalty = atomic_read(&bat_priv->hop_penalty);
 	return (tq * (TQ_MAX_VALUE - hop_penalty)) / (TQ_MAX_VALUE);
 }
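
With TQ_MAX_VALUE of 255 (defined in main.h, not shown in this hunk) the
penalty scales TQ multiplicatively per hop: e.g. a hop_penalty of 10 turns an
incoming tq of 255 into 255 * 245 / 255 = 245, and 200 into 192. The
arithmetic restated as a standalone sketch (the name and sample values are
illustrative):

#include <stdint.h>

#define TQ_MAX_VALUE 255 /* from main.h */

static uint8_t hop_penalty_sketch(uint8_t tq, int hop_penalty)
{
	/* hop_penalty 10: tq 255 -> 245, tq 200 -> 192 */
	return (tq * (TQ_MAX_VALUE - hop_penalty)) / TQ_MAX_VALUE;
}
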
 
 /* when do we schedule our own packet to be sent */
-static unsigned long own_send_time(struct bat_priv *bat_priv)
+static unsigned long own_send_time(const struct bat_priv *bat_priv)
 {
 	return jiffies + msecs_to_jiffies(
 		   atomic_read(&bat_priv->orig_interval) -
@@ -55,9 +55,8 @@
 
 /* send out an already prepared packet to the given address via the
  * specified batman interface */
-int send_skb_packet(struct sk_buff *skb,
-				struct hard_iface *hard_iface,
-				uint8_t *dst_addr)
+int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
+		    const uint8_t *dst_addr)
 {
 	struct ethhdr *ethhdr;
 
@@ -74,7 +73,7 @@
 	}
 
 	/* push to the ethernet header. */
-	if (my_skb_head_push(skb, sizeof(struct ethhdr)) < 0)
+	if (my_skb_head_push(skb, sizeof(*ethhdr)) < 0)
 		goto send_skb_err;
 
 	skb_reset_mac_header(skb);
@@ -121,7 +120,7 @@
 	/* adjust all flags and log packets */
 	while (aggregated_packet(buff_pos,
 				 forw_packet->packet_len,
-				 batman_packet->num_tt)) {
+				 batman_packet->tt_num_changes)) {
 
 		/* we might have aggregated direct link packets with an
 		 * ordinary base packet */
@@ -136,17 +135,17 @@
 							    "Forwarding"));
 		bat_dbg(DBG_BATMAN, bat_priv,
 			"%s %spacket (originator %pM, seqno %d, TQ %d, TTL %d,"
-			" IDF %s) on interface %s [%pM]\n",
+			" IDF %s, ttvn %d) on interface %s [%pM]\n",
 			fwd_str, (packet_num > 0 ? "aggregated " : ""),
 			batman_packet->orig, ntohl(batman_packet->seqno),
 			batman_packet->tq, batman_packet->ttl,
 			(batman_packet->flags & DIRECTLINK ?
 			 "on" : "off"),
-			hard_iface->net_dev->name,
+			batman_packet->ttvn, hard_iface->net_dev->name,
 			hard_iface->net_dev->dev_addr);
 
-		buff_pos += sizeof(struct batman_packet) +
-			(batman_packet->num_tt * ETH_ALEN);
+		buff_pos += sizeof(*batman_packet) +
+			tt_len(batman_packet->tt_num_changes);
 		packet_num++;
 		batman_packet = (struct batman_packet *)
 			(forw_packet->skb->data + buff_pos);
@@ -166,7 +165,7 @@
 	struct bat_priv *bat_priv;
 	struct batman_packet *batman_packet =
 		(struct batman_packet *)(forw_packet->skb->data);
-	unsigned char directlink = (batman_packet->flags & DIRECTLINK ? 1 : 0);
+	int directlink = (batman_packet->flags & DIRECTLINK ? 1 : 0);
 
 	if (!forw_packet->if_incoming) {
 		pr_err("Error - can't forward packet: incoming iface not "
@@ -214,26 +213,18 @@
 	rcu_read_unlock();
 }
 
-static void rebuild_batman_packet(struct bat_priv *bat_priv,
-				  struct hard_iface *hard_iface)
+static void realloc_packet_buffer(struct hard_iface *hard_iface,
+				int new_len)
 {
-	int new_len;
 	unsigned char *new_buff;
 	struct batman_packet *batman_packet;
 
-	new_len = sizeof(struct batman_packet) +
-			(bat_priv->num_local_tt * ETH_ALEN);
 	new_buff = kmalloc(new_len, GFP_ATOMIC);
 
 	/* keep old buffer if kmalloc should fail */
 	if (new_buff) {
 		memcpy(new_buff, hard_iface->packet_buff,
-		       sizeof(struct batman_packet));
-		batman_packet = (struct batman_packet *)new_buff;
-
-		batman_packet->num_tt = tt_local_fill_buffer(bat_priv,
-				new_buff + sizeof(struct batman_packet),
-				new_len - sizeof(struct batman_packet));
+		       sizeof(*batman_packet));
 
 		kfree(hard_iface->packet_buff);
 		hard_iface->packet_buff = new_buff;
@@ -241,6 +232,46 @@
 	}
 }
 
+/* when calling this function, (hard_iface == primary_if) has to be true */
+static void prepare_packet_buffer(struct bat_priv *bat_priv,
+				  struct hard_iface *hard_iface)
+{
+	int new_len;
+	struct batman_packet *batman_packet;
+
+	new_len = BAT_PACKET_LEN +
+		  tt_len((uint8_t)atomic_read(&bat_priv->tt_local_changes));
+
+	/* if we have too many changes for one packet, don't send any
+	 * and wait for the tt table request, which will be fragmented */
+	if (new_len > hard_iface->soft_iface->mtu)
+		new_len = BAT_PACKET_LEN;
+
+	realloc_packet_buffer(hard_iface, new_len);
+	batman_packet = (struct batman_packet *)hard_iface->packet_buff;
+
+	atomic_set(&bat_priv->tt_crc, tt_local_crc(bat_priv));
+
+	/* reset the sending counter */
+	atomic_set(&bat_priv->tt_ogm_append_cnt, TT_OGM_APPEND_MAX);
+
+	batman_packet->tt_num_changes = tt_changes_fill_buffer(bat_priv,
+				hard_iface->packet_buff + BAT_PACKET_LEN,
+				hard_iface->packet_len - BAT_PACKET_LEN);
+
+}
+
+static void reset_packet_buffer(struct bat_priv *bat_priv,
+	struct hard_iface *hard_iface)
+{
+	struct batman_packet *batman_packet;
+
+	realloc_packet_buffer(hard_iface, BAT_PACKET_LEN);
+
+	batman_packet = (struct batman_packet *)hard_iface->packet_buff;
+	batman_packet->tt_num_changes = 0;
+}
+
 void schedule_own_packet(struct hard_iface *hard_iface)
 {
 	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
@@ -266,14 +297,23 @@
 	if (hard_iface->if_status == IF_TO_BE_ACTIVATED)
 		hard_iface->if_status = IF_ACTIVE;
 
-	/* if local tt has changed and interface is a primary interface */
-	if ((atomic_read(&bat_priv->tt_local_changed)) &&
-	    (hard_iface == primary_if))
-		rebuild_batman_packet(bat_priv, hard_iface);
+	if (hard_iface == primary_if) {
+		/* if at least one change happened */
+		if (atomic_read(&bat_priv->tt_local_changes) > 0) {
+			prepare_packet_buffer(bat_priv, hard_iface);
+			/* Increment the TTVN only once per OGM interval */
+			atomic_inc(&bat_priv->ttvn);
+			bat_priv->tt_poss_change = false;
+		}
+
+		/* if the changes have been sent enough times */
+		if (!atomic_dec_not_zero(&bat_priv->tt_ogm_append_cnt))
+			reset_packet_buffer(bat_priv, hard_iface);
+	}
 
 	/**
 	 * NOTE: packet_buff might just have been re-allocated in
-	 * rebuild_batman_packet()
+	 * prepare_packet_buffer() or in reset_packet_buffer()
 	 */
 	batman_packet = (struct batman_packet *)hard_iface->packet_buff;
 
@@ -281,6 +321,9 @@
 	batman_packet->seqno =
 		htonl((uint32_t)atomic_read(&hard_iface->seqno));
 
+	batman_packet->ttvn = atomic_read(&bat_priv->ttvn);
+	batman_packet->tt_crc = htons((uint16_t)atomic_read(&bat_priv->tt_crc));
+
 	if (vis_server == VIS_TYPE_SERVER_SYNC)
 		batman_packet->flags |= VIS_SERVER;
 	else
@@ -291,7 +334,7 @@
 		batman_packet->gw_flags =
 				(uint8_t)atomic_read(&bat_priv->gw_bandwidth);
 	else
-		batman_packet->gw_flags = 0;
+		batman_packet->gw_flags = NO_FLAGS;
 
 	atomic_inc(&hard_iface->seqno);
 
@@ -307,15 +350,16 @@
 }
 
 void schedule_forward_packet(struct orig_node *orig_node,
-			     struct ethhdr *ethhdr,
+			     const struct ethhdr *ethhdr,
 			     struct batman_packet *batman_packet,
-			     uint8_t directlink, int tt_buff_len,
+			     int directlink,
 			     struct hard_iface *if_incoming)
 {
 	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
 	struct neigh_node *router;
-	unsigned char in_tq, in_ttl, tq_avg = 0;
+	uint8_t in_tq, in_ttl, tq_avg = 0;
 	unsigned long send_time;
+	uint8_t tt_num_changes;
 
 	if (batman_packet->ttl <= 1) {
 		bat_dbg(DBG_BATMAN, bat_priv, "ttl exceeded\n");
@@ -326,6 +370,7 @@
 
 	in_tq = batman_packet->tq;
 	in_ttl = batman_packet->ttl;
+	tt_num_changes = batman_packet->tt_num_changes;
 
 	batman_packet->ttl--;
 	memcpy(batman_packet->prev_sender, ethhdr->h_source, ETH_ALEN);
@@ -358,6 +403,7 @@
 		batman_packet->ttl);
 
 	batman_packet->seqno = htonl(batman_packet->seqno);
+	batman_packet->tt_crc = htons(batman_packet->tt_crc);
 
 	/* switch off the primaries first hop flag when forwarding */
 	batman_packet->flags &= ~PRIMARIES_FIRST_HOP;
@@ -369,7 +415,7 @@
 	send_time = forward_send_time();
 	add_bat_packet_to_list(bat_priv,
 			       (unsigned char *)batman_packet,
-			       sizeof(struct batman_packet) + tt_buff_len,
+			       sizeof(*batman_packet) + tt_len(tt_num_changes),
 			       if_incoming, 0, send_time);
 }
 
@@ -408,11 +454,13 @@
  *
  * The skb is not consumed, so the caller should make sure that the
  * skb is freed. */
-int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb)
+int add_bcast_packet_to_list(struct bat_priv *bat_priv,
+			     const struct sk_buff *skb)
 {
 	struct hard_iface *primary_if = NULL;
 	struct forw_packet *forw_packet;
 	struct bcast_packet *bcast_packet;
+	struct sk_buff *newskb;
 
 	if (!atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
 		bat_dbg(DBG_BATMAN, bat_priv, "bcast packet queue full\n");
@@ -423,22 +471,22 @@
 	if (!primary_if)
 		goto out_and_inc;
 
-	forw_packet = kmalloc(sizeof(struct forw_packet), GFP_ATOMIC);
+	forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);
 
 	if (!forw_packet)
 		goto out_and_inc;
 
-	skb = skb_copy(skb, GFP_ATOMIC);
-	if (!skb)
+	newskb = skb_copy(skb, GFP_ATOMIC);
+	if (!newskb)
 		goto packet_free;
 
 	/* as we have a copy now, it is safe to decrease the TTL */
-	bcast_packet = (struct bcast_packet *)skb->data;
+	bcast_packet = (struct bcast_packet *)newskb->data;
 	bcast_packet->ttl--;
 
-	skb_reset_mac_header(skb);
+	skb_reset_mac_header(newskb);
 
-	forw_packet->skb = skb;
+	forw_packet->skb = newskb;
 	forw_packet->if_incoming = primary_if;
 
 	/* how often did we send the bcast packet ? */
@@ -537,7 +585,7 @@
 }
 
 void purge_outstanding_packets(struct bat_priv *bat_priv,
-			       struct hard_iface *hard_iface)
+			       const struct hard_iface *hard_iface)
 {
 	struct forw_packet *forw_packet;
 	struct hlist_node *tmp_node, *safe_tmp_node;
diff --git a/net/batman-adv/send.h b/net/batman-adv/send.h
index 247172d..633224a 100644
--- a/net/batman-adv/send.h
+++ b/net/batman-adv/send.h
@@ -22,18 +22,18 @@
 #ifndef _NET_BATMAN_ADV_SEND_H_
 #define _NET_BATMAN_ADV_SEND_H_
 
-int send_skb_packet(struct sk_buff *skb,
-				struct hard_iface *hard_iface,
-				uint8_t *dst_addr);
+int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
+		    const uint8_t *dst_addr);
 void schedule_own_packet(struct hard_iface *hard_iface);
 void schedule_forward_packet(struct orig_node *orig_node,
-			     struct ethhdr *ethhdr,
+			     const struct ethhdr *ethhdr,
 			     struct batman_packet *batman_packet,
-			     uint8_t directlink, int tt_buff_len,
+			     int directlink,
 			     struct hard_iface *if_outgoing);
-int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb);
+int add_bcast_packet_to_list(struct bat_priv *bat_priv,
+			     const struct sk_buff *skb);
 void send_outstanding_bat_packet(struct work_struct *work);
 void purge_outstanding_packets(struct bat_priv *bat_priv,
-			       struct hard_iface *hard_iface);
+			       const struct hard_iface *hard_iface);
 
 #endif /* _NET_BATMAN_ADV_SEND_H_ */
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index d5aa609..2dcdbb7 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -30,6 +30,7 @@
 #include "gateway_common.h"
 #include "gateway_client.h"
 #include "bat_sysfs.h"
+#include "originator.h"
 #include <linux/slab.h>
 #include <linux/ethtool.h>
 #include <linux/etherdevice.h>
@@ -123,8 +124,7 @@
 		goto out;
 	}
 
-	softif_neigh_vid = kzalloc(sizeof(struct softif_neigh_vid),
-				   GFP_ATOMIC);
+	softif_neigh_vid = kzalloc(sizeof(*softif_neigh_vid), GFP_ATOMIC);
 	if (!softif_neigh_vid)
 		goto out;
 
@@ -146,7 +146,7 @@
 }
 
 static struct softif_neigh *softif_neigh_get(struct bat_priv *bat_priv,
-					     uint8_t *addr, short vid)
+					     const uint8_t *addr, short vid)
 {
 	struct softif_neigh_vid *softif_neigh_vid;
 	struct softif_neigh *softif_neigh = NULL;
@@ -170,7 +170,7 @@
 		goto unlock;
 	}
 
-	softif_neigh = kzalloc(sizeof(struct softif_neigh), GFP_ATOMIC);
+	softif_neigh = kzalloc(sizeof(*softif_neigh), GFP_ATOMIC);
 	if (!softif_neigh)
 		goto unlock;
 
@@ -242,7 +242,8 @@
 	if (new_neigh && !atomic_inc_not_zero(&new_neigh->refcount))
 		new_neigh = NULL;
 
-	curr_neigh = softif_neigh_vid->softif_neigh;
+	curr_neigh = rcu_dereference_protected(softif_neigh_vid->softif_neigh,
+					       1);
 	rcu_assign_pointer(softif_neigh_vid->softif_neigh, new_neigh);
 
 	if ((curr_neigh) && (!new_neigh))
@@ -380,7 +381,7 @@
 	struct softif_neigh *softif_neigh, *curr_softif_neigh;
 	struct softif_neigh_vid *softif_neigh_vid;
 	struct hlist_node *node, *node_tmp, *node_tmp2;
-	char do_deselect;
+	int do_deselect;
 
 	rcu_read_lock();
 	hlist_for_each_entry_rcu(softif_neigh_vid, node,
@@ -534,7 +535,7 @@
 	/* only modify transtable if it has been initialised before */
 	if (atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE) {
 		tt_local_remove(bat_priv, dev->dev_addr,
-				 "mac address changed");
+				"mac address changed", false);
 		tt_local_add(dev, addr->sa_data);
 	}
 
@@ -553,7 +554,7 @@
 	return 0;
 }
 
-int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
+static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
 {
 	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
 	struct bat_priv *bat_priv = netdev_priv(soft_iface);
@@ -561,6 +562,7 @@
 	struct bcast_packet *bcast_packet;
 	struct vlan_ethhdr *vhdr;
 	struct softif_neigh *curr_softif_neigh = NULL;
+	struct orig_node *orig_node = NULL;
 	int data_len = skb->len, ret;
 	short vid = -1;
 	bool do_bcast = false;
@@ -592,11 +594,13 @@
 	if (curr_softif_neigh)
 		goto dropped;
 
-	/* TODO: check this for locks */
+	/* Register the client MAC in the transtable */
 	tt_local_add(soft_iface, ethhdr->h_source);
 
-	if (is_multicast_ether_addr(ethhdr->h_dest)) {
-		ret = gw_is_target(bat_priv, skb);
+	orig_node = transtable_search(bat_priv, ethhdr->h_dest);
+	if (is_multicast_ether_addr(ethhdr->h_dest) ||
+				(orig_node && orig_node->gw_flags)) {
+		ret = gw_is_target(bat_priv, skb, orig_node);
 
 		if (ret < 0)
 			goto dropped;
@@ -611,7 +615,7 @@
 		if (!primary_if)
 			goto dropped;
 
-		if (my_skb_head_push(skb, sizeof(struct bcast_packet)) < 0)
+		if (my_skb_head_push(skb, sizeof(*bcast_packet)) < 0)
 			goto dropped;
 
 		bcast_packet = (struct bcast_packet *)skb->data;
@@ -656,6 +660,8 @@
 		softif_neigh_free_ref(curr_softif_neigh);
 	if (primary_if)
 		hardif_free_ref(primary_if);
+	if (orig_node)
+		orig_node_free_ref(orig_node);
 	return NETDEV_TX_OK;
 }
 
@@ -790,17 +796,16 @@
 
 	SET_ETHTOOL_OPS(dev, &bat_ethtool_ops);
 
-	memset(priv, 0, sizeof(struct bat_priv));
+	memset(priv, 0, sizeof(*priv));
 }
 
-struct net_device *softif_create(char *name)
+struct net_device *softif_create(const char *name)
 {
 	struct net_device *soft_iface;
 	struct bat_priv *bat_priv;
 	int ret;
 
-	soft_iface = alloc_netdev(sizeof(struct bat_priv) , name,
-				   interface_setup);
+	soft_iface = alloc_netdev(sizeof(*bat_priv), name, interface_setup);
 
 	if (!soft_iface) {
 		pr_err("Unable to allocate the batman interface: %s\n", name);
@@ -831,7 +836,13 @@
 
 	atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
 	atomic_set(&bat_priv->bcast_seqno, 1);
-	atomic_set(&bat_priv->tt_local_changed, 0);
+	atomic_set(&bat_priv->ttvn, 0);
+	atomic_set(&bat_priv->tt_local_changes, 0);
+	atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
+
+	bat_priv->tt_buff = NULL;
+	bat_priv->tt_buff_len = 0;
+	bat_priv->tt_poss_change = false;
 
 	bat_priv->primary_if = NULL;
 	bat_priv->num_ifaces = 0;
@@ -872,7 +883,7 @@
 	unregister_netdevice(soft_iface);
 }
 
-int softif_is_valid(struct net_device *net_dev)
+int softif_is_valid(const struct net_device *net_dev)
 {
 #ifdef HAVE_NET_DEVICE_OPS
 	if (net_dev->netdev_ops->ndo_start_xmit == interface_tx)
@@ -924,4 +935,3 @@
 {
 	return 1;
 }
-
diff --git a/net/batman-adv/soft-interface.h b/net/batman-adv/soft-interface.h
index 4789b6f..001546f 100644
--- a/net/batman-adv/soft-interface.h
+++ b/net/batman-adv/soft-interface.h
@@ -25,12 +25,11 @@
 int my_skb_head_push(struct sk_buff *skb, unsigned int len);
 int softif_neigh_seq_print_text(struct seq_file *seq, void *offset);
 void softif_neigh_purge(struct bat_priv *bat_priv);
-int interface_tx(struct sk_buff *skb, struct net_device *soft_iface);
 void interface_rx(struct net_device *soft_iface,
 		  struct sk_buff *skb, struct hard_iface *recv_if,
 		  int hdr_size);
-struct net_device *softif_create(char *name);
+struct net_device *softif_create(const char *name);
 void softif_destroy(struct net_device *soft_iface);
-int softif_is_valid(struct net_device *net_dev);
+int softif_is_valid(const struct net_device *net_dev);
 
 #endif /* _NET_BATMAN_ADV_SOFT_INTERFACE_H_ */
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 7b72966..5f1fcd5 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -23,38 +23,45 @@
 #include "translation-table.h"
 #include "soft-interface.h"
 #include "hard-interface.h"
+#include "send.h"
 #include "hash.h"
 #include "originator.h"
+#include "routing.h"
 
-static void tt_local_purge(struct work_struct *work);
-static void _tt_global_del_orig(struct bat_priv *bat_priv,
-				 struct tt_global_entry *tt_global_entry,
-				 char *message);
+#include <linux/crc16.h>
+
+static void _tt_global_del(struct bat_priv *bat_priv,
+			   struct tt_global_entry *tt_global_entry,
+			   const char *message);
+static void tt_purge(struct work_struct *work);
 
 /* returns 1 if they are the same mac addr */
-static int compare_ltt(struct hlist_node *node, void *data2)
+static int compare_ltt(const struct hlist_node *node, const void *data2)
 {
-	void *data1 = container_of(node, struct tt_local_entry, hash_entry);
+	const void *data1 = container_of(node, struct tt_local_entry,
+					 hash_entry);
 
 	return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
 }
 
 /* returns 1 if they are the same mac addr */
-static int compare_gtt(struct hlist_node *node, void *data2)
+static int compare_gtt(const struct hlist_node *node, const void *data2)
 {
-	void *data1 = container_of(node, struct tt_global_entry, hash_entry);
+	const void *data1 = container_of(node, struct tt_global_entry,
+					 hash_entry);
 
 	return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
 }
 
-static void tt_local_start_timer(struct bat_priv *bat_priv)
+static void tt_start_timer(struct bat_priv *bat_priv)
 {
-	INIT_DELAYED_WORK(&bat_priv->tt_work, tt_local_purge);
-	queue_delayed_work(bat_event_workqueue, &bat_priv->tt_work, 10 * HZ);
+	INIT_DELAYED_WORK(&bat_priv->tt_work, tt_purge);
+	queue_delayed_work(bat_event_workqueue, &bat_priv->tt_work,
+			   msecs_to_jiffies(5000));
 }
 
 static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv,
-						   void *data)
+						 const void *data)
 {
 	struct hashtable_t *hash = bat_priv->tt_local_hash;
 	struct hlist_head *head;
@@ -73,6 +80,9 @@
 		if (!compare_eth(tt_local_entry, data))
 			continue;
 
+		if (!atomic_inc_not_zero(&tt_local_entry->refcount))
+			continue;
+
 		tt_local_entry_tmp = tt_local_entry;
 		break;
 	}
@@ -82,7 +92,7 @@
 }
 
 static struct tt_global_entry *tt_global_hash_find(struct bat_priv *bat_priv,
-						     void *data)
+						   const void *data)
 {
 	struct hashtable_t *hash = bat_priv->tt_global_hash;
 	struct hlist_head *head;
@@ -102,6 +112,9 @@
 		if (!compare_eth(tt_global_entry, data))
 			continue;
 
+		if (!atomic_inc_not_zero(&tt_global_entry->refcount))
+			continue;
+
 		tt_global_entry_tmp = tt_global_entry;
 		break;
 	}
@@ -110,7 +123,57 @@
 	return tt_global_entry_tmp;
 }
 
-int tt_local_init(struct bat_priv *bat_priv)
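+/* returns true if the given timeout (in ms) has expired since starting_time */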
+static bool is_out_of_time(unsigned long starting_time, unsigned long timeout)
+{
+	unsigned long deadline;
+	deadline = starting_time + msecs_to_jiffies(timeout);
+
+	return time_after(jiffies, deadline);
+}
+
+static void tt_local_entry_free_ref(struct tt_local_entry *tt_local_entry)
+{
+	if (atomic_dec_and_test(&tt_local_entry->refcount))
+		kfree_rcu(tt_local_entry, rcu);
+}
+
+static void tt_global_entry_free_ref(struct tt_global_entry *tt_global_entry)
+{
+	if (atomic_dec_and_test(&tt_global_entry->refcount))
+		kfree_rcu(tt_global_entry, rcu);
+}
+
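+/* records a local table change so that it can be appended to the next
+ * OGM as a tt diff */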
+static void tt_local_event(struct bat_priv *bat_priv, uint8_t op,
+			   const uint8_t *addr, bool roaming)
+{
+	struct tt_change_node *tt_change_node;
+
+	tt_change_node = kmalloc(sizeof(*tt_change_node), GFP_ATOMIC);
+
+	if (!tt_change_node)
+		return;
+
+	tt_change_node->change.flags = op;
+	if (roaming)
+		tt_change_node->change.flags |= TT_CLIENT_ROAM;
+
+	memcpy(tt_change_node->change.addr, addr, ETH_ALEN);
+
+	spin_lock_bh(&bat_priv->tt_changes_list_lock);
+	/* track the change in the OGM interval list */
+	list_add_tail(&tt_change_node->list, &bat_priv->tt_changes_list);
+	atomic_inc(&bat_priv->tt_local_changes);
+	spin_unlock_bh(&bat_priv->tt_changes_list_lock);
+
+	atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
+}
+
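+/* returns the number of bytes needed to carry changes_num tt changes */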
+int tt_len(int changes_num)
+{
+	return changes_num * sizeof(struct tt_change);
+}
+
+static int tt_local_init(struct bat_priv *bat_priv)
 {
 	if (bat_priv->tt_local_hash)
 		return 1;
@@ -120,54 +183,35 @@
 	if (!bat_priv->tt_local_hash)
 		return 0;
 
-	atomic_set(&bat_priv->tt_local_changed, 0);
-	tt_local_start_timer(bat_priv);
-
 	return 1;
 }
 
-void tt_local_add(struct net_device *soft_iface, uint8_t *addr)
+void tt_local_add(struct net_device *soft_iface, const uint8_t *addr)
 {
 	struct bat_priv *bat_priv = netdev_priv(soft_iface);
-	struct tt_local_entry *tt_local_entry;
-	struct tt_global_entry *tt_global_entry;
-	int required_bytes;
+	struct tt_local_entry *tt_local_entry = NULL;
+	struct tt_global_entry *tt_global_entry = NULL;
 
-	spin_lock_bh(&bat_priv->tt_lhash_lock);
 	tt_local_entry = tt_local_hash_find(bat_priv, addr);
-	spin_unlock_bh(&bat_priv->tt_lhash_lock);
 
 	if (tt_local_entry) {
 		tt_local_entry->last_seen = jiffies;
-		return;
+		goto out;
 	}
 
-	/* only announce as many hosts as possible in the batman-packet and
-	   space in batman_packet->num_tt That also should give a limit to
-	   MAC-flooding. */
-	required_bytes = (bat_priv->num_local_tt + 1) * ETH_ALEN;
-	required_bytes += BAT_PACKET_LEN;
-
-	if ((required_bytes > ETH_DATA_LEN) ||
-	    (atomic_read(&bat_priv->aggregated_ogms) &&
-	     required_bytes > MAX_AGGREGATION_BYTES) ||
-	    (bat_priv->num_local_tt + 1 > 255)) {
-		bat_dbg(DBG_ROUTES, bat_priv,
-			"Can't add new local tt entry (%pM): "
-			"number of local tt entries exceeds packet size\n",
-			addr);
-		return;
-	}
-
-	bat_dbg(DBG_ROUTES, bat_priv,
-		"Creating new local tt entry: %pM\n", addr);
-
-	tt_local_entry = kmalloc(sizeof(struct tt_local_entry), GFP_ATOMIC);
+	tt_local_entry = kmalloc(sizeof(*tt_local_entry), GFP_ATOMIC);
 	if (!tt_local_entry)
-		return;
+		goto out;
+
+	tt_local_event(bat_priv, NO_FLAGS, addr, false);
+
+	bat_dbg(DBG_TT, bat_priv,
+		"Creating new local tt entry: %pM (ttvn: %d)\n", addr,
+		(uint8_t)atomic_read(&bat_priv->ttvn));
 
 	memcpy(tt_local_entry->addr, addr, ETH_ALEN);
 	tt_local_entry->last_seen = jiffies;
+	atomic_set(&tt_local_entry->refcount, 2);
 
 	/* the batman interface mac address should never be purged */
 	if (compare_eth(addr, soft_iface->dev_addr))
@@ -175,61 +219,75 @@
 	else
 		tt_local_entry->never_purge = 0;
 
-	spin_lock_bh(&bat_priv->tt_lhash_lock);
-
 	hash_add(bat_priv->tt_local_hash, compare_ltt, choose_orig,
 		 tt_local_entry, &tt_local_entry->hash_entry);
-	bat_priv->num_local_tt++;
-	atomic_set(&bat_priv->tt_local_changed, 1);
 
-	spin_unlock_bh(&bat_priv->tt_lhash_lock);
+	atomic_inc(&bat_priv->num_local_tt);
 
 	/* remove address from global hash if present */
-	spin_lock_bh(&bat_priv->tt_ghash_lock);
-
 	tt_global_entry = tt_global_hash_find(bat_priv, addr);
 
+	/* Check whether this is a roaming event */
+	if (tt_global_entry) {
+		/* This node is probably going to update its tt table */
+		tt_global_entry->orig_node->tt_poss_change = true;
+		_tt_global_del(bat_priv, tt_global_entry,
+			       "local tt received");
+		send_roam_adv(bat_priv, tt_global_entry->addr,
+			      tt_global_entry->orig_node);
+	}
+out:
+	if (tt_local_entry)
+		tt_local_entry_free_ref(tt_local_entry);
 	if (tt_global_entry)
-		_tt_global_del_orig(bat_priv, tt_global_entry,
-				     "local tt received");
-
-	spin_unlock_bh(&bat_priv->tt_ghash_lock);
+		tt_global_entry_free_ref(tt_global_entry);
 }
 
-int tt_local_fill_buffer(struct bat_priv *bat_priv,
-			  unsigned char *buff, int buff_len)
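+/* copies the queued local tt changes into buff (entries that do not fit
+ * are dropped) and keeps a copy for later tt_request replies; returns the
+ * number of changes the buffer can hold */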
+int tt_changes_fill_buffer(struct bat_priv *bat_priv,
+			   unsigned char *buff, int buff_len)
 {
-	struct hashtable_t *hash = bat_priv->tt_local_hash;
-	struct tt_local_entry *tt_local_entry;
-	struct hlist_node *node;
-	struct hlist_head *head;
-	int i, count = 0;
+	int count = 0, tot_changes = 0;
+	struct tt_change_node *entry, *safe;
 
-	spin_lock_bh(&bat_priv->tt_lhash_lock);
+	if (buff_len > 0)
+		tot_changes = buff_len / tt_len(1);
 
-	for (i = 0; i < hash->size; i++) {
-		head = &hash->table[i];
+	spin_lock_bh(&bat_priv->tt_changes_list_lock);
+	atomic_set(&bat_priv->tt_local_changes, 0);
 
-		rcu_read_lock();
-		hlist_for_each_entry_rcu(tt_local_entry, node,
-					 head, hash_entry) {
-			if (buff_len < (count + 1) * ETH_ALEN)
-				break;
-
-			memcpy(buff + (count * ETH_ALEN), tt_local_entry->addr,
-			       ETH_ALEN);
-
+	list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
+			list) {
+		if (count < tot_changes) {
+			memcpy(buff + tt_len(count),
+			       &entry->change, sizeof(struct tt_change));
 			count++;
 		}
-		rcu_read_unlock();
+		list_del(&entry->list);
+		kfree(entry);
 	}
+	spin_unlock_bh(&bat_priv->tt_changes_list_lock);
 
-	/* if we did not get all new local tts see you next time  ;-) */
-	if (count == bat_priv->num_local_tt)
-		atomic_set(&bat_priv->tt_local_changed, 0);
+	/* Keep the buffer for possible tt_request */
+	spin_lock_bh(&bat_priv->tt_buff_lock);
+	kfree(bat_priv->tt_buff);
+	bat_priv->tt_buff_len = 0;
+	bat_priv->tt_buff = NULL;
+	/* Check whether this new OGM carries no changes because of size
+	 * constraints */
+	if (buff_len > 0) {
+		/* if kmalloc() fails we will reply with the full table
+		 * instead of providing the diff
+		 */
+		bat_priv->tt_buff = kmalloc(buff_len, GFP_ATOMIC);
+		if (bat_priv->tt_buff) {
+			memcpy(bat_priv->tt_buff, buff, buff_len);
+			bat_priv->tt_buff_len = buff_len;
+		}
+	}
+	spin_unlock_bh(&bat_priv->tt_buff_lock);
 
-	spin_unlock_bh(&bat_priv->tt_lhash_lock);
-	return count;
+	return tot_changes;
 }
 
 int tt_local_seq_print_text(struct seq_file *seq, void *offset)
@@ -261,10 +319,8 @@
 	}
 
 	seq_printf(seq, "Locally retrieved addresses (from %s) "
-		   "announced via TT:\n",
-		   net_dev->name);
-
-	spin_lock_bh(&bat_priv->tt_lhash_lock);
+		   "announced via TT (TTVN: %u):\n",
+		   net_dev->name, (uint8_t)atomic_read(&bat_priv->ttvn));
 
 	buf_size = 1;
 	/* Estimate length for: " * xx:xx:xx:xx:xx:xx\n" */
@@ -279,7 +335,6 @@
 
 	buff = kmalloc(buf_size, GFP_ATOMIC);
 	if (!buff) {
-		spin_unlock_bh(&bat_priv->tt_lhash_lock);
 		ret = -ENOMEM;
 		goto out;
 	}
@@ -299,8 +354,6 @@
 		rcu_read_unlock();
 	}
 
-	spin_unlock_bh(&bat_priv->tt_lhash_lock);
-
 	seq_printf(seq, "%s", buff);
 	kfree(buff);
 out:
@@ -309,92 +362,108 @@
 	return ret;
 }
 
-static void _tt_local_del(struct hlist_node *node, void *arg)
-{
-	struct bat_priv *bat_priv = (struct bat_priv *)arg;
-	void *data = container_of(node, struct tt_local_entry, hash_entry);
-
-	kfree(data);
-	bat_priv->num_local_tt--;
-	atomic_set(&bat_priv->tt_local_changed, 1);
-}
-
 static void tt_local_del(struct bat_priv *bat_priv,
-			  struct tt_local_entry *tt_local_entry,
-			  char *message)
+			 struct tt_local_entry *tt_local_entry,
+			 const char *message)
 {
-	bat_dbg(DBG_ROUTES, bat_priv, "Deleting local tt entry (%pM): %s\n",
+	bat_dbg(DBG_TT, bat_priv, "Deleting local tt entry (%pM): %s\n",
 		tt_local_entry->addr, message);
 
+	atomic_dec(&bat_priv->num_local_tt);
+
 	hash_remove(bat_priv->tt_local_hash, compare_ltt, choose_orig,
 		    tt_local_entry->addr);
-	_tt_local_del(&tt_local_entry->hash_entry, bat_priv);
+
+	tt_local_entry_free_ref(tt_local_entry);
 }
 
-void tt_local_remove(struct bat_priv *bat_priv,
-		      uint8_t *addr, char *message)
+void tt_local_remove(struct bat_priv *bat_priv, const uint8_t *addr,
+		     const char *message, bool roaming)
 {
-	struct tt_local_entry *tt_local_entry;
-
-	spin_lock_bh(&bat_priv->tt_lhash_lock);
+	struct tt_local_entry *tt_local_entry = NULL;
 
 	tt_local_entry = tt_local_hash_find(bat_priv, addr);
 
-	if (tt_local_entry)
-		tt_local_del(bat_priv, tt_local_entry, message);
+	if (!tt_local_entry)
+		goto out;
 
-	spin_unlock_bh(&bat_priv->tt_lhash_lock);
+	tt_local_event(bat_priv, TT_CHANGE_DEL, tt_local_entry->addr, roaming);
+	tt_local_del(bat_priv, tt_local_entry, message);
+out:
+	if (tt_local_entry)
+		tt_local_entry_free_ref(tt_local_entry);
 }
 
-static void tt_local_purge(struct work_struct *work)
+static void tt_local_purge(struct bat_priv *bat_priv)
 {
-	struct delayed_work *delayed_work =
-		container_of(work, struct delayed_work, work);
-	struct bat_priv *bat_priv =
-		container_of(delayed_work, struct bat_priv, tt_work);
 	struct hashtable_t *hash = bat_priv->tt_local_hash;
 	struct tt_local_entry *tt_local_entry;
 	struct hlist_node *node, *node_tmp;
 	struct hlist_head *head;
-	unsigned long timeout;
+	spinlock_t *list_lock; /* protects write access to the hash lists */
 	int i;
 
-	spin_lock_bh(&bat_priv->tt_lhash_lock);
-
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
+		list_lock = &hash->list_locks[i];
 
+		spin_lock_bh(list_lock);
 		hlist_for_each_entry_safe(tt_local_entry, node, node_tmp,
 					  head, hash_entry) {
 			if (tt_local_entry->never_purge)
 				continue;
 
-			timeout = tt_local_entry->last_seen;
-			timeout += TT_LOCAL_TIMEOUT * HZ;
-
-			if (time_before(jiffies, timeout))
+			if (!is_out_of_time(tt_local_entry->last_seen,
+					    TT_LOCAL_TIMEOUT * 1000))
 				continue;
 
-			tt_local_del(bat_priv, tt_local_entry,
-				      "address timed out");
+			tt_local_event(bat_priv, TT_CHANGE_DEL,
+				       tt_local_entry->addr, false);
+			atomic_dec(&bat_priv->num_local_tt);
+			bat_dbg(DBG_TT, bat_priv, "Deleting local "
+				"tt entry (%pM): timed out\n",
+				tt_local_entry->addr);
+			hlist_del_rcu(node);
+			tt_local_entry_free_ref(tt_local_entry);
 		}
+		spin_unlock_bh(list_lock);
 	}
 
-	spin_unlock_bh(&bat_priv->tt_lhash_lock);
-	tt_local_start_timer(bat_priv);
 }
 
-void tt_local_free(struct bat_priv *bat_priv)
+static void tt_local_table_free(struct bat_priv *bat_priv)
 {
+	struct hashtable_t *hash;
+	spinlock_t *list_lock; /* protects write access to the hash lists */
+	struct tt_local_entry *tt_local_entry;
+	struct hlist_node *node, *node_tmp;
+	struct hlist_head *head;
+	int i;
+
 	if (!bat_priv->tt_local_hash)
 		return;
 
-	cancel_delayed_work_sync(&bat_priv->tt_work);
-	hash_delete(bat_priv->tt_local_hash, _tt_local_del, bat_priv);
+	hash = bat_priv->tt_local_hash;
+
+	for (i = 0; i < hash->size; i++) {
+		head = &hash->table[i];
+		list_lock = &hash->list_locks[i];
+
+		spin_lock_bh(list_lock);
+		hlist_for_each_entry_safe(tt_local_entry, node, node_tmp,
+					  head, hash_entry) {
+			hlist_del_rcu(node);
+			tt_local_entry_free_ref(tt_local_entry);
+		}
+		spin_unlock_bh(list_lock);
+	}
+
+	hash_destroy(hash);
+
 	bat_priv->tt_local_hash = NULL;
 }
 
-int tt_global_init(struct bat_priv *bat_priv)
+static int tt_global_init(struct bat_priv *bat_priv)
 {
 	if (bat_priv->tt_global_hash)
 		return 1;
@@ -407,74 +476,78 @@
 	return 1;
 }
 
-void tt_global_add_orig(struct bat_priv *bat_priv,
-			 struct orig_node *orig_node,
-			 unsigned char *tt_buff, int tt_buff_len)
+static void tt_changes_list_free(struct bat_priv *bat_priv)
+{
+	struct tt_change_node *entry, *safe;
+
+	spin_lock_bh(&bat_priv->tt_changes_list_lock);
+
+	list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
+				 list) {
+		list_del(&entry->list);
+		kfree(entry);
+	}
+
+	atomic_set(&bat_priv->tt_local_changes, 0);
+	spin_unlock_bh(&bat_priv->tt_changes_list_lock);
+}
+
+/* caller must hold orig_node refcount */
+int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
+		  const unsigned char *tt_addr, uint8_t ttvn, bool roaming)
 {
 	struct tt_global_entry *tt_global_entry;
-	struct tt_local_entry *tt_local_entry;
-	int tt_buff_count = 0;
-	unsigned char *tt_ptr;
+	struct orig_node *orig_node_tmp;
+	int ret = 0;
 
-	while ((tt_buff_count + 1) * ETH_ALEN <= tt_buff_len) {
-		spin_lock_bh(&bat_priv->tt_ghash_lock);
+	tt_global_entry = tt_global_hash_find(bat_priv, tt_addr);
 
-		tt_ptr = tt_buff + (tt_buff_count * ETH_ALEN);
-		tt_global_entry = tt_global_hash_find(bat_priv, tt_ptr);
+	if (!tt_global_entry) {
+		tt_global_entry =
+			kmalloc(sizeof(*tt_global_entry),
+				GFP_ATOMIC);
+		if (!tt_global_entry)
+			goto out;
 
-		if (!tt_global_entry) {
-			spin_unlock_bh(&bat_priv->tt_ghash_lock);
-
-			tt_global_entry =
-				kmalloc(sizeof(struct tt_global_entry),
-					GFP_ATOMIC);
-
-			if (!tt_global_entry)
-				break;
-
-			memcpy(tt_global_entry->addr, tt_ptr, ETH_ALEN);
-
-			bat_dbg(DBG_ROUTES, bat_priv,
-				"Creating new global tt entry: "
-				"%pM (via %pM)\n",
-				tt_global_entry->addr, orig_node->orig);
-
-			spin_lock_bh(&bat_priv->tt_ghash_lock);
-			hash_add(bat_priv->tt_global_hash, compare_gtt,
-				 choose_orig, tt_global_entry,
-				 &tt_global_entry->hash_entry);
-
-		}
-
+		memcpy(tt_global_entry->addr, tt_addr, ETH_ALEN);
+		/* Assign the new orig_node */
+		atomic_inc(&orig_node->refcount);
 		tt_global_entry->orig_node = orig_node;
-		spin_unlock_bh(&bat_priv->tt_ghash_lock);
+		tt_global_entry->ttvn = ttvn;
+		tt_global_entry->flags = NO_FLAGS;
+		tt_global_entry->roam_at = 0;
+		atomic_set(&tt_global_entry->refcount, 2);
 
-		/* remove address from local hash if present */
-		spin_lock_bh(&bat_priv->tt_lhash_lock);
-
-		tt_ptr = tt_buff + (tt_buff_count * ETH_ALEN);
-		tt_local_entry = tt_local_hash_find(bat_priv, tt_ptr);
-
-		if (tt_local_entry)
-			tt_local_del(bat_priv, tt_local_entry,
-				      "global tt received");
-
-		spin_unlock_bh(&bat_priv->tt_lhash_lock);
-
-		tt_buff_count++;
-	}
-
-	/* initialize, and overwrite if malloc succeeds */
-	orig_node->tt_buff = NULL;
-	orig_node->tt_buff_len = 0;
-
-	if (tt_buff_len > 0) {
-		orig_node->tt_buff = kmalloc(tt_buff_len, GFP_ATOMIC);
-		if (orig_node->tt_buff) {
-			memcpy(orig_node->tt_buff, tt_buff, tt_buff_len);
-			orig_node->tt_buff_len = tt_buff_len;
+		hash_add(bat_priv->tt_global_hash, compare_gtt,
+			 choose_orig, tt_global_entry,
+			 &tt_global_entry->hash_entry);
+		atomic_inc(&orig_node->tt_size);
+	} else {
+		if (tt_global_entry->orig_node != orig_node) {
+			atomic_dec(&tt_global_entry->orig_node->tt_size);
+			orig_node_tmp = tt_global_entry->orig_node;
+			atomic_inc(&orig_node->refcount);
+			tt_global_entry->orig_node = orig_node;
+			orig_node_free_ref(orig_node_tmp);
+			atomic_inc(&orig_node->tt_size);
 		}
+		tt_global_entry->ttvn = ttvn;
+		tt_global_entry->flags = NO_FLAGS;
+		tt_global_entry->roam_at = 0;
 	}
+
+	bat_dbg(DBG_TT, bat_priv,
+		"Creating new global tt entry: %pM (via %pM)\n",
+		tt_global_entry->addr, orig_node->orig);
+
+	/* remove address from local hash if present */
+	tt_local_remove(bat_priv, tt_global_entry->addr,
+			"global tt received", roaming);
+	ret = 1;
+out:
+	if (tt_global_entry)
+		tt_global_entry_free_ref(tt_global_entry);
+	return ret;
 }
 
 int tt_global_seq_print_text(struct seq_file *seq, void *offset)
@@ -508,26 +581,27 @@
 	seq_printf(seq,
 		   "Globally announced TT entries received via the mesh %s\n",
 		   net_dev->name);
-
-	spin_lock_bh(&bat_priv->tt_ghash_lock);
+	seq_printf(seq, "       %-13s %s       %-15s %s\n",
+		   "Client", "(TTVN)", "Originator", "(Curr TTVN)");
 
 	buf_size = 1;
-	/* Estimate length for: " * xx:xx:xx:xx:xx:xx via xx:xx:xx:xx:xx:xx\n"*/
+	/* Estimate length for: " * xx:xx:xx:xx:xx:xx (ttvn) via
+	 * xx:xx:xx:xx:xx:xx (cur_ttvn)\n"*/
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
 
 		rcu_read_lock();
 		__hlist_for_each_rcu(node, head)
-			buf_size += 43;
+			buf_size += 59;
 		rcu_read_unlock();
 	}
 
 	buff = kmalloc(buf_size, GFP_ATOMIC);
 	if (!buff) {
-		spin_unlock_bh(&bat_priv->tt_ghash_lock);
 		ret = -ENOMEM;
 		goto out;
 	}
+
 	buff[0] = '\0';
 	pos = 0;
 
@@ -537,16 +611,18 @@
 		rcu_read_lock();
 		hlist_for_each_entry_rcu(tt_global_entry, node,
 					 head, hash_entry) {
-			pos += snprintf(buff + pos, 44,
-					" * %pM via %pM\n",
+			pos += snprintf(buff + pos, 61,
+					" * %pM  (%3u) via %pM     (%3u)\n",
 					tt_global_entry->addr,
-					tt_global_entry->orig_node->orig);
+					tt_global_entry->ttvn,
+					tt_global_entry->orig_node->orig,
+					(uint8_t) atomic_read(
+						&tt_global_entry->orig_node->
+						last_ttvn));
 		}
 		rcu_read_unlock();
 	}
 
-	spin_unlock_bh(&bat_priv->tt_ghash_lock);
-
 	seq_printf(seq, "%s", buff);
 	kfree(buff);
 out:
@@ -555,84 +631,994 @@
 	return ret;
 }
 
-static void _tt_global_del_orig(struct bat_priv *bat_priv,
-				 struct tt_global_entry *tt_global_entry,
-				 char *message)
+static void _tt_global_del(struct bat_priv *bat_priv,
+			   struct tt_global_entry *tt_global_entry,
+			   const char *message)
 {
-	bat_dbg(DBG_ROUTES, bat_priv,
+	if (!tt_global_entry)
+		goto out;
+
+	bat_dbg(DBG_TT, bat_priv,
 		"Deleting global tt entry %pM (via %pM): %s\n",
 		tt_global_entry->addr, tt_global_entry->orig_node->orig,
 		message);
 
+	atomic_dec(&tt_global_entry->orig_node->tt_size);
+
 	hash_remove(bat_priv->tt_global_hash, compare_gtt, choose_orig,
 		    tt_global_entry->addr);
-	kfree(tt_global_entry);
+out:
+	if (tt_global_entry)
+		tt_global_entry_free_ref(tt_global_entry);
+}
+
+void tt_global_del(struct bat_priv *bat_priv,
+		   struct orig_node *orig_node, const unsigned char *addr,
+		   const char *message, bool roaming)
+{
+	struct tt_global_entry *tt_global_entry = NULL;
+
+	tt_global_entry = tt_global_hash_find(bat_priv, addr);
+	if (!tt_global_entry)
+		goto out;
+
+	if (tt_global_entry->orig_node == orig_node) {
+		if (roaming) {
+			tt_global_entry->flags |= TT_CLIENT_ROAM;
+			tt_global_entry->roam_at = jiffies;
+			goto out;
+		}
+		_tt_global_del(bat_priv, tt_global_entry, message);
+	}
+out:
+	if (tt_global_entry)
+		tt_global_entry_free_ref(tt_global_entry);
 }
 
 void tt_global_del_orig(struct bat_priv *bat_priv,
-			 struct orig_node *orig_node, char *message)
+			struct orig_node *orig_node, const char *message)
 {
 	struct tt_global_entry *tt_global_entry;
-	int tt_buff_count = 0;
-	unsigned char *tt_ptr;
+	int i;
+	struct hashtable_t *hash = bat_priv->tt_global_hash;
+	struct hlist_node *node, *safe;
+	struct hlist_head *head;
+	spinlock_t *list_lock; /* protects write access to the hash lists */
 
-	if (orig_node->tt_buff_len == 0)
-		return;
+	for (i = 0; i < hash->size; i++) {
+		head = &hash->table[i];
+		list_lock = &hash->list_locks[i];
 
-	spin_lock_bh(&bat_priv->tt_ghash_lock);
+		spin_lock_bh(list_lock);
+		hlist_for_each_entry_safe(tt_global_entry, node, safe,
+					 head, hash_entry) {
+			if (tt_global_entry->orig_node == orig_node) {
+				bat_dbg(DBG_TT, bat_priv,
+					"Deleting global tt entry %pM "
+					"(via %pM): originator time out\n",
+					tt_global_entry->addr,
+					tt_global_entry->orig_node->orig);
+				hlist_del_rcu(node);
+				tt_global_entry_free_ref(tt_global_entry);
+			}
+		}
+		spin_unlock_bh(list_lock);
+	}
+	atomic_set(&orig_node->tt_size, 0);
+}
 
-	while ((tt_buff_count + 1) * ETH_ALEN <= orig_node->tt_buff_len) {
-		tt_ptr = orig_node->tt_buff + (tt_buff_count * ETH_ALEN);
-		tt_global_entry = tt_global_hash_find(bat_priv, tt_ptr);
+static void tt_global_roam_purge(struct bat_priv *bat_priv)
+{
+	struct hashtable_t *hash = bat_priv->tt_global_hash;
+	struct tt_global_entry *tt_global_entry;
+	struct hlist_node *node, *node_tmp;
+	struct hlist_head *head;
+	spinlock_t *list_lock; /* protects write access to the hash lists */
+	int i;
 
-		if ((tt_global_entry) &&
-		    (tt_global_entry->orig_node == orig_node))
-			_tt_global_del_orig(bat_priv, tt_global_entry,
-					     message);
+	for (i = 0; i < hash->size; i++) {
+		head = &hash->table[i];
+		list_lock = &hash->list_locks[i];
 
-		tt_buff_count++;
+		spin_lock_bh(list_lock);
+		hlist_for_each_entry_safe(tt_global_entry, node, node_tmp,
+					  head, hash_entry) {
+			if (!(tt_global_entry->flags & TT_CLIENT_ROAM))
+				continue;
+			if (!is_out_of_time(tt_global_entry->roam_at,
+					    TT_CLIENT_ROAM_TIMEOUT * 1000))
+				continue;
+
+			bat_dbg(DBG_TT, bat_priv, "Deleting global "
+				"tt entry (%pM): Roaming timeout\n",
+				tt_global_entry->addr);
+			atomic_dec(&tt_global_entry->orig_node->tt_size);
+			hlist_del_rcu(node);
+			tt_global_entry_free_ref(tt_global_entry);
+		}
+		spin_unlock_bh(list_lock);
 	}
 
-	spin_unlock_bh(&bat_priv->tt_ghash_lock);
-
-	orig_node->tt_buff_len = 0;
-	kfree(orig_node->tt_buff);
-	orig_node->tt_buff = NULL;
 }
 
-static void tt_global_del(struct hlist_node *node, void *arg)
+static void tt_global_table_free(struct bat_priv *bat_priv)
 {
-	void *data = container_of(node, struct tt_global_entry, hash_entry);
+	struct hashtable_t *hash;
+	spinlock_t *list_lock; /* protects write access to the hash lists */
+	struct tt_global_entry *tt_global_entry;
+	struct hlist_node *node, *node_tmp;
+	struct hlist_head *head;
+	int i;
 
-	kfree(data);
-}
-
-void tt_global_free(struct bat_priv *bat_priv)
-{
 	if (!bat_priv->tt_global_hash)
 		return;
 
-	hash_delete(bat_priv->tt_global_hash, tt_global_del, NULL);
+	hash = bat_priv->tt_global_hash;
+
+	for (i = 0; i < hash->size; i++) {
+		head = &hash->table[i];
+		list_lock = &hash->list_locks[i];
+
+		spin_lock_bh(list_lock);
+		hlist_for_each_entry_safe(tt_global_entry, node, node_tmp,
+					  head, hash_entry) {
+			hlist_del_rcu(node);
+			tt_global_entry_free_ref(tt_global_entry);
+		}
+		spin_unlock_bh(list_lock);
+	}
+
+	hash_destroy(hash);
+
 	bat_priv->tt_global_hash = NULL;
 }
 
-struct orig_node *transtable_search(struct bat_priv *bat_priv, uint8_t *addr)
+struct orig_node *transtable_search(struct bat_priv *bat_priv,
+				    const uint8_t *addr)
 {
 	struct tt_global_entry *tt_global_entry;
 	struct orig_node *orig_node = NULL;
 
-	spin_lock_bh(&bat_priv->tt_ghash_lock);
 	tt_global_entry = tt_global_hash_find(bat_priv, addr);
 
 	if (!tt_global_entry)
 		goto out;
 
 	if (!atomic_inc_not_zero(&tt_global_entry->orig_node->refcount))
-		goto out;
+		goto free_tt;
 
 	orig_node = tt_global_entry->orig_node;
 
+free_tt:
+	tt_global_entry_free_ref(tt_global_entry);
 out:
-	spin_unlock_bh(&bat_priv->tt_ghash_lock);
 	return orig_node;
 }
+
+/* Calculates the checksum of the global table of a given orig_node */
+uint16_t tt_global_crc(struct bat_priv *bat_priv, struct orig_node *orig_node)
+{
+	uint16_t total = 0, total_one;
+	struct hashtable_t *hash = bat_priv->tt_global_hash;
+	struct tt_global_entry *tt_global_entry;
+	struct hlist_node *node;
+	struct hlist_head *head;
+	int i, j;
+
+	for (i = 0; i < hash->size; i++) {
+		head = &hash->table[i];
+
+		rcu_read_lock();
+		hlist_for_each_entry_rcu(tt_global_entry, node,
+					 head, hash_entry) {
+			if (compare_eth(tt_global_entry->orig_node,
+					orig_node)) {
+				/* Roaming clients are in the global table for
+				 * consistency only. They don't have to be
+				 * taken into account while computing the
+				 * global crc */
+				if (tt_global_entry->flags & TT_CLIENT_ROAM)
+					continue;
+				total_one = 0;
+				for (j = 0; j < ETH_ALEN; j++)
+					total_one = crc16_byte(total_one,
+						tt_global_entry->addr[j]);
+				total ^= total_one;
+			}
+		}
+		rcu_read_unlock();
+	}
+
+	return total;
+}
+
+/* Calculates the checksum of the local table */
+uint16_t tt_local_crc(struct bat_priv *bat_priv)
+{
+	uint16_t total = 0, total_one;
+	struct hashtable_t *hash = bat_priv->tt_local_hash;
+	struct tt_local_entry *tt_local_entry;
+	struct hlist_node *node;
+	struct hlist_head *head;
+	int i, j;
+
+	for (i = 0; i < hash->size; i++) {
+		head = &hash->table[i];
+
+		rcu_read_lock();
+		hlist_for_each_entry_rcu(tt_local_entry, node,
+					 head, hash_entry) {
+			total_one = 0;
+			for (j = 0; j < ETH_ALEN; j++)
+				total_one = crc16_byte(total_one,
+						   tt_local_entry->addr[j]);
+			total ^= total_one;
+		}
+		rcu_read_unlock();
+	}
+
+	return total;
+}
+
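+/* frees every entry in the pending tt_request list */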
+static void tt_req_list_free(struct bat_priv *bat_priv)
+{
+	struct tt_req_node *node, *safe;
+
+	spin_lock_bh(&bat_priv->tt_req_list_lock);
+
+	list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
+		list_del(&node->list);
+		kfree(node);
+	}
+
+	spin_unlock_bh(&bat_priv->tt_req_list_lock);
+}
+
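+/* stores the tt buffer carried by the last OGM of orig_node so that it
+ * can be used to answer future tt_requests */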
+void tt_save_orig_buffer(struct bat_priv *bat_priv, struct orig_node *orig_node,
+			 const unsigned char *tt_buff, uint8_t tt_num_changes)
+{
+	uint16_t tt_buff_len = tt_len(tt_num_changes);
+
+	/* Replace the old buffer only if I received something in the
+	 * last OGM (the OGM could carry no changes) */
+	spin_lock_bh(&orig_node->tt_buff_lock);
+	if (tt_buff_len > 0) {
+		kfree(orig_node->tt_buff);
+		orig_node->tt_buff_len = 0;
+		orig_node->tt_buff = kmalloc(tt_buff_len, GFP_ATOMIC);
+		if (orig_node->tt_buff) {
+			memcpy(orig_node->tt_buff, tt_buff, tt_buff_len);
+			orig_node->tt_buff_len = tt_buff_len;
+		}
+	}
+	spin_unlock_bh(&orig_node->tt_buff_lock);
+}
+
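+/* drops pending tt_request entries that have timed out */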
+static void tt_req_purge(struct bat_priv *bat_priv)
+{
+	struct tt_req_node *node, *safe;
+
+	spin_lock_bh(&bat_priv->tt_req_list_lock);
+	list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
+		if (is_out_of_time(node->issued_at,
+		    TT_REQUEST_TIMEOUT * 1000)) {
+			list_del(&node->list);
+			kfree(node);
+		}
+	}
+	spin_unlock_bh(&bat_priv->tt_req_list_lock);
+}
+
+/* returns the pointer to the new tt_req_node struct if no request
+ * has already been issued for this orig_node, NULL otherwise */
+static struct tt_req_node *new_tt_req_node(struct bat_priv *bat_priv,
+					  struct orig_node *orig_node)
+{
+	struct tt_req_node *tt_req_node_tmp, *tt_req_node = NULL;
+
+	spin_lock_bh(&bat_priv->tt_req_list_lock);
+	list_for_each_entry(tt_req_node_tmp, &bat_priv->tt_req_list, list) {
+		if (compare_eth(tt_req_node_tmp, orig_node) &&
+		    !is_out_of_time(tt_req_node_tmp->issued_at,
+				    TT_REQUEST_TIMEOUT * 1000))
+			goto unlock;
+	}
+
+	tt_req_node = kmalloc(sizeof(*tt_req_node), GFP_ATOMIC);
+	if (!tt_req_node)
+		goto unlock;
+
+	memcpy(tt_req_node->addr, orig_node->orig, ETH_ALEN);
+	tt_req_node->issued_at = jiffies;
+
+	list_add(&tt_req_node->list, &bat_priv->tt_req_list);
+unlock:
+	spin_unlock_bh(&bat_priv->tt_req_list_lock);
+	return tt_req_node;
+}
+
+static int tt_global_valid_entry(const void *entry_ptr, const void *data_ptr)
+{
+	const struct tt_global_entry *tt_global_entry = entry_ptr;
+	const struct orig_node *orig_node = data_ptr;
+
+	if (tt_global_entry->flags & TT_CLIENT_ROAM)
+		return 0;
+
+	return (tt_global_entry->orig_node == orig_node);
+}
+
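+/* allocates a TT_RESPONSE skb and fills it with up to tt_len bytes of
+ * entries from the given hash, truncated to the primary interface MTU;
+ * entries are filtered through valid_cb when provided */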
+static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
+					      struct hashtable_t *hash,
+					      struct hard_iface *primary_if,
+					      int (*valid_cb)(const void *,
+							      const void *),
+					      void *cb_data)
+{
+	struct tt_local_entry *tt_local_entry;
+	struct tt_query_packet *tt_response;
+	struct tt_change *tt_change;
+	struct hlist_node *node;
+	struct hlist_head *head;
+	struct sk_buff *skb = NULL;
+	uint16_t tt_tot, tt_count;
+	ssize_t tt_query_size = sizeof(struct tt_query_packet);
+	int i;
+
+	if (tt_query_size + tt_len > primary_if->soft_iface->mtu) {
+		tt_len = primary_if->soft_iface->mtu - tt_query_size;
+		tt_len -= tt_len % sizeof(struct tt_change);
+	}
+	tt_tot = tt_len / sizeof(struct tt_change);
+
+	skb = dev_alloc_skb(tt_query_size + tt_len + ETH_HLEN);
+	if (!skb)
+		goto out;
+
+	skb_reserve(skb, ETH_HLEN);
+	tt_response = (struct tt_query_packet *)skb_put(skb,
+						     tt_query_size + tt_len);
+	tt_response->ttvn = ttvn;
+	tt_response->tt_data = htons(tt_tot);
+
+	tt_change = (struct tt_change *)(skb->data + tt_query_size);
+	tt_count = 0;
+
+	rcu_read_lock();
+	for (i = 0; i < hash->size; i++) {
+		head = &hash->table[i];
+
+		hlist_for_each_entry_rcu(tt_local_entry, node,
+					 head, hash_entry) {
+			if (tt_count == tt_tot)
+				break;
+
+			if ((valid_cb) && (!valid_cb(tt_local_entry, cb_data)))
+				continue;
+
+			memcpy(tt_change->addr, tt_local_entry->addr, ETH_ALEN);
+			tt_change->flags = NO_FLAGS;
+
+			tt_count++;
+			tt_change++;
+		}
+	}
+	rcu_read_unlock();
+
+out:
+	return skb;
+}
+
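+/* issues a TT_REQUEST towards dst_orig_node (asking for the full table
+ * if full_table is true); returns 0 on success, 1 otherwise */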
+int send_tt_request(struct bat_priv *bat_priv, struct orig_node *dst_orig_node,
+		    uint8_t ttvn, uint16_t tt_crc, bool full_table)
+{
+	struct sk_buff *skb = NULL;
+	struct tt_query_packet *tt_request;
+	struct neigh_node *neigh_node = NULL;
+	struct hard_iface *primary_if;
+	struct tt_req_node *tt_req_node = NULL;
+	int ret = 1;
+
+	primary_if = primary_if_get_selected(bat_priv);
+	if (!primary_if)
+		goto out;
+
+	/* The new tt_req will be issued only if I'm not waiting for a
+	 * reply from the same orig_node yet */
+	tt_req_node = new_tt_req_node(bat_priv, dst_orig_node);
+	if (!tt_req_node)
+		goto out;
+
+	skb = dev_alloc_skb(sizeof(struct tt_query_packet) + ETH_HLEN);
+	if (!skb)
+		goto out;
+
+	skb_reserve(skb, ETH_HLEN);
+
+	tt_request = (struct tt_query_packet *)skb_put(skb,
+				sizeof(struct tt_query_packet));
+
+	tt_request->packet_type = BAT_TT_QUERY;
+	tt_request->version = COMPAT_VERSION;
+	memcpy(tt_request->src, primary_if->net_dev->dev_addr, ETH_ALEN);
+	memcpy(tt_request->dst, dst_orig_node->orig, ETH_ALEN);
+	tt_request->ttl = TTL;
+	tt_request->ttvn = ttvn;
+	tt_request->tt_data = tt_crc;
+	tt_request->flags = TT_REQUEST;
+
+	if (full_table)
+		tt_request->flags |= TT_FULL_TABLE;
+
+	neigh_node = orig_node_get_router(dst_orig_node);
+	if (!neigh_node)
+		goto out;
+
+	bat_dbg(DBG_TT, bat_priv, "Sending TT_REQUEST to %pM via %pM "
+		"[%c]\n", dst_orig_node->orig, neigh_node->addr,
+		(full_table ? 'F' : '.'));
+
+	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
+	ret = 0;
+
+out:
+	if (neigh_node)
+		neigh_node_free_ref(neigh_node);
+	if (primary_if)
+		hardif_free_ref(primary_if);
+	if (ret)
+		kfree_skb(skb);
+	if (ret && tt_req_node) {
+		spin_lock_bh(&bat_priv->tt_req_list_lock);
+		list_del(&tt_req_node->list);
+		spin_unlock_bh(&bat_priv->tt_req_list_lock);
+		kfree(tt_req_node);
+	}
+	return ret;
+}
+
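+/* replies to a TT_REQUEST that was directed to another originator whose
+ * table we hold; returns true if a response could be sent */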
+static bool send_other_tt_response(struct bat_priv *bat_priv,
+				   struct tt_query_packet *tt_request)
+{
+	struct orig_node *req_dst_orig_node = NULL, *res_dst_orig_node = NULL;
+	struct neigh_node *neigh_node = NULL;
+	struct hard_iface *primary_if = NULL;
+	uint8_t orig_ttvn, req_ttvn, ttvn;
+	int ret = false;
+	unsigned char *tt_buff;
+	bool full_table;
+	uint16_t tt_len, tt_tot;
+	struct sk_buff *skb = NULL;
+	struct tt_query_packet *tt_response;
+
+	bat_dbg(DBG_TT, bat_priv,
+		"Received TT_REQUEST from %pM for "
+		"ttvn: %u (%pM) [%c]\n", tt_request->src,
+		tt_request->ttvn, tt_request->dst,
+		(tt_request->flags & TT_FULL_TABLE ? 'F' : '.'));
+
+	/* Let's get the orig node of the REAL destination */
+	req_dst_orig_node = get_orig_node(bat_priv, tt_request->dst);
+	if (!req_dst_orig_node)
+		goto out;
+
+	res_dst_orig_node = get_orig_node(bat_priv, tt_request->src);
+	if (!res_dst_orig_node)
+		goto out;
+
+	neigh_node = orig_node_get_router(res_dst_orig_node);
+	if (!neigh_node)
+		goto out;
+
+	primary_if = primary_if_get_selected(bat_priv);
+	if (!primary_if)
+		goto out;
+
+	orig_ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
+	req_ttvn = tt_request->ttvn;
+
+	/* I don't have the requested data */
+	if (orig_ttvn != req_ttvn ||
+	    tt_request->tt_data != req_dst_orig_node->tt_crc)
+		goto out;
+
+	/* If the full table has been explicitly requested */
+	if (tt_request->flags & TT_FULL_TABLE ||
+	    !req_dst_orig_node->tt_buff)
+		full_table = true;
+	else
+		full_table = false;
+
+	/* In this version, fragmentation is not implemented, so
+	 * I'll send only one packet with as many TT entries as I can */
+	if (!full_table) {
+		spin_lock_bh(&req_dst_orig_node->tt_buff_lock);
+		tt_len = req_dst_orig_node->tt_buff_len;
+		tt_tot = tt_len / sizeof(struct tt_change);
+
+		skb = dev_alloc_skb(sizeof(struct tt_query_packet) +
+				    tt_len + ETH_HLEN);
+		if (!skb)
+			goto unlock;
+
+		skb_reserve(skb, ETH_HLEN);
+		tt_response = (struct tt_query_packet *)skb_put(skb,
+				sizeof(struct tt_query_packet) + tt_len);
+		tt_response->ttvn = req_ttvn;
+		tt_response->tt_data = htons(tt_tot);
+
+		tt_buff = skb->data + sizeof(struct tt_query_packet);
+		/* Copy the last orig_node's OGM buffer */
+		memcpy(tt_buff, req_dst_orig_node->tt_buff,
+		       req_dst_orig_node->tt_buff_len);
+
+		spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);
+	} else {
+		tt_len = (uint16_t)atomic_read(&req_dst_orig_node->tt_size) *
+						sizeof(struct tt_change);
+		ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
+
+		skb = tt_response_fill_table(tt_len, ttvn,
+					     bat_priv->tt_global_hash,
+					     primary_if, tt_global_valid_entry,
+					     req_dst_orig_node);
+		if (!skb)
+			goto out;
+
+		tt_response = (struct tt_query_packet *)skb->data;
+	}
+
+	tt_response->packet_type = BAT_TT_QUERY;
+	tt_response->version = COMPAT_VERSION;
+	tt_response->ttl = TTL;
+	memcpy(tt_response->src, req_dst_orig_node->orig, ETH_ALEN);
+	memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
+	tt_response->flags = TT_RESPONSE;
+
+	if (full_table)
+		tt_response->flags |= TT_FULL_TABLE;
+
+	bat_dbg(DBG_TT, bat_priv,
+		"Sending TT_RESPONSE %pM via %pM for %pM (ttvn: %u)\n",
+		res_dst_orig_node->orig, neigh_node->addr,
+		req_dst_orig_node->orig, req_ttvn);
+
+	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
+	ret = true;
+	goto out;
+
+unlock:
+	spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);
+
+out:
+	if (res_dst_orig_node)
+		orig_node_free_ref(res_dst_orig_node);
+	if (req_dst_orig_node)
+		orig_node_free_ref(req_dst_orig_node);
+	if (neigh_node)
+		neigh_node_free_ref(neigh_node);
+	if (primary_if)
+		hardif_free_ref(primary_if);
+	if (!ret)
+		kfree_skb(skb);
+	return ret;
+}
+
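+/* replies to a TT_REQUEST addressed to ourselves; always returns true as
+ * the request does not need to be re-routed */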
+static bool send_my_tt_response(struct bat_priv *bat_priv,
+				struct tt_query_packet *tt_request)
+{
+	struct orig_node *orig_node = NULL;
+	struct neigh_node *neigh_node = NULL;
+	struct hard_iface *primary_if = NULL;
+	uint8_t my_ttvn, req_ttvn, ttvn;
+	int ret = false;
+	unsigned char *tt_buff;
+	bool full_table;
+	uint16_t tt_len, tt_tot;
+	struct sk_buff *skb = NULL;
+	struct tt_query_packet *tt_response;
+
+	bat_dbg(DBG_TT, bat_priv,
+		"Received TT_REQUEST from %pM for "
+		"ttvn: %u (me) [%c]\n", tt_request->src,
+		tt_request->ttvn,
+		(tt_request->flags & TT_FULL_TABLE ? 'F' : '.'));
+
+
+	my_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
+	req_ttvn = tt_request->ttvn;
+
+	orig_node = get_orig_node(bat_priv, tt_request->src);
+	if (!orig_node)
+		goto out;
+
+	neigh_node = orig_node_get_router(orig_node);
+	if (!neigh_node)
+		goto out;
+
+	primary_if = primary_if_get_selected(bat_priv);
+	if (!primary_if)
+		goto out;
+
+	/* If the full table has been explicitly requested or the gap
+	 * is too big send the whole local translation table */
+	if (tt_request->flags & TT_FULL_TABLE || my_ttvn != req_ttvn ||
+	    !bat_priv->tt_buff)
+		full_table = true;
+	else
+		full_table = false;
+
+	/* In this version, fragmentation is not implemented, so
+	 * I'll send only one packet with as many TT entries as I can */
+	if (!full_table) {
+		spin_lock_bh(&bat_priv->tt_buff_lock);
+		tt_len = bat_priv->tt_buff_len;
+		tt_tot = tt_len / sizeof(struct tt_change);
+
+		skb = dev_alloc_skb(sizeof(struct tt_query_packet) +
+				    tt_len + ETH_HLEN);
+		if (!skb)
+			goto unlock;
+
+		skb_reserve(skb, ETH_HLEN);
+		tt_response = (struct tt_query_packet *)skb_put(skb,
+				sizeof(struct tt_query_packet) + tt_len);
+		tt_response->ttvn = req_ttvn;
+		tt_response->tt_data = htons(tt_tot);
+
+		tt_buff = skb->data + sizeof(struct tt_query_packet);
+		memcpy(tt_buff, bat_priv->tt_buff,
+		       bat_priv->tt_buff_len);
+		spin_unlock_bh(&bat_priv->tt_buff_lock);
+	} else {
+		tt_len = (uint16_t)atomic_read(&bat_priv->num_local_tt) *
+						sizeof(struct tt_change);
+		ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
+
+		skb = tt_response_fill_table(tt_len, ttvn,
+					     bat_priv->tt_local_hash,
+					     primary_if, NULL, NULL);
+		if (!skb)
+			goto out;
+
+		tt_response = (struct tt_query_packet *)skb->data;
+	}
+
+	tt_response->packet_type = BAT_TT_QUERY;
+	tt_response->version = COMPAT_VERSION;
+	tt_response->ttl = TTL;
+	memcpy(tt_response->src, primary_if->net_dev->dev_addr, ETH_ALEN);
+	memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
+	tt_response->flags = TT_RESPONSE;
+
+	if (full_table)
+		tt_response->flags |= TT_FULL_TABLE;
+
+	bat_dbg(DBG_TT, bat_priv,
+		"Sending TT_RESPONSE to %pM via %pM [%c]\n",
+		orig_node->orig, neigh_node->addr,
+		(tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));
+
+	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
+	ret = true;
+	goto out;
+
+unlock:
+	spin_unlock_bh(&bat_priv->tt_buff_lock);
+out:
+	if (orig_node)
+		orig_node_free_ref(orig_node);
+	if (neigh_node)
+		neigh_node_free_ref(neigh_node);
+	if (primary_if)
+		hardif_free_ref(primary_if);
+	if (!ret)
+		kfree_skb(skb);
+	/* This packet was for me, so it doesn't need to be re-routed */
+	return true;
+}
+
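+/* dispatches a TT_REQUEST to the handler for our own table or to the one
+ * answering on behalf of another originator */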
+bool send_tt_response(struct bat_priv *bat_priv,
+		      struct tt_query_packet *tt_request)
+{
+	if (is_my_mac(tt_request->dst))
+		return send_my_tt_response(bat_priv, tt_request);
+	else
+		return send_other_tt_response(bat_priv, tt_request);
+}
+
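+/* applies the given list of tt changes (additions and deletions) received
+ * for orig_node to the global translation table */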
+static void _tt_update_changes(struct bat_priv *bat_priv,
+			       struct orig_node *orig_node,
+			       struct tt_change *tt_change,
+			       uint16_t tt_num_changes, uint8_t ttvn)
+{
+	int i;
+
+	for (i = 0; i < tt_num_changes; i++) {
+		if ((tt_change + i)->flags & TT_CHANGE_DEL)
+			tt_global_del(bat_priv, orig_node,
+				      (tt_change + i)->addr,
+				      "tt removed by changes",
+				      (tt_change + i)->flags & TT_CLIENT_ROAM);
+		else
+			if (!tt_global_add(bat_priv, orig_node,
+					   (tt_change + i)->addr, ttvn, false))
+				/* In case of a problem while storing a
+				 * global_entry, we stop the updating
+				 * procedure without committing the
+				 * ttvn change. This avoids sending
+				 * corrupted data in a tt_request
+				 */
+				return;
+	}
+}
+
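+/* replaces the global entries of orig_node with the ones carried by a
+ * full-table TT_RESPONSE */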
+static void tt_fill_gtable(struct bat_priv *bat_priv,
+			   struct tt_query_packet *tt_response)
+{
+	struct orig_node *orig_node = NULL;
+
+	orig_node = orig_hash_find(bat_priv, tt_response->src);
+	if (!orig_node)
+		goto out;
+
+	/* Purge the old table first */
+	tt_global_del_orig(bat_priv, orig_node, "Received full table");
+
+	_tt_update_changes(bat_priv, orig_node,
+			   (struct tt_change *)(tt_response + 1),
+			   tt_response->tt_data, tt_response->ttvn);
+
+	spin_lock_bh(&orig_node->tt_buff_lock);
+	kfree(orig_node->tt_buff);
+	orig_node->tt_buff_len = 0;
+	orig_node->tt_buff = NULL;
+	spin_unlock_bh(&orig_node->tt_buff_lock);
+
+	atomic_set(&orig_node->last_ttvn, tt_response->ttvn);
+
+out:
+	if (orig_node)
+		orig_node_free_ref(orig_node);
+}
+
+void tt_update_changes(struct bat_priv *bat_priv, struct orig_node *orig_node,
+		       uint16_t tt_num_changes, uint8_t ttvn,
+		       struct tt_change *tt_change)
+{
+	_tt_update_changes(bat_priv, orig_node, tt_change, tt_num_changes,
+			   ttvn);
+
+	tt_save_orig_buffer(bat_priv, orig_node, (unsigned char *)tt_change,
+			    tt_num_changes);
+	atomic_set(&orig_node->last_ttvn, ttvn);
+}
+
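+/* returns true if addr is currently announced as one of our own clients */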
+bool is_my_client(struct bat_priv *bat_priv, const uint8_t *addr)
+{
+	struct tt_local_entry *tt_local_entry = NULL;
+	bool ret = false;
+
+	tt_local_entry = tt_local_hash_find(bat_priv, addr);
+	if (!tt_local_entry)
+		goto out;
+	ret = true;
+out:
+	if (tt_local_entry)
+		tt_local_entry_free_ref(tt_local_entry);
+	return ret;
+}
+
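+/* processes an incoming TT_RESPONSE: updates the global table, drops the
+ * matching pending request and recomputes the originator CRC */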
+void handle_tt_response(struct bat_priv *bat_priv,
+			struct tt_query_packet *tt_response)
+{
+	struct tt_req_node *node, *safe;
+	struct orig_node *orig_node = NULL;
+
+	bat_dbg(DBG_TT, bat_priv, "Received TT_RESPONSE from %pM for "
+		"ttvn %d t_size: %d [%c]\n",
+		tt_response->src, tt_response->ttvn,
+		tt_response->tt_data,
+		(tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));
+
+	orig_node = orig_hash_find(bat_priv, tt_response->src);
+	if (!orig_node)
+		goto out;
+
+	if (tt_response->flags & TT_FULL_TABLE)
+		tt_fill_gtable(bat_priv, tt_response);
+	else
+		tt_update_changes(bat_priv, orig_node, tt_response->tt_data,
+				  tt_response->ttvn,
+				  (struct tt_change *)(tt_response + 1));
+
+	/* Delete the tt_req_node from pending tt_requests list */
+	spin_lock_bh(&bat_priv->tt_req_list_lock);
+	list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
+		if (!compare_eth(node->addr, tt_response->src))
+			continue;
+		list_del(&node->list);
+		kfree(node);
+	}
+	spin_unlock_bh(&bat_priv->tt_req_list_lock);
+
+	/* Recalculate the CRC for this orig_node and store it */
+	orig_node->tt_crc = tt_global_crc(bat_priv, orig_node);
+	/* Roaming phase is over: tables are in sync again. I can
+	 * unset the flag */
+	orig_node->tt_poss_change = false;
+out:
+	if (orig_node)
+		orig_node_free_ref(orig_node);
+}
+
+int tt_init(struct bat_priv *bat_priv)
+{
+	if (!tt_local_init(bat_priv))
+		return 0;
+
+	if (!tt_global_init(bat_priv))
+		return 0;
+
+	tt_start_timer(bat_priv);
+
+	return 1;
+}
+
+static void tt_roam_list_free(struct bat_priv *bat_priv)
+{
+	struct tt_roam_node *node, *safe;
+
+	spin_lock_bh(&bat_priv->tt_roam_list_lock);
+
+	list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) {
+		list_del(&node->list);
+		kfree(node);
+	}
+
+	spin_unlock_bh(&bat_priv->tt_roam_list_lock);
+}
+
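+/* drops roaming entries that are older than ROAMING_MAX_TIME */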
+static void tt_roam_purge(struct bat_priv *bat_priv)
+{
+	struct tt_roam_node *node, *safe;
+
+	spin_lock_bh(&bat_priv->tt_roam_list_lock);
+	list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) {
+		if (!is_out_of_time(node->first_time,
+				    ROAMING_MAX_TIME * 1000))
+			continue;
+
+		list_del(&node->list);
+		kfree(node);
+	}
+	spin_unlock_bh(&bat_priv->tt_roam_list_lock);
+}
+
+/* This function checks whether the client has already reached the
+ * maximum number of possible roaming phases. In this case the ROAMING_ADV
+ * will not be sent.
+ *
+ * returns true if the ROAMING_ADV can be sent, false otherwise */
+static bool tt_check_roam_count(struct bat_priv *bat_priv,
+				uint8_t *client)
+{
+	struct tt_roam_node *tt_roam_node;
+	bool ret = false;
+
+	spin_lock_bh(&bat_priv->tt_roam_list_lock);
+	/* The ROAMING_ADV will be sent only if the client has not already
+	 * roamed too many times within the last ROAMING_MAX_TIME */
+	list_for_each_entry(tt_roam_node, &bat_priv->tt_roam_list, list) {
+		if (!compare_eth(tt_roam_node->addr, client))
+			continue;
+
+		if (is_out_of_time(tt_roam_node->first_time,
+				   ROAMING_MAX_TIME * 1000))
+			continue;
+
+		if (!atomic_dec_not_zero(&tt_roam_node->counter))
+			/* Sorry, you roamed too many times! */
+			goto unlock;
+		ret = true;
+		break;
+	}
+
+	if (!ret) {
+		tt_roam_node = kmalloc(sizeof(*tt_roam_node), GFP_ATOMIC);
+		if (!tt_roam_node)
+			goto unlock;
+
+		tt_roam_node->first_time = jiffies;
+		atomic_set(&tt_roam_node->counter, ROAMING_MAX_COUNT - 1);
+		memcpy(tt_roam_node->addr, client, ETH_ALEN);
+
+		list_add(&tt_roam_node->list, &bat_priv->tt_roam_list);
+		ret = true;
+	}
+
+unlock:
+	spin_unlock_bh(&bat_priv->tt_roam_list_lock);
+	return ret;
+}
+
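+/* sends a ROAMING_ADV to orig_node to announce that the given client has
+ * roamed to us */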
+void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
+		   struct orig_node *orig_node)
+{
+	struct neigh_node *neigh_node = NULL;
+	struct sk_buff *skb = NULL;
+	struct roam_adv_packet *roam_adv_packet;
+	int ret = 1;
+	struct hard_iface *primary_if;
+
+	/* before going on we have to check whether the client has
+	 * already roamed to us too many times */
+	if (!tt_check_roam_count(bat_priv, client))
+		goto out;
+
+	skb = dev_alloc_skb(sizeof(struct roam_adv_packet) + ETH_HLEN);
+	if (!skb)
+		goto out;
+
+	skb_reserve(skb, ETH_HLEN);
+
+	roam_adv_packet = (struct roam_adv_packet *)skb_put(skb,
+					sizeof(struct roam_adv_packet));
+
+	roam_adv_packet->packet_type = BAT_ROAM_ADV;
+	roam_adv_packet->version = COMPAT_VERSION;
+	roam_adv_packet->ttl = TTL;
+	primary_if = primary_if_get_selected(bat_priv);
+	if (!primary_if)
+		goto out;
+	memcpy(roam_adv_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN);
+	hardif_free_ref(primary_if);
+	memcpy(roam_adv_packet->dst, orig_node->orig, ETH_ALEN);
+	memcpy(roam_adv_packet->client, client, ETH_ALEN);
+
+	neigh_node = orig_node_get_router(orig_node);
+	if (!neigh_node)
+		goto out;
+
+	bat_dbg(DBG_TT, bat_priv,
+		"Sending ROAMING_ADV to %pM (client %pM) via %pM\n",
+		orig_node->orig, client, neigh_node->addr);
+
+	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
+	ret = 0;
+
+out:
+	if (neigh_node)
+		neigh_node_free_ref(neigh_node);
+	if (ret)
+		kfree_skb(skb);
+	return;
+}
+
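+/* periodic worker: purges timed-out local entries, roamed-away global
+ * entries, stale tt_requests and roaming events, then re-arms the timer */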
+static void tt_purge(struct work_struct *work)
+{
+	struct delayed_work *delayed_work =
+		container_of(work, struct delayed_work, work);
+	struct bat_priv *bat_priv =
+		container_of(delayed_work, struct bat_priv, tt_work);
+
+	tt_local_purge(bat_priv);
+	tt_global_roam_purge(bat_priv);
+	tt_req_purge(bat_priv);
+	tt_roam_purge(bat_priv);
+
+	tt_start_timer(bat_priv);
+}
+
+void tt_free(struct bat_priv *bat_priv)
+{
+	cancel_delayed_work_sync(&bat_priv->tt_work);
+
+	tt_local_table_free(bat_priv);
+	tt_global_table_free(bat_priv);
+	tt_req_list_free(bat_priv);
+	tt_changes_list_free(bat_priv);
+	tt_roam_list_free(bat_priv);
+
+	kfree(bat_priv->tt_buff);
+}
diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h
index 46152c3..1cd2d39 100644
--- a/net/batman-adv/translation-table.h
+++ b/net/batman-adv/translation-table.h
@@ -22,22 +22,45 @@
 #ifndef _NET_BATMAN_ADV_TRANSLATION_TABLE_H_
 #define _NET_BATMAN_ADV_TRANSLATION_TABLE_H_
 
-int tt_local_init(struct bat_priv *bat_priv);
-void tt_local_add(struct net_device *soft_iface, uint8_t *addr);
+int tt_len(int changes_num);
+int tt_changes_fill_buffer(struct bat_priv *bat_priv,
+			   unsigned char *buff, int buff_len);
+int tt_init(struct bat_priv *bat_priv);
+void tt_local_add(struct net_device *soft_iface, const uint8_t *addr);
 void tt_local_remove(struct bat_priv *bat_priv,
-		      uint8_t *addr, char *message);
-int tt_local_fill_buffer(struct bat_priv *bat_priv,
-			  unsigned char *buff, int buff_len);
+		     const uint8_t *addr, const char *message, bool roaming);
 int tt_local_seq_print_text(struct seq_file *seq, void *offset);
-void tt_local_free(struct bat_priv *bat_priv);
-int tt_global_init(struct bat_priv *bat_priv);
 void tt_global_add_orig(struct bat_priv *bat_priv,
-			 struct orig_node *orig_node,
-			 unsigned char *tt_buff, int tt_buff_len);
+			struct orig_node *orig_node,
+			const unsigned char *tt_buff, int tt_buff_len);
+int tt_global_add(struct bat_priv *bat_priv,
+		  struct orig_node *orig_node, const unsigned char *addr,
+		  uint8_t ttvn, bool roaming);
 int tt_global_seq_print_text(struct seq_file *seq, void *offset);
 void tt_global_del_orig(struct bat_priv *bat_priv,
-			 struct orig_node *orig_node, char *message);
-void tt_global_free(struct bat_priv *bat_priv);
-struct orig_node *transtable_search(struct bat_priv *bat_priv, uint8_t *addr);
+			struct orig_node *orig_node, const char *message);
+void tt_global_del(struct bat_priv *bat_priv,
+		   struct orig_node *orig_node, const unsigned char *addr,
+		   const char *message, bool roaming);
+struct orig_node *transtable_search(struct bat_priv *bat_priv,
+				    const uint8_t *addr);
+void tt_save_orig_buffer(struct bat_priv *bat_priv, struct orig_node *orig_node,
+			 const unsigned char *tt_buff, uint8_t tt_num_changes);
+uint16_t tt_local_crc(struct bat_priv *bat_priv);
+uint16_t tt_global_crc(struct bat_priv *bat_priv, struct orig_node *orig_node);
+void tt_free(struct bat_priv *bat_priv);
+int send_tt_request(struct bat_priv *bat_priv,
+		    struct orig_node *dst_orig_node, uint8_t ttvn,
+		    uint16_t tt_crc, bool full_table);
+bool send_tt_response(struct bat_priv *bat_priv,
+		      struct tt_query_packet *tt_request);
+void tt_update_changes(struct bat_priv *bat_priv, struct orig_node *orig_node,
+		       uint16_t tt_num_changes, uint8_t ttvn,
+		       struct tt_change *tt_change);
+bool is_my_client(struct bat_priv *bat_priv, const uint8_t *addr);
+void handle_tt_response(struct bat_priv *bat_priv,
+			struct tt_query_packet *tt_response);
+void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
+		   struct orig_node *orig_node);
 
 #endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index fab70e8..85cf122 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -75,8 +75,18 @@
 	unsigned long batman_seqno_reset;
 	uint8_t gw_flags;
 	uint8_t flags;
+	atomic_t last_ttvn; /* last seen translation table version number */
+	uint16_t tt_crc;
 	unsigned char *tt_buff;
 	int16_t tt_buff_len;
+	spinlock_t tt_buff_lock; /* protects tt_buff */
+	atomic_t tt_size;
+	/* The tt_poss_change flag is used to detect an ongoing roaming phase.
+	 * If true, then I sent a Roaming_adv to this orig_node and I have to
+	 * inspect every packet directed to it to check whether it is still
+	 * the true destination or not. This flag will be reset to false as
+	 * soon as I receive a new TTVN from this orig_node */
+	bool tt_poss_change;
 	uint32_t last_real_seqno;
 	uint8_t last_ttl;
 	unsigned long bcast_bits[NUM_WORDS];
@@ -94,6 +104,7 @@
 	spinlock_t ogm_cnt_lock;
 	/* bcast_seqno_lock protects bcast_bits, last_bcast_seqno */
 	spinlock_t bcast_seqno_lock;
+	spinlock_t tt_list_lock; /* protects tt_list */
 	atomic_t bond_candidates;
 	struct list_head bond_list;
 };
@@ -145,6 +156,15 @@
 	atomic_t bcast_seqno;
 	atomic_t bcast_queue_left;
 	atomic_t batman_queue_left;
+	atomic_t ttvn; /* translation table version number */
+	atomic_t tt_ogm_append_cnt;
+	atomic_t tt_local_changes; /* changes registered in an OGM interval */
+	/* The tt_poss_change flag is used to detect an ongoing roaming phase.
+	 * If true, then I received a Roaming_adv and I have to inspect every
+	 * packet directed to me to check whether I am still the true
+	 * destination or not. This flag will be reset to false as soon as I
+	 * increase my TTVN */
+	bool tt_poss_change;
 	char num_ifaces;
 	struct debug_log *debug_log;
 	struct kobject *mesh_obj;
@@ -153,26 +173,35 @@
 	struct hlist_head forw_bcast_list;
 	struct hlist_head gw_list;
 	struct hlist_head softif_neigh_vids;
+	struct list_head tt_changes_list; /* tracks changes in an OGM interval */
 	struct list_head vis_send_list;
 	struct hashtable_t *orig_hash;
 	struct hashtable_t *tt_local_hash;
 	struct hashtable_t *tt_global_hash;
+	struct list_head tt_req_list; /* list of pending tt_requests */
+	struct list_head tt_roam_list;
 	struct hashtable_t *vis_hash;
 	spinlock_t forw_bat_list_lock; /* protects forw_bat_list */
 	spinlock_t forw_bcast_list_lock; /* protects  */
-	spinlock_t tt_lhash_lock; /* protects tt_local_hash */
-	spinlock_t tt_ghash_lock; /* protects tt_global_hash */
+	spinlock_t tt_changes_list_lock; /* protects tt_changes */
+	spinlock_t tt_req_list_lock; /* protects tt_req_list */
+	spinlock_t tt_roam_list_lock; /* protects tt_roam_list */
 	spinlock_t gw_list_lock; /* protects gw_list and curr_gw */
 	spinlock_t vis_hash_lock; /* protects vis_hash */
 	spinlock_t vis_list_lock; /* protects vis_info::recv_list */
 	spinlock_t softif_neigh_lock; /* protects soft-interface neigh list */
 	spinlock_t softif_neigh_vid_lock; /* protects soft-interface vid list */
-	int16_t num_local_tt;
-	atomic_t tt_local_changed;
+	atomic_t num_local_tt;
+	/* Checksum of the local table, recomputed before sending a new OGM */
+	atomic_t tt_crc;
+	unsigned char *tt_buff;
+	int16_t tt_buff_len;
+	spinlock_t tt_buff_lock; /* protects tt_buff */
 	struct delayed_work tt_work;
 	struct delayed_work orig_work;
 	struct delayed_work vis_work;
 	struct gw_node __rcu *curr_gw;  /* rcu protected pointer */
+	atomic_t gw_reselect;
 	struct hard_iface __rcu *primary_if;  /* rcu protected pointer */
 	struct vis_info *my_vis_info;
 };
@@ -196,13 +225,38 @@
 	uint8_t addr[ETH_ALEN];
 	unsigned long last_seen;
 	char never_purge;
+	atomic_t refcount;
+	struct rcu_head rcu;
 	struct hlist_node hash_entry;
 };
 
 struct tt_global_entry {
 	uint8_t addr[ETH_ALEN];
 	struct orig_node *orig_node;
-	struct hlist_node hash_entry;
+	uint8_t ttvn;
+	uint8_t flags; /* only TT_GLOBAL_ROAM is used */
+	unsigned long roam_at; /* time at which TT_GLOBAL_ROAM was set */
+	atomic_t refcount;
+	struct rcu_head rcu;
+	struct hlist_node hash_entry; /* entry in the global table */
+};
+
+struct tt_change_node {
+	struct list_head list;
+	struct tt_change change;
+};
+
+struct tt_req_node {
+	uint8_t addr[ETH_ALEN];
+	unsigned long issued_at;
+	struct list_head list;
+};
+
+struct tt_roam_node {
+	uint8_t addr[ETH_ALEN];
+	atomic_t counter;
+	unsigned long first_time;
+	struct list_head list;
 };
 
 /**
@@ -246,10 +300,10 @@
 };
 
 struct vis_info {
-	unsigned long       first_seen;
-	struct list_head    recv_list;
-			    /* list of server-neighbors we received a vis-packet
-			     * from.  we should not reply to them. */
+	unsigned long first_seen;
+	/* list of server-neighbors we received a vis-packet
+	 * from. We should not reply to them. */
+	struct list_head recv_list;
 	struct list_head send_list;
 	struct kref refcount;
 	struct hlist_node hash_entry;
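
The ttvn counter and the two tt_poss_change flags added above drive the
roaming detection described in the comments: a node keeps re-inspecting
packets for a possibly-roamed client until a new translation-table version
number (TTVN) settles the question. A minimal userspace sketch of that
gating logic, with hypothetical helper names (this is not the in-tree code):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	struct node_state {
		uint8_t ttvn;		/* my current table version */
		bool tt_poss_change;	/* roaming advert pending */
	};

	/* hypothetical stand-in for the local translation-table lookup */
	static bool tt_local_has(const uint8_t *dest)
	{
		return false;
	}

	static bool must_recheck_dest(const struct node_state *me,
				      uint8_t pkt_ttvn, const uint8_t *dest)
	{
		/* re-inspect while the sender used a stale TTVN or while a
		 * possible roaming event is still unresolved */
		if (pkt_ttvn != me->ttvn || me->tt_poss_change)
			return !tt_local_has(dest);
		return false;
	}

	int main(void)
	{
		struct node_state me = { .ttvn = 5, .tt_poss_change = true };
		uint8_t dest[6] = { 0 };

		printf("recheck needed: %d\n", must_recheck_dest(&me, 5, dest));
		return 0;
	}
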
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
index 19c3daf..32b125f 100644
--- a/net/batman-adv/unicast.c
+++ b/net/batman-adv/unicast.c
@@ -39,8 +39,8 @@
 		(struct unicast_frag_packet *)skb->data;
 	struct sk_buff *tmp_skb;
 	struct unicast_packet *unicast_packet;
-	int hdr_len = sizeof(struct unicast_packet);
-	int uni_diff = sizeof(struct unicast_frag_packet) - hdr_len;
+	int hdr_len = sizeof(*unicast_packet);
+	int uni_diff = sizeof(*up) - hdr_len;
 
 	/* set skb to the first part and tmp_skb to the second part */
 	if (up->flags & UNI_FRAG_HEAD) {
@@ -53,7 +53,7 @@
 	if (skb_linearize(skb) < 0 || skb_linearize(tmp_skb) < 0)
 		goto err;
 
-	skb_pull(tmp_skb, sizeof(struct unicast_frag_packet));
+	skb_pull(tmp_skb, sizeof(*up));
 	if (pskb_expand_head(skb, 0, tmp_skb->len, GFP_ATOMIC) < 0)
 		goto err;
 
@@ -99,8 +99,7 @@
 	struct frag_packet_list_entry *tfp;
 
 	for (i = 0; i < FRAG_BUFFER_SIZE; i++) {
-		tfp = kmalloc(sizeof(struct frag_packet_list_entry),
-			GFP_ATOMIC);
+		tfp = kmalloc(sizeof(*tfp), GFP_ATOMIC);
 		if (!tfp) {
 			frag_list_free(head);
 			return -ENOMEM;
@@ -115,7 +114,7 @@
 }
 
 static struct frag_packet_list_entry *frag_search_packet(struct list_head *head,
-						 struct unicast_frag_packet *up)
+					   const struct unicast_frag_packet *up)
 {
 	struct frag_packet_list_entry *tfp;
 	struct unicast_frag_packet *tmp_up = NULL;
@@ -218,14 +217,14 @@
 }
 
 int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
-		  struct hard_iface *hard_iface, uint8_t dstaddr[])
+		  struct hard_iface *hard_iface, const uint8_t dstaddr[])
 {
 	struct unicast_packet tmp_uc, *unicast_packet;
 	struct hard_iface *primary_if;
 	struct sk_buff *frag_skb;
 	struct unicast_frag_packet *frag1, *frag2;
-	int uc_hdr_len = sizeof(struct unicast_packet);
-	int ucf_hdr_len = sizeof(struct unicast_frag_packet);
+	int uc_hdr_len = sizeof(*unicast_packet);
+	int ucf_hdr_len = sizeof(*frag1);
 	int data_len = skb->len - uc_hdr_len;
 	int large_tail = 0, ret = NET_RX_DROP;
 	uint16_t seqno;
@@ -250,14 +249,14 @@
 	frag1 = (struct unicast_frag_packet *)skb->data;
 	frag2 = (struct unicast_frag_packet *)frag_skb->data;
 
-	memcpy(frag1, &tmp_uc, sizeof(struct unicast_packet));
+	memcpy(frag1, &tmp_uc, sizeof(tmp_uc));
 
 	frag1->ttl--;
 	frag1->version = COMPAT_VERSION;
 	frag1->packet_type = BAT_UNICAST_FRAG;
 
 	memcpy(frag1->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
-	memcpy(frag2, frag1, sizeof(struct unicast_frag_packet));
+	memcpy(frag2, frag1, sizeof(*frag2));
 
 	if (data_len & 1)
 		large_tail = UNI_FRAG_LARGETAIL;
@@ -295,7 +294,7 @@
 
 	/* get routing information */
 	if (is_multicast_ether_addr(ethhdr->h_dest)) {
-		orig_node = (struct orig_node *)gw_get_selected_orig(bat_priv);
+		orig_node = gw_get_selected_orig(bat_priv);
 		if (orig_node)
 			goto find_router;
 	}
@@ -314,10 +313,7 @@
 	if (!neigh_node)
 		goto out;
 
-	if (neigh_node->if_incoming->if_status != IF_ACTIVE)
-		goto out;
-
-	if (my_skb_head_push(skb, sizeof(struct unicast_packet)) < 0)
+	if (my_skb_head_push(skb, sizeof(*unicast_packet)) < 0)
 		goto out;
 
 	unicast_packet = (struct unicast_packet *)skb->data;
@@ -329,9 +325,12 @@
 	unicast_packet->ttl = TTL;
 	/* copy the destination for faster routing */
 	memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);
+	/* set the destination tt version number */
+	unicast_packet->ttvn =
+		(uint8_t)atomic_read(&orig_node->last_ttvn);
 
 	if (atomic_read(&bat_priv->fragmentation) &&
-	    data_len + sizeof(struct unicast_packet) >
+	    data_len + sizeof(*unicast_packet) >
 				neigh_node->if_incoming->net_dev->mtu) {
 		/* send frag skb decreases ttl */
 		unicast_packet->ttl++;
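
Most of the unicast.c hunks above swap sizeof(struct foo) for sizeof(*ptr),
the kernel's preferred allocation idiom: the size expression keeps tracking
the pointee even if the pointer is later retyped. A two-line userspace
illustration:

	#include <stdlib.h>

	struct frag_entry {
		int seqno;
		void *skb;
	};

	int main(void)
	{
		/* sizeof(*tfp) follows the pointee's type, so retyping tfp
		 * can never silently desynchronize the allocation size */
		struct frag_entry *tfp = malloc(sizeof(*tfp));

		free(tfp);
		return 0;
	}
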
diff --git a/net/batman-adv/unicast.h b/net/batman-adv/unicast.h
index 16ad7a9..62f54b9 100644
--- a/net/batman-adv/unicast.h
+++ b/net/batman-adv/unicast.h
@@ -32,11 +32,11 @@
 void frag_list_free(struct list_head *head);
 int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv);
 int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
-		  struct hard_iface *hard_iface, uint8_t dstaddr[]);
+		  struct hard_iface *hard_iface, const uint8_t dstaddr[]);
 
-static inline int frag_can_reassemble(struct sk_buff *skb, int mtu)
+static inline int frag_can_reassemble(const struct sk_buff *skb, int mtu)
 {
-	struct unicast_frag_packet *unicast_packet;
+	const struct unicast_frag_packet *unicast_packet;
 	int uneven_correction = 0;
 	unsigned int merged_size;
 
@@ -49,7 +49,7 @@
 			uneven_correction = -1;
 	}
 
-	merged_size = (skb->len - sizeof(struct unicast_frag_packet)) * 2;
+	merged_size = (skb->len - sizeof(*unicast_packet)) * 2;
 	merged_size += sizeof(struct unicast_packet) + uneven_correction;
 
 	return merged_size <= mtu;
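
frag_can_reassemble() above estimates the merged packet size from a single
fragment: double the fragment payload, add the plain unicast header back,
and correct by one byte for a large-tail fragment. A standalone sketch with
placeholder header sizes; the real values come from the batman-adv packet
definitions and the flag test is simplified here:

	#include <stdbool.h>
	#include <stdio.h>

	#define UNICAST_HDR		10	/* placeholder size */
	#define FRAG_HDR		14	/* placeholder size */
	#define UNI_FRAG_LARGETAIL	0x02

	static bool frag_can_reassemble(unsigned int frag_len, int flags,
					int mtu)
	{
		/* a "large tail" fragment carries the odd extra byte */
		int uneven_correction = (flags & UNI_FRAG_LARGETAIL) ? -1 : 0;
		unsigned int merged_size = (frag_len - FRAG_HDR) * 2;

		merged_size += UNICAST_HDR + uneven_correction;
		return merged_size <= (unsigned int)mtu;
	}

	int main(void)
	{
		printf("fits: %d\n", frag_can_reassemble(750, 0, 1500));
		return 0;
	}
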
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c
index c39f20c..8a1b985 100644
--- a/net/batman-adv/vis.c
+++ b/net/batman-adv/vis.c
@@ -30,22 +30,6 @@
 
 #define MAX_VIS_PACKET_SIZE 1000
 
-/* Returns the smallest signed integer in two's complement with the sizeof x */
-#define smallest_signed_int(x) (1u << (7u + 8u * (sizeof(x) - 1u)))
-
-/* Checks if a sequence number x is a predecessor/successor of y.
- * they handle overflows/underflows and can correctly check for a
- * predecessor/successor unless the variable sequence number has grown by
- * more then 2**(bitwidth(x)-1)-1.
- * This means that for a uint8_t with the maximum value 255, it would think:
- *  - when adding nothing - it is neither a predecessor nor a successor
- *  - before adding more than 127 to the starting value - it is a predecessor,
- *  - when adding 128 - it is neither a predecessor nor a successor,
- *  - after adding more than 127 to the starting value - it is a successor */
-#define seq_before(x, y) ({typeof(x) _dummy = (x - y); \
-			_dummy > smallest_signed_int(_dummy); })
-#define seq_after(x, y) seq_before(y, x)
-
 static void start_vis_timer(struct bat_priv *bat_priv);
 
 /* free the info */
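
The seq_before()/seq_after() macros removed above (presumably hoisted into a
shared header rather than dropped) compare sequence numbers safely across
wraparound by mapping the unsigned difference onto the signed half of the
type's range. A standalone demonstration of the same trick:

	#include <stdint.h>
	#include <stdio.h>

	#define smallest_signed_int(x) (1u << (7u + 8u * (sizeof(x) - 1u)))
	#define seq_before(x, y) ({typeof(x) _d = (x - y); \
				_d > smallest_signed_int(_d); })
	#define seq_after(x, y) seq_before(y, x)

	int main(void)
	{
		uint8_t a = 250, b = 5;	/* b has wrapped past a */

		printf("seq_before(250, 5) = %d\n", seq_before(a, b)); /* 1 */
		printf("seq_before(5, 250) = %d\n", seq_before(b, a)); /* 0 */
		printf("seq_after(5, 250)  = %d\n", seq_after(b, a));  /* 1 */
		return 0;
	}
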
@@ -68,10 +52,10 @@
 }
 
 /* Compare two vis packets, used by the hashing algorithm */
-static int vis_info_cmp(struct hlist_node *node, void *data2)
+static int vis_info_cmp(const struct hlist_node *node, const void *data2)
 {
-	struct vis_info *d1, *d2;
-	struct vis_packet *p1, *p2;
+	const struct vis_info *d1, *d2;
+	const struct vis_packet *p1, *p2;
 
 	d1 = container_of(node, struct vis_info, hash_entry);
 	d2 = data2;
@@ -82,11 +66,11 @@
 
 /* hash function to choose an entry in a hash table of given size */
 /* hash algorithm from http://en.wikipedia.org/wiki/Hash_table */
-static int vis_info_choose(void *data, int size)
+static int vis_info_choose(const void *data, int size)
 {
-	struct vis_info *vis_info = data;
-	struct vis_packet *packet;
-	unsigned char *key;
+	const struct vis_info *vis_info = data;
+	const struct vis_packet *packet;
+	const unsigned char *key;
 	uint32_t hash = 0;
 	size_t i;
 
@@ -106,7 +90,7 @@
 }
 
 static struct vis_info *vis_hash_find(struct bat_priv *bat_priv,
-				      void *data)
+				      const void *data)
 {
 	struct hashtable_t *hash = bat_priv->vis_hash;
 	struct hlist_head *head;
@@ -143,7 +127,7 @@
 	struct hlist_node *pos;
 
 	hlist_for_each_entry(entry, pos, if_list, list) {
-		if (compare_eth(entry->addr, (void *)interface))
+		if (compare_eth(entry->addr, interface))
 			return;
 	}
 
@@ -156,7 +140,8 @@
 	hlist_add_head(&entry->list, if_list);
 }
 
-static ssize_t vis_data_read_prim_sec(char *buff, struct hlist_head *if_list)
+static ssize_t vis_data_read_prim_sec(char *buff,
+				      const struct hlist_head *if_list)
 {
 	struct if_list_entry *entry;
 	struct hlist_node *pos;
@@ -189,8 +174,9 @@
 }
 
 /* read an entry  */
-static ssize_t vis_data_read_entry(char *buff, struct vis_info_entry *entry,
-				   uint8_t *src, bool primary)
+static ssize_t vis_data_read_entry(char *buff,
+				   const struct vis_info_entry *entry,
+				   const uint8_t *src, bool primary)
 {
 	/* maximal length: max(4+17+2, 3+17+1+3+2) == 26 */
 	if (primary && entry->quality == 0)
@@ -239,7 +225,7 @@
 		hlist_for_each_entry_rcu(info, node, head, hash_entry) {
 			packet = (struct vis_packet *)info->skb_packet->data;
 			entries = (struct vis_info_entry *)
-				((char *)packet + sizeof(struct vis_packet));
+				((char *)packet + sizeof(*packet));
 
 			for (j = 0; j < packet->entries; j++) {
 				if (entries[j].quality == 0)
@@ -287,7 +273,7 @@
 		hlist_for_each_entry_rcu(info, node, head, hash_entry) {
 			packet = (struct vis_packet *)info->skb_packet->data;
 			entries = (struct vis_info_entry *)
-				((char *)packet + sizeof(struct vis_packet));
+				((char *)packet + sizeof(*packet));
 
 			for (j = 0; j < packet->entries; j++) {
 				if (entries[j].quality == 0)
@@ -361,11 +347,11 @@
 
 /* tries to add one entry to the receive list. */
 static void recv_list_add(struct bat_priv *bat_priv,
-			  struct list_head *recv_list, char *mac)
+			  struct list_head *recv_list, const char *mac)
 {
 	struct recvlist_node *entry;
 
-	entry = kmalloc(sizeof(struct recvlist_node), GFP_ATOMIC);
+	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
 	if (!entry)
 		return;
 
@@ -377,9 +363,9 @@
 
 /* returns 1 if this mac is in the recv_list */
 static int recv_list_is_in(struct bat_priv *bat_priv,
-			   struct list_head *recv_list, char *mac)
+			   const struct list_head *recv_list, const char *mac)
 {
-	struct recvlist_node *entry;
+	const struct recvlist_node *entry;
 
 	spin_lock_bh(&bat_priv->vis_list_lock);
 	list_for_each_entry(entry, recv_list, list) {
@@ -412,11 +398,11 @@
 		return NULL;
 
 	/* see if the packet is already in vis_hash */
-	search_elem.skb_packet = dev_alloc_skb(sizeof(struct vis_packet));
+	search_elem.skb_packet = dev_alloc_skb(sizeof(*search_packet));
 	if (!search_elem.skb_packet)
 		return NULL;
 	search_packet = (struct vis_packet *)skb_put(search_elem.skb_packet,
-						     sizeof(struct vis_packet));
+						     sizeof(*search_packet));
 
 	memcpy(search_packet->vis_orig, vis_packet->vis_orig, ETH_ALEN);
 	old_info = vis_hash_find(bat_priv, &search_elem);
@@ -442,27 +428,26 @@
 		kref_put(&old_info->refcount, free_info);
 	}
 
-	info = kmalloc(sizeof(struct vis_info), GFP_ATOMIC);
+	info = kmalloc(sizeof(*info), GFP_ATOMIC);
 	if (!info)
 		return NULL;
 
-	info->skb_packet = dev_alloc_skb(sizeof(struct vis_packet) +
-					 vis_info_len + sizeof(struct ethhdr));
+	info->skb_packet = dev_alloc_skb(sizeof(*packet) + vis_info_len +
+					 sizeof(struct ethhdr));
 	if (!info->skb_packet) {
 		kfree(info);
 		return NULL;
 	}
 	skb_reserve(info->skb_packet, sizeof(struct ethhdr));
-	packet = (struct vis_packet *)skb_put(info->skb_packet,
-					      sizeof(struct vis_packet) +
-					      vis_info_len);
+	packet = (struct vis_packet *)skb_put(info->skb_packet, sizeof(*packet)
+					      + vis_info_len);
 
 	kref_init(&info->refcount);
 	INIT_LIST_HEAD(&info->send_list);
 	INIT_LIST_HEAD(&info->recv_list);
 	info->first_seen = jiffies;
 	info->bat_priv = bat_priv;
-	memcpy(packet, vis_packet, sizeof(struct vis_packet) + vis_info_len);
+	memcpy(packet, vis_packet, sizeof(*packet) + vis_info_len);
 
 	/* initialize and add new packet. */
 	*is_new = 1;
@@ -599,9 +584,9 @@
 }
 
 /* Return true if the vis packet is full. */
-static bool vis_packet_full(struct vis_info *info)
+static bool vis_packet_full(const struct vis_info *info)
 {
-	struct vis_packet *packet;
+	const struct vis_packet *packet;
 	packet = (struct vis_packet *)info->skb_packet->data;
 
 	if (MAX_VIS_PACKET_SIZE / sizeof(struct vis_info_entry)
@@ -619,7 +604,7 @@
 	struct hlist_head *head;
 	struct orig_node *orig_node;
 	struct neigh_node *router;
-	struct vis_info *info = (struct vis_info *)bat_priv->my_vis_info;
+	struct vis_info *info = bat_priv->my_vis_info;
 	struct vis_packet *packet = (struct vis_packet *)info->skb_packet->data;
 	struct vis_info_entry *entry;
 	struct tt_local_entry *tt_local_entry;
@@ -632,7 +617,7 @@
 	packet->ttl = TTL;
 	packet->seqno = htonl(ntohl(packet->seqno) + 1);
 	packet->entries = 0;
-	skb_trim(info->skb_packet, sizeof(struct vis_packet));
+	skb_trim(info->skb_packet, sizeof(*packet));
 
 	if (packet->vis_type == VIS_TYPE_CLIENT_UPDATE) {
 		best_tq = find_best_vis_server(bat_priv, info);
@@ -680,11 +665,12 @@
 
 	hash = bat_priv->tt_local_hash;
 
-	spin_lock_bh(&bat_priv->tt_lhash_lock);
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
 
-		hlist_for_each_entry(tt_local_entry, node, head, hash_entry) {
+		rcu_read_lock();
+		hlist_for_each_entry_rcu(tt_local_entry, node, head,
+					 hash_entry) {
 			entry = (struct vis_info_entry *)
 					skb_put(info->skb_packet,
 						sizeof(*entry));
@@ -693,14 +679,12 @@
 			entry->quality = 0; /* 0 means TT */
 			packet->entries++;
 
-			if (vis_packet_full(info)) {
-				spin_unlock_bh(&bat_priv->tt_lhash_lock);
-				return 0;
-			}
+			if (vis_packet_full(info))
+				goto unlock;
 		}
+		rcu_read_unlock();
 	}
 
-	spin_unlock_bh(&bat_priv->tt_lhash_lock);
 	return 0;
 
 unlock:
@@ -908,17 +892,15 @@
 		goto err;
 	}
 
-	bat_priv->my_vis_info->skb_packet = dev_alloc_skb(
-						sizeof(struct vis_packet) +
-						MAX_VIS_PACKET_SIZE +
-						sizeof(struct ethhdr));
+	bat_priv->my_vis_info->skb_packet = dev_alloc_skb(sizeof(*packet) +
+							  MAX_VIS_PACKET_SIZE +
+							 sizeof(struct ethhdr));
 	if (!bat_priv->my_vis_info->skb_packet)
 		goto free_info;
 
 	skb_reserve(bat_priv->my_vis_info->skb_packet, sizeof(struct ethhdr));
-	packet = (struct vis_packet *)skb_put(
-					bat_priv->my_vis_info->skb_packet,
-					sizeof(struct vis_packet));
+	packet = (struct vis_packet *)skb_put(bat_priv->my_vis_info->skb_packet,
+					      sizeof(*packet));
 
 	/* prefill the vis info */
 	bat_priv->my_vis_info->first_seen = jiffies -
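
vis_info_choose(), constified in the hunks above, buckets a vis packet by
its originator address using the scheme its comment cites. Assuming that is
the Jenkins one-at-a-time hash (as the Wikipedia reference suggests), a
standalone rendition over a 6-byte MAC key:

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	static int choose(const unsigned char *key, size_t keylen, int size)
	{
		uint32_t hash = 0;
		size_t i;

		for (i = 0; i < keylen; i++) {
			hash += key[i];
			hash += (hash << 10);
			hash ^= (hash >> 6);
		}
		hash += (hash << 3);
		hash ^= (hash >> 11);
		hash += (hash << 15);

		return hash % size;
	}

	int main(void)
	{
		unsigned char mac[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };

		printf("bucket %d\n", choose(mac, sizeof(mac), 256));
		return 0;
	}
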
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 815269b..e937ada 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -1519,7 +1519,7 @@
 
 		data += (count - rem);
 		count = rem;
-	};
+	}
 
 	return rem;
 }
@@ -1554,7 +1554,7 @@
 
 		data += (count - rem);
 		count = rem;
-	};
+	}
 
 	return rem;
 }
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index ffb0dc4..6814083 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -218,19 +218,24 @@
 	if (err < 0)
 		goto err1;
 
-	err = __rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, br_dump_ifinfo);
+	err = __rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL,
+			      br_dump_ifinfo, NULL);
 	if (err)
 		goto err2;
-	err = __rtnl_register(PF_BRIDGE, RTM_SETLINK, br_rtm_setlink, NULL);
+	err = __rtnl_register(PF_BRIDGE, RTM_SETLINK,
+			      br_rtm_setlink, NULL, NULL);
 	if (err)
 		goto err3;
-	err = __rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, br_fdb_add, NULL);
+	err = __rtnl_register(PF_BRIDGE, RTM_NEWNEIGH,
+			      br_fdb_add, NULL, NULL);
 	if (err)
 		goto err3;
-	err = __rtnl_register(PF_BRIDGE, RTM_DELNEIGH, br_fdb_delete, NULL);
+	err = __rtnl_register(PF_BRIDGE, RTM_DELNEIGH,
+			      br_fdb_delete, NULL, NULL);
 	if (err)
 		goto err3;
-	err = __rtnl_register(PF_BRIDGE, RTM_GETNEIGH, NULL, br_fdb_dump);
+	err = __rtnl_register(PF_BRIDGE, RTM_GETNEIGH,
+			      NULL, br_fdb_dump, NULL);
 	if (err)
 		goto err3;
 
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index adbb424..c628a57 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -9,6 +9,7 @@
 
 #include <linux/version.h>
 #include <linux/fs.h>
+#include <linux/hardirq.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/netdevice.h>
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 094fc53..8ce926d 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -58,6 +58,7 @@
 #include <linux/skbuff.h>
 #include <linux/can.h>
 #include <linux/can/core.h>
+#include <linux/ratelimit.h>
 #include <net/net_namespace.h>
 #include <net/sock.h>
 
@@ -161,8 +162,8 @@
 		 * return the error code immediately.  Below we will
 		 * return -EPROTONOSUPPORT
 		 */
-		if (err && printk_ratelimit())
-			printk(KERN_ERR "can: request_module "
+		if (err)
+			printk_ratelimited(KERN_ERR "can: request_module "
 			       "(can-proto-%d) failed.\n", protocol);
 
 		cp = can_get_proto(protocol);
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 184a657..d6c8ae5 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -43,6 +43,7 @@
 
 #include <linux/module.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/hrtimer.h>
 #include <linux/list.h>
 #include <linux/proc_fs.h>
diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c
index 5a8009c..85f3bc0 100644
--- a/net/ceph/crypto.c
+++ b/net/ceph/crypto.c
@@ -444,7 +444,7 @@
 		goto err;
 
 	/* TODO ceph_crypto_key_decode should really take const input */
-	p = (void*)data;
+	p = (void *)data;
 	ret = ceph_crypto_key_decode(ckey, &p, (char*)data+datalen);
 	if (ret < 0)
 		goto err_ckey;
diff --git a/net/core/dev.c b/net/core/dev.c
index 9c58c1e..8efe850 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5867,8 +5867,6 @@
 
 	dev->gso_max_size = GSO_MAX_SIZE;
 
-	INIT_LIST_HEAD(&dev->ethtool_ntuple_list.list);
-	dev->ethtool_ntuple_list.count = 0;
 	INIT_LIST_HEAD(&dev->napi_list);
 	INIT_LIST_HEAD(&dev->unreg_list);
 	INIT_LIST_HEAD(&dev->link_watch_list);
@@ -5932,9 +5930,6 @@
 	/* Flush device addresses */
 	dev_addr_flush(dev);
 
-	/* Clear ethtool n-tuple list */
-	ethtool_ntuple_flush(dev);
-
 	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
 		netif_napi_del(p);
 
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index fd14116..b7c12a6 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -169,18 +169,6 @@
 }
 EXPORT_SYMBOL(ethtool_op_set_flags);
 
-void ethtool_ntuple_flush(struct net_device *dev)
-{
-	struct ethtool_rx_ntuple_flow_spec_container *fsc, *f;
-
-	list_for_each_entry_safe(fsc, f, &dev->ethtool_ntuple_list.list, list) {
-		list_del(&fsc->list);
-		kfree(fsc);
-	}
-	dev->ethtool_ntuple_list.count = 0;
-}
-EXPORT_SYMBOL(ethtool_ntuple_flush);
-
 /* Handlers for each ethtool command */
 
 #define ETHTOOL_DEV_FEATURE_WORDS	1
@@ -865,34 +853,6 @@
 	return ret;
 }
 
-static void __rx_ntuple_filter_add(struct ethtool_rx_ntuple_list *list,
-			struct ethtool_rx_ntuple_flow_spec *spec,
-			struct ethtool_rx_ntuple_flow_spec_container *fsc)
-{
-
-	/* don't add filters forever */
-	if (list->count >= ETHTOOL_MAX_NTUPLE_LIST_ENTRY) {
-		/* free the container */
-		kfree(fsc);
-		return;
-	}
-
-	/* Copy the whole filter over */
-	fsc->fs.flow_type = spec->flow_type;
-	memcpy(&fsc->fs.h_u, &spec->h_u, sizeof(spec->h_u));
-	memcpy(&fsc->fs.m_u, &spec->m_u, sizeof(spec->m_u));
-
-	fsc->fs.vlan_tag = spec->vlan_tag;
-	fsc->fs.vlan_tag_mask = spec->vlan_tag_mask;
-	fsc->fs.data = spec->data;
-	fsc->fs.data_mask = spec->data_mask;
-	fsc->fs.action = spec->action;
-
-	/* add to the list */
-	list_add_tail_rcu(&fsc->list, &list->list);
-	list->count++;
-}
-
 /*
  * ethtool does not (or did not) set masks for flow parameters that are
  * not specified, so if both value and mask are 0 then this must be
@@ -930,8 +890,6 @@
 {
 	struct ethtool_rx_ntuple cmd;
 	const struct ethtool_ops *ops = dev->ethtool_ops;
-	struct ethtool_rx_ntuple_flow_spec_container *fsc = NULL;
-	int ret;
 
 	if (!ops->set_rx_ntuple)
 		return -EOPNOTSUPP;
@@ -944,269 +902,7 @@
 
 	rx_ntuple_fix_masks(&cmd.fs);
 
-	/*
-	 * Cache filter in dev struct for GET operation only if
-	 * the underlying driver doesn't have its own GET operation, and
-	 * only if the filter was added successfully.  First make sure we
-	 * can allocate the filter, then continue if successful.
-	 */
-	if (!ops->get_rx_ntuple) {
-		fsc = kmalloc(sizeof(*fsc), GFP_ATOMIC);
-		if (!fsc)
-			return -ENOMEM;
-	}
-
-	ret = ops->set_rx_ntuple(dev, &cmd);
-	if (ret) {
-		kfree(fsc);
-		return ret;
-	}
-
-	if (!ops->get_rx_ntuple)
-		__rx_ntuple_filter_add(&dev->ethtool_ntuple_list, &cmd.fs, fsc);
-
-	return ret;
-}
-
-static int ethtool_get_rx_ntuple(struct net_device *dev, void __user *useraddr)
-{
-	struct ethtool_gstrings gstrings;
-	const struct ethtool_ops *ops = dev->ethtool_ops;
-	struct ethtool_rx_ntuple_flow_spec_container *fsc;
-	u8 *data;
-	char *p;
-	int ret, i, num_strings = 0;
-
-	if (!ops->get_sset_count)
-		return -EOPNOTSUPP;
-
-	if (copy_from_user(&gstrings, useraddr, sizeof(gstrings)))
-		return -EFAULT;
-
-	ret = ops->get_sset_count(dev, gstrings.string_set);
-	if (ret < 0)
-		return ret;
-
-	gstrings.len = ret;
-
-	data = kzalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER);
-	if (!data)
-		return -ENOMEM;
-
-	if (ops->get_rx_ntuple) {
-		/* driver-specific filter grab */
-		ret = ops->get_rx_ntuple(dev, gstrings.string_set, data);
-		goto copy;
-	}
-
-	/* default ethtool filter grab */
-	i = 0;
-	p = (char *)data;
-	list_for_each_entry(fsc, &dev->ethtool_ntuple_list.list, list) {
-		sprintf(p, "Filter %d:\n", i);
-		p += ETH_GSTRING_LEN;
-		num_strings++;
-
-		switch (fsc->fs.flow_type) {
-		case TCP_V4_FLOW:
-			sprintf(p, "\tFlow Type: TCP\n");
-			p += ETH_GSTRING_LEN;
-			num_strings++;
-			break;
-		case UDP_V4_FLOW:
-			sprintf(p, "\tFlow Type: UDP\n");
-			p += ETH_GSTRING_LEN;
-			num_strings++;
-			break;
-		case SCTP_V4_FLOW:
-			sprintf(p, "\tFlow Type: SCTP\n");
-			p += ETH_GSTRING_LEN;
-			num_strings++;
-			break;
-		case AH_ESP_V4_FLOW:
-			sprintf(p, "\tFlow Type: AH ESP\n");
-			p += ETH_GSTRING_LEN;
-			num_strings++;
-			break;
-		case ESP_V4_FLOW:
-			sprintf(p, "\tFlow Type: ESP\n");
-			p += ETH_GSTRING_LEN;
-			num_strings++;
-			break;
-		case IP_USER_FLOW:
-			sprintf(p, "\tFlow Type: Raw IP\n");
-			p += ETH_GSTRING_LEN;
-			num_strings++;
-			break;
-		case IPV4_FLOW:
-			sprintf(p, "\tFlow Type: IPv4\n");
-			p += ETH_GSTRING_LEN;
-			num_strings++;
-			break;
-		default:
-			sprintf(p, "\tFlow Type: Unknown\n");
-			p += ETH_GSTRING_LEN;
-			num_strings++;
-			goto unknown_filter;
-		}
-
-		/* now the rest of the filters */
-		switch (fsc->fs.flow_type) {
-		case TCP_V4_FLOW:
-		case UDP_V4_FLOW:
-		case SCTP_V4_FLOW:
-			sprintf(p, "\tSrc IP addr: 0x%x\n",
-				fsc->fs.h_u.tcp_ip4_spec.ip4src);
-			p += ETH_GSTRING_LEN;
-			num_strings++;
-			sprintf(p, "\tSrc IP mask: 0x%x\n",
-				fsc->fs.m_u.tcp_ip4_spec.ip4src);
-			p += ETH_GSTRING_LEN;
-			num_strings++;
-			sprintf(p, "\tDest IP addr: 0x%x\n",
-				fsc->fs.h_u.tcp_ip4_spec.ip4dst);
-			p += ETH_GSTRING_LEN;
-			num_strings++;
-			sprintf(p, "\tDest IP mask: 0x%x\n",
-				fsc->fs.m_u.tcp_ip4_spec.ip4dst);
-			p += ETH_GSTRING_LEN;
-			num_strings++;
-			sprintf(p, "\tSrc Port: %d, mask: 0x%x\n",
-				fsc->fs.h_u.tcp_ip4_spec.psrc,
-				fsc->fs.m_u.tcp_ip4_spec.psrc);
-			p += ETH_GSTRING_LEN;
-			num_strings++;
-			sprintf(p, "\tDest Port: %d, mask: 0x%x\n",
-				fsc->fs.h_u.tcp_ip4_spec.pdst,
-				fsc->fs.m_u.tcp_ip4_spec.pdst);
-			p += ETH_GSTRING_LEN;
-			num_strings++;
-			sprintf(p, "\tTOS: %d, mask: 0x%x\n",
-				fsc->fs.h_u.tcp_ip4_spec.tos,
-				fsc->fs.m_u.tcp_ip4_spec.tos);
-			p += ETH_GSTRING_LEN;
-			num_strings++;
-			break;
-		case AH_ESP_V4_FLOW:
-		case ESP_V4_FLOW:
-			sprintf(p, "\tSrc IP addr: 0x%x\n",
-				fsc->fs.h_u.ah_ip4_spec.ip4src);
-			p += ETH_GSTRING_LEN;
-			num_strings++;
-			sprintf(p, "\tSrc IP mask: 0x%x\n",
-				fsc->fs.m_u.ah_ip4_spec.ip4src);
-			p += ETH_GSTRING_LEN;
-			num_strings++;
-			sprintf(p, "\tDest IP addr: 0x%x\n",
-				fsc->fs.h_u.ah_ip4_spec.ip4dst);
-			p += ETH_GSTRING_LEN;
-			num_strings++;
-			sprintf(p, "\tDest IP mask: 0x%x\n",
-				fsc->fs.m_u.ah_ip4_spec.ip4dst);
-			p += ETH_GSTRING_LEN;
-			num_strings++;
-			sprintf(p, "\tSPI: %d, mask: 0x%x\n",
-				fsc->fs.h_u.ah_ip4_spec.spi,
-				fsc->fs.m_u.ah_ip4_spec.spi);
-			p += ETH_GSTRING_LEN;
-			num_strings++;
-			sprintf(p, "\tTOS: %d, mask: 0x%x\n",
-				fsc->fs.h_u.ah_ip4_spec.tos,
-				fsc->fs.m_u.ah_ip4_spec.tos);
-			p += ETH_GSTRING_LEN;
-			num_strings++;
-			break;
-		case IP_USER_FLOW:
-			sprintf(p, "\tSrc IP addr: 0x%x\n",
-				fsc->fs.h_u.usr_ip4_spec.ip4src);
-			p += ETH_GSTRING_LEN;
-			num_strings++;
-			sprintf(p, "\tSrc IP mask: 0x%x\n",
-				fsc->fs.m_u.usr_ip4_spec.ip4src);
-			p += ETH_GSTRING_LEN;
-			num_strings++;
-			sprintf(p, "\tDest IP addr: 0x%x\n",
-				fsc->fs.h_u.usr_ip4_spec.ip4dst);
-			p += ETH_GSTRING_LEN;
-			num_strings++;
-			sprintf(p, "\tDest IP mask: 0x%x\n",
-				fsc->fs.m_u.usr_ip4_spec.ip4dst);
-			p += ETH_GSTRING_LEN;
-			num_strings++;
-			break;
-		case IPV4_FLOW:
-			sprintf(p, "\tSrc IP addr: 0x%x\n",
-				fsc->fs.h_u.usr_ip4_spec.ip4src);
-			p += ETH_GSTRING_LEN;
-			num_strings++;
-			sprintf(p, "\tSrc IP mask: 0x%x\n",
-				fsc->fs.m_u.usr_ip4_spec.ip4src);
-			p += ETH_GSTRING_LEN;
-			num_strings++;
-			sprintf(p, "\tDest IP addr: 0x%x\n",
-				fsc->fs.h_u.usr_ip4_spec.ip4dst);
-			p += ETH_GSTRING_LEN;
-			num_strings++;
-			sprintf(p, "\tDest IP mask: 0x%x\n",
-				fsc->fs.m_u.usr_ip4_spec.ip4dst);
-			p += ETH_GSTRING_LEN;
-			num_strings++;
-			sprintf(p, "\tL4 bytes: 0x%x, mask: 0x%x\n",
-				fsc->fs.h_u.usr_ip4_spec.l4_4_bytes,
-				fsc->fs.m_u.usr_ip4_spec.l4_4_bytes);
-			p += ETH_GSTRING_LEN;
-			num_strings++;
-			sprintf(p, "\tTOS: %d, mask: 0x%x\n",
-				fsc->fs.h_u.usr_ip4_spec.tos,
-				fsc->fs.m_u.usr_ip4_spec.tos);
-			p += ETH_GSTRING_LEN;
-			num_strings++;
-			sprintf(p, "\tIP Version: %d, mask: 0x%x\n",
-				fsc->fs.h_u.usr_ip4_spec.ip_ver,
-				fsc->fs.m_u.usr_ip4_spec.ip_ver);
-			p += ETH_GSTRING_LEN;
-			num_strings++;
-			sprintf(p, "\tProtocol: %d, mask: 0x%x\n",
-				fsc->fs.h_u.usr_ip4_spec.proto,
-				fsc->fs.m_u.usr_ip4_spec.proto);
-			p += ETH_GSTRING_LEN;
-			num_strings++;
-			break;
-		}
-		sprintf(p, "\tVLAN: %d, mask: 0x%x\n",
-			fsc->fs.vlan_tag, fsc->fs.vlan_tag_mask);
-		p += ETH_GSTRING_LEN;
-		num_strings++;
-		sprintf(p, "\tUser-defined: 0x%Lx\n", fsc->fs.data);
-		p += ETH_GSTRING_LEN;
-		num_strings++;
-		sprintf(p, "\tUser-defined mask: 0x%Lx\n", fsc->fs.data_mask);
-		p += ETH_GSTRING_LEN;
-		num_strings++;
-		if (fsc->fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP)
-			sprintf(p, "\tAction: Drop\n");
-		else
-			sprintf(p, "\tAction: Direct to queue %d\n",
-				fsc->fs.action);
-		p += ETH_GSTRING_LEN;
-		num_strings++;
-unknown_filter:
-		i++;
-	}
-copy:
-	/* indicate to userspace how many strings we actually have */
-	gstrings.len = num_strings;
-	ret = -EFAULT;
-	if (copy_to_user(useraddr, &gstrings, sizeof(gstrings)))
-		goto out;
-	useraddr += sizeof(gstrings);
-	if (copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN))
-		goto out;
-	ret = 0;
-
-out:
-	kfree(data);
-	return ret;
+	return ops->set_rx_ntuple(dev, &cmd);
 }
 
 static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
@@ -2101,9 +1797,6 @@
 	case ETHTOOL_SRXNTUPLE:
 		rc = ethtool_set_rx_ntuple(dev, useraddr);
 		break;
-	case ETHTOOL_GRXNTUPLE:
-		rc = ethtool_get_rx_ntuple(dev, useraddr);
-		break;
 	case ETHTOOL_GSSET_INFO:
 		rc = ethtool_get_sset_info(dev, useraddr);
 		break;
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 008dc70..e7ab0c0 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -740,9 +740,9 @@
 static int __init fib_rules_init(void)
 {
 	int err;
-	rtnl_register(PF_UNSPEC, RTM_NEWRULE, fib_nl_newrule, NULL);
-	rtnl_register(PF_UNSPEC, RTM_DELRULE, fib_nl_delrule, NULL);
-	rtnl_register(PF_UNSPEC, RTM_GETRULE, NULL, fib_nl_dumprule);
+	rtnl_register(PF_UNSPEC, RTM_NEWRULE, fib_nl_newrule, NULL, NULL);
+	rtnl_register(PF_UNSPEC, RTM_DELRULE, fib_nl_delrule, NULL, NULL);
+	rtnl_register(PF_UNSPEC, RTM_GETRULE, NULL, fib_nl_dumprule, NULL);
 
 	err = register_pernet_subsys(&fib_rules_net_ops);
 	if (err < 0)
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 799f06e..ceb505b 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -2909,12 +2909,13 @@
 
 static int __init neigh_init(void)
 {
-	rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL);
-	rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL);
-	rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info);
+	rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, NULL);
+	rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, NULL);
+	rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info, NULL);
 
-	rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info);
-	rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL);
+	rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
+		      NULL);
+	rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, NULL);
 
 	return 0;
 }
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index abd936d..a798fc6 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -56,9 +56,11 @@
 struct rtnl_link {
 	rtnl_doit_func		doit;
 	rtnl_dumpit_func	dumpit;
+	rtnl_calcit_func	calcit;
 };
 
 static DEFINE_MUTEX(rtnl_mutex);
+static u16 min_ifinfo_dump_size;
 
 void rtnl_lock(void)
 {
@@ -144,12 +146,28 @@
 	return tab ? tab[msgindex].dumpit : NULL;
 }
 
+static rtnl_calcit_func rtnl_get_calcit(int protocol, int msgindex)
+{
+	struct rtnl_link *tab;
+
+	if (protocol <= RTNL_FAMILY_MAX)
+		tab = rtnl_msg_handlers[protocol];
+	else
+		tab = NULL;
+
+	if (tab == NULL || tab[msgindex].calcit == NULL)
+		tab = rtnl_msg_handlers[PF_UNSPEC];
+
+	return tab ? tab[msgindex].calcit : NULL;
+}
+
 /**
  * __rtnl_register - Register a rtnetlink message type
  * @protocol: Protocol family or PF_UNSPEC
  * @msgtype: rtnetlink message type
  * @doit: Function pointer called for each request message
  * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
+ * @calcit: Function pointer to calculate the size of a dump message
  *
  * Registers the specified function pointers (at least one of them has
  * to be non-NULL) to be called whenever a request message for the
@@ -162,7 +180,8 @@
  * Returns 0 on success or a negative error code.
  */
 int __rtnl_register(int protocol, int msgtype,
-		    rtnl_doit_func doit, rtnl_dumpit_func dumpit)
+		    rtnl_doit_func doit, rtnl_dumpit_func dumpit,
+		    rtnl_calcit_func calcit)
 {
 	struct rtnl_link *tab;
 	int msgindex;
@@ -185,6 +204,9 @@
 	if (dumpit)
 		tab[msgindex].dumpit = dumpit;
 
+	if (calcit)
+		tab[msgindex].calcit = calcit;
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(__rtnl_register);
@@ -199,9 +221,10 @@
  * of memory implies no sense in continuing.
  */
 void rtnl_register(int protocol, int msgtype,
-		   rtnl_doit_func doit, rtnl_dumpit_func dumpit)
+		   rtnl_doit_func doit, rtnl_dumpit_func dumpit,
+		   rtnl_calcit_func calcit)
 {
-	if (__rtnl_register(protocol, msgtype, doit, dumpit) < 0)
+	if (__rtnl_register(protocol, msgtype, doit, dumpit, calcit) < 0)
 		panic("Unable to register rtnetlink message handler, "
 		      "protocol = %d, message type = %d\n",
 		      protocol, msgtype);
@@ -1818,6 +1841,11 @@
 	return err;
 }
 
+static u16 rtnl_calcit(struct sk_buff *skb)
+{
+	return min_ifinfo_dump_size;
+}
+
 static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
 {
 	int idx;
@@ -1847,11 +1875,14 @@
 	struct net *net = dev_net(dev);
 	struct sk_buff *skb;
 	int err = -ENOBUFS;
+	size_t if_info_size;
 
-	skb = nlmsg_new(if_nlmsg_size(dev), GFP_KERNEL);
+	skb = nlmsg_new((if_info_size = if_nlmsg_size(dev)), GFP_KERNEL);
 	if (skb == NULL)
 		goto errout;
 
+	min_ifinfo_dump_size = max_t(u16, if_info_size, min_ifinfo_dump_size);
+
 	err = rtnl_fill_ifinfo(skb, dev, type, 0, 0, change, 0);
 	if (err < 0) {
 		/* -EMSGSIZE implies BUG in if_nlmsg_size() */
@@ -1902,14 +1933,20 @@
 	if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) {
 		struct sock *rtnl;
 		rtnl_dumpit_func dumpit;
+		rtnl_calcit_func calcit;
+		u16 min_dump_alloc = 0;
 
 		dumpit = rtnl_get_dumpit(family, type);
 		if (dumpit == NULL)
 			return -EOPNOTSUPP;
+		calcit = rtnl_get_calcit(family, type);
+		if (calcit)
+			min_dump_alloc = calcit(skb);
 
 		__rtnl_unlock();
 		rtnl = net->rtnl;
-		err = netlink_dump_start(rtnl, skb, nlh, dumpit, NULL);
+		err = netlink_dump_start(rtnl, skb, nlh, dumpit,
+					 NULL, min_dump_alloc);
 		rtnl_lock();
 		return err;
 	}
@@ -2019,12 +2056,13 @@
 	netlink_set_nonroot(NETLINK_ROUTE, NL_NONROOT_RECV);
 	register_netdevice_notifier(&rtnetlink_dev_notifier);
 
-	rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink, rtnl_dump_ifinfo);
-	rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL);
-	rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL);
-	rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL);
+	rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink,
+		      rtnl_dump_ifinfo, rtnl_calcit);
+	rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL, NULL);
+	rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL, NULL);
+	rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL, NULL);
 
-	rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all);
-	rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all);
+	rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all, NULL);
+	rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all, NULL);
 }
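
The rtnetlink changes thread a third callback, calcit, through registration
so a dump can be started with a sane minimum allocation
(min_ifinfo_dump_size tracks the largest ifinfo message built so far), and
rtnl_get_calcit() falls back to the PF_UNSPEC table when a family registered
none. A userspace model of that dispatch, not kernel code:

	#include <stdint.h>
	#include <stdio.h>

	#define PF_UNSPEC	0
	#define PF_INET		2
	#define NPROTO		3
	#define NMSG		4

	typedef uint16_t (*calcit_fn)(void);

	static calcit_fn calcit_tab[NPROTO][NMSG];

	static calcit_fn get_calcit(int protocol, int msgindex)
	{
		calcit_fn fn = NULL;

		if (protocol < NPROTO)
			fn = calcit_tab[protocol][msgindex];
		if (!fn)	/* fall back to the generic PF_UNSPEC entry */
			fn = calcit_tab[PF_UNSPEC][msgindex];
		return fn;
	}

	static uint16_t ifinfo_calcit(void)
	{
		return 1024;	/* minimum buffer to allocate for the dump */
	}

	int main(void)
	{
		calcit_fn fn;

		calcit_tab[PF_UNSPEC][1] = ifinfo_calcit;
		fn = get_calcit(PF_INET, 1);	/* PF_INET registered none */
		printf("min_dump_alloc = %u\n", fn ? fn() : 0);
		return 0;
	}
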
 
diff --git a/net/core/timestamping.c b/net/core/timestamping.c
index 7e7ca37..98a5264 100644
--- a/net/core/timestamping.c
+++ b/net/core/timestamping.c
@@ -68,6 +68,7 @@
 		break;
 	}
 }
+EXPORT_SYMBOL_GPL(skb_clone_tx_timestamp);
 
 void skb_complete_tx_timestamp(struct sk_buff *skb,
 			       struct skb_shared_hwtstamps *hwtstamps)
@@ -121,6 +122,7 @@
 
 	return false;
 }
+EXPORT_SYMBOL_GPL(skb_defer_rx_timestamp);
 
 void __init skb_timestamping_init(void)
 {
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index 3609eac..ed1bb8c 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -1819,8 +1819,8 @@
 {
 	INIT_LIST_HEAD(&dcb_app_list);
 
-	rtnl_register(PF_UNSPEC, RTM_GETDCB, dcb_doit, NULL);
-	rtnl_register(PF_UNSPEC, RTM_SETDCB, dcb_doit, NULL);
+	rtnl_register(PF_UNSPEC, RTM_GETDCB, dcb_doit, NULL, NULL);
+	rtnl_register(PF_UNSPEC, RTM_SETDCB, dcb_doit, NULL, NULL);
 
 	return 0;
 }
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index cf26ac7..48530b4 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -1313,7 +1313,7 @@
 
 	++*pos;
 
-	dev = (struct net_device *)v;
+	dev = v;
 	if (v == SEQ_START_TOKEN)
 		dev = net_device_entry(&init_net.dev_base_head);
 
@@ -1414,9 +1414,9 @@
 
 	dn_dev_devices_on();
 
-	rtnl_register(PF_DECnet, RTM_NEWADDR, dn_nl_newaddr, NULL);
-	rtnl_register(PF_DECnet, RTM_DELADDR, dn_nl_deladdr, NULL);
-	rtnl_register(PF_DECnet, RTM_GETADDR, NULL, dn_nl_dump_ifaddr);
+	rtnl_register(PF_DECnet, RTM_NEWADDR, dn_nl_newaddr, NULL, NULL);
+	rtnl_register(PF_DECnet, RTM_DELADDR, dn_nl_deladdr, NULL, NULL);
+	rtnl_register(PF_DECnet, RTM_GETADDR, NULL, dn_nl_dump_ifaddr, NULL);
 
 	proc_net_fops_create(&init_net, "decnet_dev", S_IRUGO, &dn_dev_seq_fops);
 
diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c
index 1c74ed3..104324d 100644
--- a/net/decnet/dn_fib.c
+++ b/net/decnet/dn_fib.c
@@ -763,8 +763,8 @@
 
 	register_dnaddr_notifier(&dn_fib_dnaddr_notifier);
 
-	rtnl_register(PF_DECnet, RTM_NEWROUTE, dn_fib_rtm_newroute, NULL);
-	rtnl_register(PF_DECnet, RTM_DELROUTE, dn_fib_rtm_delroute, NULL);
+	rtnl_register(PF_DECnet, RTM_NEWROUTE, dn_fib_rtm_newroute, NULL, NULL);
+	rtnl_register(PF_DECnet, RTM_DELROUTE, dn_fib_rtm_delroute, NULL, NULL);
 }
 
 
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 74544bc..2949ca4 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -1841,10 +1841,11 @@
 	proc_net_fops_create(&init_net, "decnet_cache", S_IRUGO, &dn_rt_cache_seq_fops);
 
 #ifdef CONFIG_DECNET_ROUTER
-	rtnl_register(PF_DECnet, RTM_GETROUTE, dn_cache_getroute, dn_fib_dump);
+	rtnl_register(PF_DECnet, RTM_GETROUTE, dn_cache_getroute,
+		      dn_fib_dump, NULL);
 #else
 	rtnl_register(PF_DECnet, RTM_GETROUTE, dn_cache_getroute,
-		      dn_cache_dump);
+		      dn_cache_dump, NULL);
 #endif
 }
 
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index eae1f67..0600f0f 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1438,11 +1438,11 @@
 unsigned long snmp_fold_field(void __percpu *mib[], int offt)
 {
 	unsigned long res = 0;
-	int i;
+	int i, j;
 
 	for_each_possible_cpu(i) {
-		res += *(((unsigned long *) per_cpu_ptr(mib[0], i)) + offt);
-		res += *(((unsigned long *) per_cpu_ptr(mib[1], i)) + offt);
+		for (j = 0; j < SNMP_ARRAY_SZ; j++)
+			res += *(((unsigned long *) per_cpu_ptr(mib[j], i)) + offt);
 	}
 	return res;
 }
@@ -1456,28 +1456,19 @@
 	int cpu;
 
 	for_each_possible_cpu(cpu) {
-		void *bhptr, *userptr;
+		void *bhptr;
 		struct u64_stats_sync *syncp;
-		u64 v_bh, v_user;
+		u64 v;
 		unsigned int start;
 
-		/* first mib used by softirq context, we must use _bh() accessors */
-		bhptr = per_cpu_ptr(SNMP_STAT_BHPTR(mib), cpu);
+		bhptr = per_cpu_ptr(mib[0], cpu);
 		syncp = (struct u64_stats_sync *)(bhptr + syncp_offset);
 		do {
 			start = u64_stats_fetch_begin_bh(syncp);
-			v_bh = *(((u64 *) bhptr) + offt);
+			v = *(((u64 *) bhptr) + offt);
 		} while (u64_stats_fetch_retry_bh(syncp, start));
 
-		/* second mib used in USER context */
-		userptr = per_cpu_ptr(SNMP_STAT_USRPTR(mib), cpu);
-		syncp = (struct u64_stats_sync *)(userptr + syncp_offset);
-		do {
-			start = u64_stats_fetch_begin(syncp);
-			v_user = *(((u64 *) userptr) + offt);
-		} while (u64_stats_fetch_retry(syncp, start));
-
-		res += v_bh + v_user;
+		res += v;
 	}
 	return res;
 }
@@ -1489,25 +1480,28 @@
 	BUG_ON(ptr == NULL);
 	ptr[0] = __alloc_percpu(mibsize, align);
 	if (!ptr[0])
-		goto err0;
+		return -ENOMEM;
+#if SNMP_ARRAY_SZ == 2
 	ptr[1] = __alloc_percpu(mibsize, align);
-	if (!ptr[1])
-		goto err1;
+	if (!ptr[1]) {
+		free_percpu(ptr[0]);
+		ptr[0] = NULL;
+		return -ENOMEM;
+	}
+#endif
 	return 0;
-err1:
-	free_percpu(ptr[0]);
-	ptr[0] = NULL;
-err0:
-	return -ENOMEM;
 }
 EXPORT_SYMBOL_GPL(snmp_mib_init);
 
-void snmp_mib_free(void __percpu *ptr[2])
+void snmp_mib_free(void __percpu *ptr[SNMP_ARRAY_SZ])
 {
+	int i;
+
 	BUG_ON(ptr == NULL);
-	free_percpu(ptr[0]);
-	free_percpu(ptr[1]);
-	ptr[0] = ptr[1] = NULL;
+	for (i = 0; i < SNMP_ARRAY_SZ; i++) {
+		free_percpu(ptr[i]);
+		ptr[i] = NULL;
+	}
 }
 EXPORT_SYMBOL_GPL(snmp_mib_free);
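
snmp_fold_field() now walks SNMP_ARRAY_SZ per-cpu mib copies instead of
hard-coding two, which lets a build collapse the separate softirq and user
mibs into one. A userspace model of the fold:

	#include <stdio.h>

	#define SNMP_ARRAY_SZ	1	/* 1 when the user mib is dropped */
	#define NR_CPUS		4
	#define NR_FIELDS	8

	static unsigned long mib[SNMP_ARRAY_SZ][NR_CPUS][NR_FIELDS];

	static unsigned long snmp_fold_field(int offt)
	{
		unsigned long res = 0;
		int i, j;

		for (i = 0; i < NR_CPUS; i++)	/* each possible cpu */
			for (j = 0; j < SNMP_ARRAY_SZ; j++)
				res += mib[j][i][offt];
		return res;
	}

	int main(void)
	{
		mib[0][0][3] = 5;
		mib[0][2][3] = 7;
		printf("%lu\n", snmp_fold_field(3));	/* 12 */
		return 0;
	}
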
 
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 0d4a184..37b3c18 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1833,8 +1833,8 @@
 
 	rtnl_af_register(&inet_af_ops);
 
-	rtnl_register(PF_INET, RTM_NEWADDR, inet_rtm_newaddr, NULL);
-	rtnl_register(PF_INET, RTM_DELADDR, inet_rtm_deladdr, NULL);
-	rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr);
+	rtnl_register(PF_INET, RTM_NEWADDR, inet_rtm_newaddr, NULL, NULL);
+	rtnl_register(PF_INET, RTM_DELADDR, inet_rtm_deladdr, NULL, NULL);
+	rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr, NULL);
 }
 
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 2252471..92fc5f6 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -1124,9 +1124,9 @@
 
 void __init ip_fib_init(void)
 {
-	rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL);
-	rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL);
-	rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib);
+	rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, NULL);
+	rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, NULL);
+	rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, NULL);
 
 	register_pernet_subsys(&fib_net_ops);
 	register_netdevice_notifier(&fib_netdev_notifier);
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 3267d38..389a2e6 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -869,7 +869,7 @@
 		}
 
 		return netlink_dump_start(idiagnl, skb, nlh,
-					  inet_diag_dump, NULL);
+					  inet_diag_dump, NULL, 0);
 	}
 
 	return inet_diag_get_exact(skb, nlh);
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index ce616d9..dafbf2c 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -54,15 +54,11 @@
  *  1.  Nodes may appear in the tree only with the pool lock held.
  *  2.  Nodes may disappear from the tree only with the pool lock held
  *      AND reference count being 0.
- *  3.  Nodes appears and disappears from unused node list only under
- *      "inet_peer_unused_lock".
- *  4.  Global variable peer_total is modified under the pool lock.
- *  5.  struct inet_peer fields modification:
+ *  3.  Global variable peer_total is modified under the pool lock.
+ *  4.  struct inet_peer fields modification:
  *		avl_left, avl_right, avl_parent, avl_height: pool lock
- *		unused: unused node list lock
  *		refcnt: atomically against modifications on other CPU;
  *		   usually under some other lock to prevent node disappearing
- *		dtime: unused node list lock
  *		daddr: unchangeable
  *		ip_id_count: atomic value (no lock needed)
  */
@@ -104,19 +100,6 @@
 					 * aggressively at this stage */
 int inet_peer_minttl __read_mostly = 120 * HZ;	/* TTL under high load: 120 sec */
 int inet_peer_maxttl __read_mostly = 10 * 60 * HZ;	/* usual time to live: 10 min */
-int inet_peer_gc_mintime __read_mostly = 10 * HZ;
-int inet_peer_gc_maxtime __read_mostly = 120 * HZ;
-
-static struct {
-	struct list_head	list;
-	spinlock_t		lock;
-} unused_peers = {
-	.list			= LIST_HEAD_INIT(unused_peers.list),
-	.lock			= __SPIN_LOCK_UNLOCKED(unused_peers.lock),
-};
-
-static void peer_check_expire(unsigned long dummy);
-static DEFINE_TIMER(peer_periodic_timer, peer_check_expire, 0, 0);
 
 
 /* Called from ip_output.c:ip_init  */
@@ -142,21 +125,6 @@
 			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
 			NULL);
 
-	/* All the timers, started at system startup tend
-	   to synchronize. Perturb it a bit.
-	 */
-	peer_periodic_timer.expires = jiffies
-		+ net_random() % inet_peer_gc_maxtime
-		+ inet_peer_gc_maxtime;
-	add_timer(&peer_periodic_timer);
-}
-
-/* Called with or without local BH being disabled. */
-static void unlink_from_unused(struct inet_peer *p)
-{
-	spin_lock_bh(&unused_peers.lock);
-	list_del_init(&p->unused);
-	spin_unlock_bh(&unused_peers.lock);
 }
 
 static int addr_compare(const struct inetpeer_addr *a,
@@ -203,20 +171,6 @@
 	u;							\
 })
 
-static bool atomic_add_unless_return(atomic_t *ptr, int a, int u, int *newv)
-{
-	int cur, old = atomic_read(ptr);
-
-	while (old != u) {
-		*newv = old + a;
-		cur = atomic_cmpxchg(ptr, old, *newv);
-		if (cur == old)
-			return true;
-		old = cur;
-	}
-	return false;
-}
-
 /*
  * Called with rcu_read_lock()
 * Because we hold no lock against a writer, it's quite possible we fall
@@ -225,8 +179,7 @@
  * We exit from this function if number of links exceeds PEER_MAXDEPTH
  */
 static struct inet_peer *lookup_rcu(const struct inetpeer_addr *daddr,
-				    struct inet_peer_base *base,
-				    int *newrefcnt)
+				    struct inet_peer_base *base)
 {
 	struct inet_peer *u = rcu_dereference(base->root);
 	int count = 0;
@@ -235,11 +188,9 @@
 		int cmp = addr_compare(daddr, &u->daddr);
 		if (cmp == 0) {
 			/* Before taking a reference, check if this entry was
-			 * deleted, unlink_from_pool() sets refcnt=-1 to make
-			 * distinction between an unused entry (refcnt=0) and
-			 * a freed one.
+			 * deleted (refcnt=-1)
 			 */
-			if (!atomic_add_unless_return(&u->refcnt, 1, -1, newrefcnt))
+			if (!atomic_add_unless(&u->refcnt, 1, -1))
 				u = NULL;
 			return u;
 		}
@@ -366,137 +317,96 @@
 	kmem_cache_free(peer_cachep, container_of(head, struct inet_peer, rcu));
 }
 
-/* May be called with local BH enabled. */
 static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base,
 			     struct inet_peer __rcu **stack[PEER_MAXDEPTH])
 {
-	int do_free;
+	struct inet_peer __rcu ***stackptr, ***delp;
 
-	do_free = 0;
-
-	write_seqlock_bh(&base->lock);
-	/* Check the reference counter.  It was artificially incremented by 1
-	 * in cleanup() function to prevent sudden disappearing.  If we can
-	 * atomically (because of lockless readers) take this last reference,
-	 * it's safe to remove the node and free it later.
-	 * We use refcnt=-1 to alert lockless readers this entry is deleted.
-	 */
-	if (atomic_cmpxchg(&p->refcnt, 1, -1) == 1) {
-		struct inet_peer __rcu ***stackptr, ***delp;
-		if (lookup(&p->daddr, stack, base) != p)
-			BUG();
-		delp = stackptr - 1; /* *delp[0] == p */
-		if (p->avl_left == peer_avl_empty_rcu) {
-			*delp[0] = p->avl_right;
-			--stackptr;
-		} else {
-			/* look for a node to insert instead of p */
-			struct inet_peer *t;
-			t = lookup_rightempty(p, base);
-			BUG_ON(rcu_deref_locked(*stackptr[-1], base) != t);
-			**--stackptr = t->avl_left;
-			/* t is removed, t->daddr > x->daddr for any
-			 * x in p->avl_left subtree.
-			 * Put t in the old place of p. */
-			RCU_INIT_POINTER(*delp[0], t);
-			t->avl_left = p->avl_left;
-			t->avl_right = p->avl_right;
-			t->avl_height = p->avl_height;
-			BUG_ON(delp[1] != &p->avl_left);
-			delp[1] = &t->avl_left; /* was &p->avl_left */
-		}
-		peer_avl_rebalance(stack, stackptr, base);
-		base->total--;
-		do_free = 1;
+	if (lookup(&p->daddr, stack, base) != p)
+		BUG();
+	delp = stackptr - 1; /* *delp[0] == p */
+	if (p->avl_left == peer_avl_empty_rcu) {
+		*delp[0] = p->avl_right;
+		--stackptr;
+	} else {
+		/* look for a node to insert instead of p */
+		struct inet_peer *t;
+		t = lookup_rightempty(p, base);
+		BUG_ON(rcu_deref_locked(*stackptr[-1], base) != t);
+		**--stackptr = t->avl_left;
+		/* t is removed, t->daddr > x->daddr for any
+		 * x in p->avl_left subtree.
+		 * Put t in the old place of p. */
+		RCU_INIT_POINTER(*delp[0], t);
+		t->avl_left = p->avl_left;
+		t->avl_right = p->avl_right;
+		t->avl_height = p->avl_height;
+		BUG_ON(delp[1] != &p->avl_left);
+		delp[1] = &t->avl_left; /* was &p->avl_left */
 	}
-	write_sequnlock_bh(&base->lock);
-
-	if (do_free)
-		call_rcu(&p->rcu, inetpeer_free_rcu);
-	else
-		/* The node is used again.  Decrease the reference counter
-		 * back.  The loop "cleanup -> unlink_from_unused
-		 *   -> unlink_from_pool -> putpeer -> link_to_unused
-		 *   -> cleanup (for the same node)"
-		 * doesn't really exist because the entry will have a
-		 * recent deletion time and will not be cleaned again soon.
-		 */
-		inet_putpeer(p);
+	peer_avl_rebalance(stack, stackptr, base);
+	base->total--;
+	call_rcu(&p->rcu, inetpeer_free_rcu);
 }
 
 static struct inet_peer_base *family_to_base(int family)
 {
-	return (family == AF_INET ? &v4_peers : &v6_peers);
+	return family == AF_INET ? &v4_peers : &v6_peers;
 }
 
-static struct inet_peer_base *peer_to_base(struct inet_peer *p)
+/* perform garbage collect on all items stacked during a lookup */
+static int inet_peer_gc(struct inet_peer_base *base,
+			struct inet_peer __rcu **stack[PEER_MAXDEPTH],
+			struct inet_peer __rcu ***stackptr)
 {
-	return family_to_base(p->daddr.family);
-}
+	struct inet_peer *p, *gchead = NULL;
+	__u32 delta, ttl;
+	int cnt = 0;
 
-/* May be called with local BH enabled. */
-static int cleanup_once(unsigned long ttl, struct inet_peer __rcu **stack[PEER_MAXDEPTH])
-{
-	struct inet_peer *p = NULL;
-
-	/* Remove the first entry from the list of unused nodes. */
-	spin_lock_bh(&unused_peers.lock);
-	if (!list_empty(&unused_peers.list)) {
-		__u32 delta;
-
-		p = list_first_entry(&unused_peers.list, struct inet_peer, unused);
+	if (base->total >= inet_peer_threshold)
+		ttl = 0; /* be aggressive */
+	else
+		ttl = inet_peer_maxttl
+				- (inet_peer_maxttl - inet_peer_minttl) / HZ *
+					base->total / inet_peer_threshold * HZ;
+	stackptr--; /* last stack slot is peer_avl_empty */
+	while (stackptr > stack) {
+		stackptr--;
+		p = rcu_deref_locked(**stackptr, base);
 		delta = (__u32)jiffies - p->dtime;
-
-		if (delta < ttl) {
-			/* Do not prune fresh entries. */
-			spin_unlock_bh(&unused_peers.lock);
-			return -1;
+		if (atomic_read(&p->refcnt) == 0 && delta >= ttl &&
+		    atomic_cmpxchg(&p->refcnt, 0, -1) == 0) {
+			p->gc_next = gchead;
+			gchead = p;
 		}
-
-		list_del_init(&p->unused);
-
-		/* Grab an extra reference to prevent node disappearing
-		 * before unlink_from_pool() call. */
-		atomic_inc(&p->refcnt);
 	}
-	spin_unlock_bh(&unused_peers.lock);
-
-	if (p == NULL)
-		/* It means that the total number of USED entries has
-		 * grown over inet_peer_threshold.  It shouldn't really
-		 * happen because of entry limits in route cache. */
-		return -1;
-
-	unlink_from_pool(p, peer_to_base(p), stack);
-	return 0;
+	while ((p = gchead) != NULL) {
+		gchead = p->gc_next;
+		cnt++;
+		unlink_from_pool(p, base, stack);
+	}
+	return cnt;
 }
 
-/* Called with or without local BH being disabled. */
 struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create)
 {
 	struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr;
 	struct inet_peer_base *base = family_to_base(daddr->family);
 	struct inet_peer *p;
 	unsigned int sequence;
-	int invalidated, newrefcnt = 0;
+	int invalidated, gccnt = 0;
 
-	/* Look up for the address quickly, lockless.
+	/* Attempt a lockless lookup first.
 	 * Because of a concurrent writer, we might not find an existing entry.
 	 */
 	rcu_read_lock();
 	sequence = read_seqbegin(&base->lock);
-	p = lookup_rcu(daddr, base, &newrefcnt);
+	p = lookup_rcu(daddr, base);
 	invalidated = read_seqretry(&base->lock, sequence);
 	rcu_read_unlock();
 
-	if (p) {
-found:		/* The existing node has been found.
-		 * Remove the entry from unused list if it was there.
-		 */
-		if (newrefcnt == 1)
-			unlink_from_unused(p);
+	if (p)
 		return p;
-	}
 
 	/* If no writer did a change during our lookup, we can return early. */
 	if (!create && !invalidated)
@@ -506,11 +416,17 @@
 	 * At least, nodes should be hot in our cache.
 	 */
 	write_seqlock_bh(&base->lock);
+relookup:
 	p = lookup(daddr, stack, base);
 	if (p != peer_avl_empty) {
-		newrefcnt = atomic_inc_return(&p->refcnt);
+		atomic_inc(&p->refcnt);
 		write_sequnlock_bh(&base->lock);
-		goto found;
+		return p;
+	}
+	if (!gccnt) {
+		gccnt = inet_peer_gc(base, stack, stackptr);
+		if (gccnt && create)
+			goto relookup;
 	}
 	p = create ? kmem_cache_alloc(peer_cachep, GFP_ATOMIC) : NULL;
 	if (p) {
@@ -525,7 +441,6 @@
 		p->pmtu_expires = 0;
 		p->pmtu_orig = 0;
 		memset(&p->redirect_learned, 0, sizeof(p->redirect_learned));
-		INIT_LIST_HEAD(&p->unused);
 
 
 		/* Link the node. */
@@ -534,63 +449,14 @@
 	}
 	write_sequnlock_bh(&base->lock);
 
-	if (base->total >= inet_peer_threshold)
-		/* Remove one less-recently-used entry. */
-		cleanup_once(0, stack);
-
 	return p;
 }
-
-static int compute_total(void)
-{
-	return v4_peers.total + v6_peers.total;
-}
 EXPORT_SYMBOL_GPL(inet_getpeer);
 
-/* Called with local BH disabled. */
-static void peer_check_expire(unsigned long dummy)
-{
-	unsigned long now = jiffies;
-	int ttl, total;
-	struct inet_peer __rcu **stack[PEER_MAXDEPTH];
-
-	total = compute_total();
-	if (total >= inet_peer_threshold)
-		ttl = inet_peer_minttl;
-	else
-		ttl = inet_peer_maxttl
-				- (inet_peer_maxttl - inet_peer_minttl) / HZ *
-					total / inet_peer_threshold * HZ;
-	while (!cleanup_once(ttl, stack)) {
-		if (jiffies != now)
-			break;
-	}
-
-	/* Trigger the timer after inet_peer_gc_mintime .. inet_peer_gc_maxtime
-	 * interval depending on the total number of entries (more entries,
-	 * less interval). */
-	total = compute_total();
-	if (total >= inet_peer_threshold)
-		peer_periodic_timer.expires = jiffies + inet_peer_gc_mintime;
-	else
-		peer_periodic_timer.expires = jiffies
-			+ inet_peer_gc_maxtime
-			- (inet_peer_gc_maxtime - inet_peer_gc_mintime) / HZ *
-				total / inet_peer_threshold * HZ;
-	add_timer(&peer_periodic_timer);
-}
-
 void inet_putpeer(struct inet_peer *p)
 {
-	local_bh_disable();
-
-	if (atomic_dec_and_lock(&p->refcnt, &unused_peers.lock)) {
-		list_add_tail(&p->unused, &unused_peers.list);
-		p->dtime = (__u32)jiffies;
-		spin_unlock(&unused_peers.lock);
-	}
-
-	local_bh_enable();
+	p->dtime = (__u32)jiffies;
+	atomic_dec(&p->refcnt);
 }
 EXPORT_SYMBOL_GPL(inet_putpeer);
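
With the periodic timer and unused list gone, reclaim now happens
opportunistically in inet_peer_gc() on the nodes stacked during a lookup,
using an idle TTL that shrinks linearly as the pool fills. The same
arithmetic, runnable in userspace:

	#include <stdio.h>

	#define HZ 1000
	static const unsigned int inet_peer_threshold = 65536;
	static const unsigned int inet_peer_minttl = 120 * HZ;
	static const unsigned int inet_peer_maxttl = 10 * 60 * HZ;

	/* same arithmetic as inet_peer_gc(): the idle TTL shrinks linearly
	 * from maxttl toward minttl as the pool fills; at the threshold
	 * the collector turns fully aggressive */
	static unsigned int gc_ttl(unsigned int total)
	{
		if (total >= inet_peer_threshold)
			return 0;
		return inet_peer_maxttl
			- (inet_peer_maxttl - inet_peer_minttl) / HZ *
				total / inet_peer_threshold * HZ;
	}

	int main(void)
	{
		unsigned int t;

		for (t = 0; t <= inet_peer_threshold;
		     t += inet_peer_threshold / 4)
			printf("total=%6u ttl=%u jiffies\n", t, gc_ttl(t));
		return 0;
	}
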
 
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 30a7763..aae2bd8 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -2544,7 +2544,8 @@
 		goto add_proto_fail;
 	}
 #endif
-	rtnl_register(RTNL_FAMILY_IPMR, RTM_GETROUTE, NULL, ipmr_rtm_dumproute);
+	rtnl_register(RTNL_FAMILY_IPMR, RTM_GETROUTE,
+		      NULL, ipmr_rtm_dumproute, NULL);
 	return 0;
 
 #ifdef CONFIG_IP_PIMSM_V2
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index aa13ef1..f24c335 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -3303,7 +3303,7 @@
 	xfrm_init();
 	xfrm4_init(ip_rt_max_size);
 #endif
-	rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL);
+	rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL, NULL);
 
 #ifdef CONFIG_SYSCTL
 	register_pernet_subsys(&sysctl_route_ops);
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 2646149..92bb943 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -316,6 +316,7 @@
 	ireq->wscale_ok		= tcp_opt.wscale_ok;
 	ireq->tstamp_ok		= tcp_opt.saw_tstamp;
 	req->ts_recent		= tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
+	treq->snt_synack	= tcp_opt.saw_tstamp ? tcp_opt.rcv_tsecr : 0;
 
 	/* We threw the options of the initial SYN away, so we hope
 	 * the ACK carries the same options again (see RFC1122 4.2.3.8)
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 57d0752..69fd720 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -398,20 +398,6 @@
 		.proc_handler	= proc_dointvec_jiffies,
 	},
 	{
-		.procname	= "inet_peer_gc_mintime",
-		.data		= &inet_peer_gc_mintime,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec_jiffies,
-	},
-	{
-		.procname	= "inet_peer_gc_maxtime",
-		.data		= &inet_peer_gc_maxtime,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec_jiffies,
-	},
-	{
 		.procname	= "tcp_orphan_retries",
 		.data		= &sysctl_tcp_orphan_retries,
 		.maxlen		= sizeof(int),
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index bef9f04..ea0d218 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -880,6 +880,11 @@
 		tp->snd_ssthresh = dst_metric(dst, RTAX_SSTHRESH);
 		if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
 			tp->snd_ssthresh = tp->snd_cwnd_clamp;
+	} else {
+		/* ssthresh may have been reduced unnecessarily during
+		 * 3WHS. Restore it back to its initial default.
+		 */
+		tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
 	}
 	if (dst_metric(dst, RTAX_REORDERING) &&
 	    tp->reordering != dst_metric(dst, RTAX_REORDERING)) {
@@ -887,10 +892,7 @@
 		tp->reordering = dst_metric(dst, RTAX_REORDERING);
 	}
 
-	if (dst_metric(dst, RTAX_RTT) == 0)
-		goto reset;
-
-	if (!tp->srtt && dst_metric_rtt(dst, RTAX_RTT) < (TCP_TIMEOUT_INIT << 3))
+	if (dst_metric(dst, RTAX_RTT) == 0 || tp->srtt == 0)
 		goto reset;
 
 	/* Initial rtt is determined from SYN,SYN-ACK.
@@ -916,19 +918,26 @@
 		tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
 	}
 	tcp_set_rto(sk);
-	if (inet_csk(sk)->icsk_rto < TCP_TIMEOUT_INIT && !tp->rx_opt.saw_tstamp) {
 reset:
-		/* Play conservative. If timestamps are not
-		 * supported, TCP will fail to recalculate correct
-		 * rtt, if initial rto is too small. FORGET ALL AND RESET!
+	if (tp->srtt == 0) {
+		/* RFC2988bis: We've failed to get a valid RTT sample from
+		 * 3WHS. This is most likely due to retransmission,
+		 * including spurious ones. Reset the RTO back to 3 secs
+		 * from the more aggressive 1 sec to avoid further
+		 * spurious retransmissions.
 		 */
-		if (!tp->rx_opt.saw_tstamp && tp->srtt) {
-			tp->srtt = 0;
-			tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_INIT;
-			inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
-		}
+		tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_FALLBACK;
+		inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
 	}
-	tp->snd_cwnd = tcp_init_cwnd(tp, dst);
+	/* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
+	 * retransmitted. In light of RFC2988bis' more aggressive 1 sec
+	 * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
+	 * retransmission has occurred.
+	 */
+	if (tp->total_retrans > 1)
+		tp->snd_cwnd = 1;
+	else
+		tp->snd_cwnd = tcp_init_cwnd(tp, dst);
 	tp->snd_cwnd_stamp = tcp_time_stamp;
 }
 
@@ -3112,12 +3121,13 @@
 	tcp_xmit_retransmit_queue(sk);
 }
 
-static void tcp_valid_rtt_meas(struct sock *sk, u32 seq_rtt)
+void tcp_valid_rtt_meas(struct sock *sk, u32 seq_rtt)
 {
 	tcp_rtt_estimator(sk, seq_rtt);
 	tcp_set_rto(sk);
 	inet_csk(sk)->icsk_backoff = 0;
 }
+EXPORT_SYMBOL(tcp_valid_rtt_meas);
 
 /* Read draft-ietf-tcplw-high-performance before mucking
  * with this code. (Supersedes RFC1323)
@@ -5806,12 +5816,6 @@
 					      tp->rx_opt.snd_wscale;
 				tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
 
-				/* tcp_ack considers this ACK as duplicate
-				 * and does not calculate rtt.
-				 * Force it here.
-				 */
-				tcp_ack_update_rtt(sk, 0, 0);
-
 				if (tp->rx_opt.tstamp_ok)
 					tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
 
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 708dc20..955b8e6 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -429,8 +429,8 @@
 			break;
 
 		icsk->icsk_backoff--;
-		inet_csk(sk)->icsk_rto = __tcp_set_rto(tp) <<
-					 icsk->icsk_backoff;
+		inet_csk(sk)->icsk_rto = (tp->srtt ? __tcp_set_rto(tp) :
+			TCP_TIMEOUT_INIT) << icsk->icsk_backoff;
 		tcp_bound_rto(sk);
 
 		skb = tcp_write_queue_head(sk);
@@ -1384,6 +1384,7 @@
 		isn = tcp_v4_init_sequence(skb);
 	}
 	tcp_rsk(req)->snt_isn = isn;
+	tcp_rsk(req)->snt_synack = tcp_time_stamp;
 
 	if (tcp_v4_send_synack(sk, dst, req,
 			       (struct request_values *)&tmp_ext) ||
@@ -1458,6 +1459,10 @@
 		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
 
 	tcp_initialize_rcv_mss(newsk);
+	if (tcp_rsk(req)->snt_synack)
+		tcp_valid_rtt_meas(newsk,
+		    tcp_time_stamp - tcp_rsk(req)->snt_synack);
+	newtp->total_retrans = req->retrans;
 
 #ifdef CONFIG_TCP_MD5SIG
 	/* Copy over the MD5 key from the original socket */
@@ -1855,7 +1860,7 @@
 	 * algorithms that we must have the following bandaid to talk
 	 * efficiently to them.  -DaveM
 	 */
-	tp->snd_cwnd = 2;
+	tp->snd_cwnd = TCP_INIT_CWND;
 
 	/* See draft-stevens-tcpca-spec-01 for discussion of the
 	 * initialization of these values.
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 80b1f80..d2fe4e0 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -486,7 +486,7 @@
 		 * algorithms that we must have the following bandaid to talk
 		 * efficiently to them.  -DaveM
 		 */
-		newtp->snd_cwnd = 2;
+		newtp->snd_cwnd = TCP_INIT_CWND;
 		newtp->snd_cwnd_cnt = 0;
 		newtp->bytes_acked = 0;
 
@@ -720,6 +720,10 @@
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
 		return NULL;
 	}
+	if (tmp_opt.saw_tstamp && tmp_opt.rcv_tsecr)
+		tcp_rsk(req)->snt_synack = tmp_opt.rcv_tsecr;
+	else if (req->retrans) /* don't take RTT sample if retrans && ~TS */
+		tcp_rsk(req)->snt_synack = 0;
 
 	/* OK, ACK is valid, create big socket and
 	 * feed this segment to it. It will repeat all
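
The tcp_input.c, tcp_ipv4.c and tcp_minisocks.c hunks above work together: the listener stamps snt_synack when the SYN-ACK goes out, seeds the child socket's RTT estimator from that round trip, and falls back to a conservative RTO only when no valid sample was obtained. A condensed sketch of the resulting flow (not a standalone function; TCP_TIMEOUT_FALLBACK is assumed to be the old 3 s initial RTO):

	/* 1. SYN received -- stamp the request as the SYN-ACK is sent */
	tcp_rsk(req)->snt_synack = tcp_time_stamp;

	/* 2. Final ACK received -- seed the child's RTT estimator, unless
	 *    snt_synack was cleared (retransmission without timestamps) */
	if (tcp_rsk(req)->snt_synack)
		tcp_valid_rtt_meas(newsk,
				   tcp_time_stamp - tcp_rsk(req)->snt_synack);
	newtp->total_retrans = req->retrans;

	/* 3. In tcp_init_metrics(): no sample means the 3WHS was probably
	 *    retransmitted, so back off from the 1 s initial RTO ... */
	if (tp->srtt == 0) {
		tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_FALLBACK;
		inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
	}
	/* ... and, per RFC 5681, collapse cwnd only after more than one
	 *    SYN/SYN-ACK retransmission */
	tp->snd_cwnd = (tp->total_retrans > 1) ? 1 : tcp_init_cwnd(tp, dst);
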
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 498b927..05838c7 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1559,6 +1559,11 @@
 	return -1;
 }
 
+static int addrconf_ifid_gre(u8 *eui, struct net_device *dev)
+{
+	return __ipv6_isatap_ifid(eui, *(__be32 *)dev->dev_addr);
+}
+
 static int ipv6_generate_eui64(u8 *eui, struct net_device *dev)
 {
 	switch (dev->type) {
@@ -1572,6 +1577,8 @@
 		return addrconf_ifid_infiniband(eui, dev);
 	case ARPHRD_SIT:
 		return addrconf_ifid_sit(eui, dev);
+	case ARPHRD_IPGRE:
+		return addrconf_ifid_gre(eui, dev);
 	}
 	return -1;
 }
@@ -2423,6 +2430,29 @@
 }
 #endif
 
+#if defined(CONFIG_NET_IPGRE) || defined(CONFIG_NET_IPGRE_MODULE)
+static void addrconf_gre_config(struct net_device *dev)
+{
+	struct inet6_dev *idev;
+	struct in6_addr addr;
+
+	pr_info("ipv6: addrconf_gre_config(%s)\n", dev->name);
+
+	ASSERT_RTNL();
+
+	if ((idev = ipv6_find_idev(dev)) == NULL) {
+		printk(KERN_DEBUG "init gre: add_dev failed\n");
+		return;
+	}
+
+	ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0);
+	addrconf_prefix_route(&addr, 64, dev, 0, 0);
+
+	if (!ipv6_generate_eui64(addr.s6_addr + 8, dev))
+		addrconf_add_linklocal(idev, &addr);
+}
+#endif
+
 static inline int
 ipv6_inherit_linklocal(struct inet6_dev *idev, struct net_device *link_dev)
 {
@@ -2539,6 +2569,11 @@
 			addrconf_sit_config(dev);
 			break;
 #endif
+#if defined(CONFIG_NET_IPGRE) || defined(CONFIG_NET_IPGRE_MODULE)
+		case ARPHRD_IPGRE:
+			addrconf_gre_config(dev);
+			break;
+#endif
 		case ARPHRD_TUNNEL6:
 			addrconf_ip6_tnl_config(dev);
 			break;
@@ -4692,16 +4727,20 @@
 	if (err < 0)
 		goto errout_af;
 
-	err = __rtnl_register(PF_INET6, RTM_GETLINK, NULL, inet6_dump_ifinfo);
+	err = __rtnl_register(PF_INET6, RTM_GETLINK, NULL, inet6_dump_ifinfo,
+			      NULL);
 	if (err < 0)
 		goto errout;
 
 	/* Only the first call to __rtnl_register can fail */
-	__rtnl_register(PF_INET6, RTM_NEWADDR, inet6_rtm_newaddr, NULL);
-	__rtnl_register(PF_INET6, RTM_DELADDR, inet6_rtm_deladdr, NULL);
-	__rtnl_register(PF_INET6, RTM_GETADDR, inet6_rtm_getaddr, inet6_dump_ifaddr);
-	__rtnl_register(PF_INET6, RTM_GETMULTICAST, NULL, inet6_dump_ifmcaddr);
-	__rtnl_register(PF_INET6, RTM_GETANYCAST, NULL, inet6_dump_ifacaddr);
+	__rtnl_register(PF_INET6, RTM_NEWADDR, inet6_rtm_newaddr, NULL, NULL);
+	__rtnl_register(PF_INET6, RTM_DELADDR, inet6_rtm_deladdr, NULL, NULL);
+	__rtnl_register(PF_INET6, RTM_GETADDR, inet6_rtm_getaddr,
+			inet6_dump_ifaddr, NULL);
+	__rtnl_register(PF_INET6, RTM_GETMULTICAST, NULL,
+			inet6_dump_ifmcaddr, NULL);
+	__rtnl_register(PF_INET6, RTM_GETANYCAST, NULL,
+			inet6_dump_ifacaddr, NULL);
 
 	ipv6_addr_label_rtnl_register();
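
For a concrete picture of what addrconf_gre_config() produces (an illustrative example, not taken from the patch): a GRE tunnel whose local IPv4 endpoint is 198.51.100.7 gets an ISATAP-style interface identifier embedding that address, roughly:

	IPv4 endpoint   198.51.100.7          -> bytes c6:33:64:07
	interface id    xx00:5efe:c633:6407   (xx encodes the universal/local bit)
	link-local      fe80::200:5efe:c633:6407 for a globally-unique endpoint
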
 
diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c
index c8993e5..2d8ddba 100644
--- a/net/ipv6/addrlabel.c
+++ b/net/ipv6/addrlabel.c
@@ -592,8 +592,11 @@
 
 void __init ipv6_addr_label_rtnl_register(void)
 {
-	__rtnl_register(PF_INET6, RTM_NEWADDRLABEL, ip6addrlbl_newdel, NULL);
-	__rtnl_register(PF_INET6, RTM_DELADDRLABEL, ip6addrlbl_newdel, NULL);
-	__rtnl_register(PF_INET6, RTM_GETADDRLABEL, ip6addrlbl_get, ip6addrlbl_dump);
+	__rtnl_register(PF_INET6, RTM_NEWADDRLABEL, ip6addrlbl_newdel,
+			NULL, NULL);
+	__rtnl_register(PF_INET6, RTM_DELADDRLABEL, ip6addrlbl_newdel,
+			NULL, NULL);
+	__rtnl_register(PF_INET6, RTM_GETADDRLABEL, ip6addrlbl_get,
+			ip6addrlbl_dump, NULL);
 }
 
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 4076a0b..3030bdf 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -1586,7 +1586,8 @@
 	if (ret)
 		goto out_kmem_cache_create;
 
-	ret = __rtnl_register(PF_INET6, RTM_GETROUTE, NULL, inet6_dump_fib);
+	ret = __rtnl_register(PF_INET6, RTM_GETROUTE, NULL, inet6_dump_fib,
+			      NULL);
 	if (ret)
 		goto out_unregister_subsys;
 out:
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 82a8099..705c828 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -1354,7 +1354,8 @@
 		goto add_proto_fail;
 	}
 #endif
-	rtnl_register(RTNL_FAMILY_IP6MR, RTM_GETROUTE, NULL, ip6mr_rtm_dumproute);
+	rtnl_register(RTNL_FAMILY_IP6MR, RTM_GETROUTE, NULL,
+		      ip6mr_rtm_dumproute, NULL);
 	return 0;
 #ifdef CONFIG_IPV6_PIMSM_V2
 add_proto_fail:
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index de2b1de..216ff31 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2925,9 +2925,9 @@
 		goto xfrm6_init;
 
 	ret = -ENOBUFS;
-	if (__rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL) ||
-	    __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL) ||
-	    __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL))
+	if (__rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL, NULL) ||
+	    __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL, NULL) ||
+	    __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL, NULL))
 		goto fib6_rules_init;
 
 	ret = register_netdevice_notifier(&ip6_route_dev_notifier);
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 8b9644a..89d5bf8 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -223,6 +223,7 @@
 	ireq->wscale_ok		= tcp_opt.wscale_ok;
 	ireq->tstamp_ok		= tcp_opt.saw_tstamp;
 	req->ts_recent		= tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
+	treq->snt_synack	= tcp_opt.saw_tstamp ? tcp_opt.rcv_tsecr : 0;
 	treq->rcv_isn = ntohl(th->seq) - 1;
 	treq->snt_isn = cookie;
 
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 87551ca..78aa534 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1341,6 +1341,7 @@
 	}
 have_isn:
 	tcp_rsk(req)->snt_isn = isn;
+	tcp_rsk(req)->snt_synack = tcp_time_stamp;
 
 	security_inet_conn_request(sk, skb, req);
 
@@ -1509,6 +1510,10 @@
 	tcp_sync_mss(newsk, dst_mtu(dst));
 	newtp->advmss = dst_metric_advmss(dst);
 	tcp_initialize_rcv_mss(newsk);
+	if (tcp_rsk(req)->snt_synack)
+		tcp_valid_rtt_meas(newsk,
+		    tcp_time_stamp - tcp_rsk(req)->snt_synack);
+	newtp->total_retrans = req->retrans;
 
 	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
 	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index cc61697..c24f25a 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -369,7 +369,7 @@
 {
 	struct irda_sock *self;
 
-	self = (struct irda_sock *) priv;
+	self = priv;
 	if (!self) {
 		IRDA_WARNING("%s: lost myself!\n", __func__);
 		return;
@@ -418,7 +418,7 @@
 
 	IRDA_DEBUG(2, "%s()\n", __func__);
 
-	self = (struct irda_sock *) priv;
+	self = priv;
 	if (!self) {
 		IRDA_WARNING("%s: lost myself!\n", __func__);
 		return;
diff --git a/net/irda/ircomm/ircomm_tty_attach.c b/net/irda/ircomm/ircomm_tty_attach.c
index 3c17540..b65d66e 100644
--- a/net/irda/ircomm/ircomm_tty_attach.c
+++ b/net/irda/ircomm/ircomm_tty_attach.c
@@ -382,7 +382,7 @@
 	info.daddr = discovery->daddr;
 	info.saddr = discovery->saddr;
 
-	self = (struct ircomm_tty_cb *) priv;
+	self = priv;
 	ircomm_tty_do_event(self, IRCOMM_TTY_DISCOVERY_INDICATION,
 			    NULL, &info);
 }
diff --git a/net/irda/irda_device.c b/net/irda/irda_device.c
index 25cc2e6..3eca35f 100644
--- a/net/irda/irda_device.c
+++ b/net/irda/irda_device.c
@@ -262,7 +262,7 @@
 
 	IRDA_DEBUG(2, "%s()\n", __func__);
 
-	task = (struct irda_task *) data;
+	task = data;
 
 	irda_task_kick(task);
 }
diff --git a/net/irda/iriap.c b/net/irda/iriap.c
index f876eed..e71e85b 100644
--- a/net/irda/iriap.c
+++ b/net/irda/iriap.c
@@ -305,7 +305,7 @@
 
 	IRDA_DEBUG(4, "%s(), reason=%s\n", __func__, irlmp_reasons[reason]);
 
-	self = (struct iriap_cb *) instance;
+	self = instance;
 
 	IRDA_ASSERT(self != NULL, return;);
 	IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
@@ -759,7 +759,7 @@
 {
 	struct iriap_cb *self;
 
-	self = (struct iriap_cb *) instance;
+	self = instance;
 
 	IRDA_ASSERT(self != NULL, return;);
 	IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
@@ -791,7 +791,7 @@
 
 	IRDA_DEBUG(1, "%s()\n", __func__);
 
-	self = (struct iriap_cb *) instance;
+	self = instance;
 
 	IRDA_ASSERT(skb != NULL, return;);
 	IRDA_ASSERT(self != NULL, goto out;);
@@ -839,7 +839,7 @@
 
 	IRDA_DEBUG(3, "%s()\n", __func__);
 
-	self = (struct iriap_cb *) instance;
+	self = instance;
 
 	IRDA_ASSERT(skb != NULL, return 0;);
 	IRDA_ASSERT(self != NULL, goto out;);
diff --git a/net/irda/irlan/irlan_client.c b/net/irda/irlan/irlan_client.c
index 7ed3af9..ba1a3fc 100644
--- a/net/irda/irlan/irlan_client.c
+++ b/net/irda/irlan/irlan_client.c
@@ -198,7 +198,7 @@
 
 	IRDA_DEBUG(2, "%s()\n", __func__ );
 
-	self = (struct irlan_cb *) instance;
+	self = instance;
 
 	IRDA_ASSERT(self != NULL, return -1;);
 	IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;);
@@ -226,8 +226,8 @@
 
 	IRDA_DEBUG(4, "%s(), reason=%d\n", __func__ , reason);
 
-	self = (struct irlan_cb *) instance;
-	tsap = (struct tsap_cb *) sap;
+	self = instance;
+	tsap = sap;
 
 	IRDA_ASSERT(self != NULL, return;);
 	IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
@@ -298,7 +298,7 @@
 
 	IRDA_DEBUG(4, "%s()\n", __func__ );
 
-	self = (struct irlan_cb *) instance;
+	self = instance;
 
 	IRDA_ASSERT(self != NULL, return;);
 	IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
@@ -542,7 +542,7 @@
 
 	IRDA_ASSERT(priv != NULL, return;);
 
-	self = (struct irlan_cb *) priv;
+	self = priv;
 	IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
 
 	/* We probably don't need to make any more queries */
diff --git a/net/irda/irlan/irlan_common.c b/net/irda/irlan/irlan_common.c
index 6130f9d..7791176 100644
--- a/net/irda/irlan/irlan_common.c
+++ b/net/irda/irlan/irlan_common.c
@@ -317,8 +317,8 @@
 
 	IRDA_DEBUG(2, "%s()\n", __func__ );
 
-	self = (struct irlan_cb *) instance;
-	tsap = (struct tsap_cb *) sap;
+	self = instance;
+	tsap = sap;
 
 	IRDA_ASSERT(self != NULL, return;);
 	IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
@@ -361,7 +361,7 @@
 {
 	struct irlan_cb *self;
 
-	self = (struct irlan_cb *) instance;
+	self = instance;
 
 	IRDA_ASSERT(self != NULL, return;);
 	IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
@@ -406,8 +406,8 @@
 
 	IRDA_DEBUG(0, "%s(), reason=%d\n", __func__ , reason);
 
-	self = (struct irlan_cb *) instance;
-	tsap = (struct tsap_cb *) sap;
+	self = instance;
+	tsap = sap;
 
 	IRDA_ASSERT(self != NULL, return;);
 	IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
diff --git a/net/irda/irlan/irlan_eth.c b/net/irda/irlan/irlan_eth.c
index 8ee1ff6..e8d5f44 100644
--- a/net/irda/irlan/irlan_eth.c
+++ b/net/irda/irlan/irlan_eth.c
@@ -272,7 +272,7 @@
 	struct irlan_cb *self;
 	struct net_device *dev;
 
-	self = (struct irlan_cb *) instance;
+	self = instance;
 
 	IRDA_ASSERT(self != NULL, return;);
 	IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
diff --git a/net/irda/irlan/irlan_provider.c b/net/irda/irlan/irlan_provider.c
index b8af74a..8b61cf0 100644
--- a/net/irda/irlan/irlan_provider.c
+++ b/net/irda/irlan/irlan_provider.c
@@ -73,7 +73,7 @@
 
 	IRDA_DEBUG(4, "%s()\n", __func__ );
 
-	self = (struct irlan_cb *) instance;
+	self = instance;
 
 	IRDA_ASSERT(self != NULL, return -1;);
 	IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;);
@@ -131,8 +131,8 @@
 
 	IRDA_DEBUG(0, "%s()\n", __func__ );
 
-	self = (struct irlan_cb *) instance;
-	tsap = (struct tsap_cb *) sap;
+	self = instance;
+	tsap = sap;
 
 	IRDA_ASSERT(self != NULL, return;);
 	IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
@@ -182,8 +182,8 @@
 
 	IRDA_DEBUG(4, "%s(), reason=%d\n", __func__ , reason);
 
-	self = (struct irlan_cb *) instance;
-	tsap = (struct tsap_cb *) sap;
+	self = instance;
+	tsap = sap;
 
 	IRDA_ASSERT(self != NULL, return;);
 	IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
diff --git a/net/irda/irqueue.c b/net/irda/irqueue.c
index 9715e6e..f06947c 100644
--- a/net/irda/irqueue.c
+++ b/net/irda/irqueue.c
@@ -780,7 +780,7 @@
 	/*
 	 * Search for entry
 	 */
-	entry = (irda_queue_t* ) hashbin_find( hashbin, hashv, name );
+	entry = hashbin_find(hashbin, hashv, name);
 
 	/* Release lock */
 	spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
@@ -813,7 +813,7 @@
 	 * This allow to check if the current item is still in the
 	 * hashbin or has been removed.
 	 */
-	entry = (irda_queue_t* ) hashbin_find( hashbin, hashv, name );
+	entry = hashbin_find(hashbin, hashv, name);
 
 	/*
 	 * Trick hashbin_get_next() to return what we want
diff --git a/net/irda/irttp.c b/net/irda/irttp.c
index 9d9af46..285ccd6 100644
--- a/net/irda/irttp.c
+++ b/net/irda/irttp.c
@@ -350,7 +350,7 @@
 {
 	struct tsap_cb *self;
 
-	self = (struct tsap_cb *) instance;
+	self = instance;
 
 	IRDA_ASSERT(self != NULL, return -1;);
 	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
@@ -879,7 +879,7 @@
 
 	IRDA_DEBUG(4, "%s()\n", __func__);
 
-	self = (struct tsap_cb *) instance;
+	self = instance;
 
 	IRDA_ASSERT(self != NULL, return -1;);
 	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
@@ -914,7 +914,7 @@
 	unsigned long flags;
 	int n;
 
-	self = (struct tsap_cb *) instance;
+	self = instance;
 
 	n = skb->data[0] & 0x7f;     /* Extract the credits */
 
@@ -996,7 +996,7 @@
 
 	IRDA_DEBUG(4, "%s()\n", __func__);
 
-	self = (struct tsap_cb *) instance;
+	self = instance;
 
 	IRDA_ASSERT(self != NULL, return;);
 	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
@@ -1025,7 +1025,7 @@
 {
 	struct tsap_cb *self;
 
-	self = (struct tsap_cb *) instance;
+	self = instance;
 
 	IRDA_ASSERT(self != NULL, return;);
 	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
@@ -1208,7 +1208,7 @@
 
 	IRDA_DEBUG(4, "%s()\n", __func__);
 
-	self = (struct tsap_cb *) instance;
+	self = instance;
 
 	IRDA_ASSERT(self != NULL, return;);
 	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
@@ -1292,13 +1292,13 @@
 	__u8 plen;
 	__u8 n;
 
-	self = (struct tsap_cb *) instance;
+	self = instance;
 
 	IRDA_ASSERT(self != NULL, return;);
 	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
 	IRDA_ASSERT(skb != NULL, return;);
 
-	lsap = (struct lsap_cb *) sap;
+	lsap = sap;
 
 	self->max_seg_size = max_seg_size - TTP_HEADER;
 	self->max_header_size = max_header_size+TTP_HEADER;
@@ -1602,7 +1602,7 @@
 
 	IRDA_DEBUG(4, "%s()\n", __func__);
 
-	self = (struct tsap_cb *) instance;
+	self = instance;
 
 	IRDA_ASSERT(self != NULL, return;);
 	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 8f92cf8..1e733e9 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -621,7 +621,7 @@
 	unsigned short family;
 	xfrm_address_t *xaddr;
 
-	sa = (const struct sadb_sa *) ext_hdrs[SADB_EXT_SA-1];
+	sa = ext_hdrs[SADB_EXT_SA - 1];
 	if (sa == NULL)
 		return NULL;
 
@@ -630,7 +630,7 @@
 		return NULL;
 
 	/* sadb_address_len should be checked by caller */
-	addr = (const struct sadb_address *) ext_hdrs[SADB_EXT_ADDRESS_DST-1];
+	addr = ext_hdrs[SADB_EXT_ADDRESS_DST - 1];
 	if (addr == NULL)
 		return NULL;
 
@@ -1039,7 +1039,7 @@
 	int err;
 
 
-	sa = (const struct sadb_sa *) ext_hdrs[SADB_EXT_SA-1];
+	sa = ext_hdrs[SADB_EXT_SA - 1];
 	if (!sa ||
 	    !present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC-1],
 				     ext_hdrs[SADB_EXT_ADDRESS_DST-1]))
@@ -1078,7 +1078,7 @@
 	     sa->sadb_sa_encrypt > SADB_X_CALG_MAX) ||
 	    sa->sadb_sa_encrypt > SADB_EALG_MAX)
 		return ERR_PTR(-EINVAL);
-	key = (const struct sadb_key*) ext_hdrs[SADB_EXT_KEY_AUTH-1];
+	key = ext_hdrs[SADB_EXT_KEY_AUTH - 1];
 	if (key != NULL &&
 	    sa->sadb_sa_auth != SADB_X_AALG_NULL &&
 	    ((key->sadb_key_bits+7) / 8 == 0 ||
@@ -1105,14 +1105,14 @@
 	if (sa->sadb_sa_flags & SADB_SAFLAGS_NOPMTUDISC)
 		x->props.flags |= XFRM_STATE_NOPMTUDISC;
 
-	lifetime = (const struct sadb_lifetime*) ext_hdrs[SADB_EXT_LIFETIME_HARD-1];
+	lifetime = ext_hdrs[SADB_EXT_LIFETIME_HARD - 1];
 	if (lifetime != NULL) {
 		x->lft.hard_packet_limit = _KEY2X(lifetime->sadb_lifetime_allocations);
 		x->lft.hard_byte_limit = _KEY2X(lifetime->sadb_lifetime_bytes);
 		x->lft.hard_add_expires_seconds = lifetime->sadb_lifetime_addtime;
 		x->lft.hard_use_expires_seconds = lifetime->sadb_lifetime_usetime;
 	}
-	lifetime = (const struct sadb_lifetime*) ext_hdrs[SADB_EXT_LIFETIME_SOFT-1];
+	lifetime = ext_hdrs[SADB_EXT_LIFETIME_SOFT - 1];
 	if (lifetime != NULL) {
 		x->lft.soft_packet_limit = _KEY2X(lifetime->sadb_lifetime_allocations);
 		x->lft.soft_byte_limit = _KEY2X(lifetime->sadb_lifetime_bytes);
@@ -1120,7 +1120,7 @@
 		x->lft.soft_use_expires_seconds = lifetime->sadb_lifetime_usetime;
 	}
 
-	sec_ctx = (const struct sadb_x_sec_ctx *) ext_hdrs[SADB_X_EXT_SEC_CTX-1];
+	sec_ctx = ext_hdrs[SADB_X_EXT_SEC_CTX - 1];
 	if (sec_ctx != NULL) {
 		struct xfrm_user_sec_ctx *uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx);
 
@@ -1134,7 +1134,7 @@
 			goto out;
 	}
 
-	key = (const struct sadb_key*) ext_hdrs[SADB_EXT_KEY_AUTH-1];
+	key = ext_hdrs[SADB_EXT_KEY_AUTH - 1];
 	if (sa->sadb_sa_auth) {
 		int keysize = 0;
 		struct xfrm_algo_desc *a = xfrm_aalg_get_byid(sa->sadb_sa_auth);
@@ -2219,7 +2219,7 @@
 	if (xp->selector.dport)
 		xp->selector.dport_mask = htons(0xffff);
 
-	sec_ctx = (struct sadb_x_sec_ctx *) ext_hdrs[SADB_X_EXT_SEC_CTX-1];
+	sec_ctx = ext_hdrs[SADB_X_EXT_SEC_CTX - 1];
 	if (sec_ctx != NULL) {
 		struct xfrm_user_sec_ctx *uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx);
 
@@ -2323,7 +2323,7 @@
 	if (sel.dport)
 		sel.dport_mask = htons(0xffff);
 
-	sec_ctx = (struct sadb_x_sec_ctx *) ext_hdrs[SADB_X_EXT_SEC_CTX-1];
+	sec_ctx = ext_hdrs[SADB_X_EXT_SEC_CTX - 1];
 	if (sec_ctx != NULL) {
 		struct xfrm_user_sec_ctx *uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx);
 
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index b6466e7..d21e7eb 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -480,18 +480,16 @@
 	if (connected)
 		rt = (struct rtable *) __sk_dst_check(sk, 0);
 
+	rcu_read_lock();
 	if (rt == NULL) {
-		struct ip_options_rcu *inet_opt;
+		const struct ip_options_rcu *inet_opt;
 
-		rcu_read_lock();
 		inet_opt = rcu_dereference(inet->inet_opt);
 
 		/* Use correct destination address if we have options. */
 		if (inet_opt && inet_opt->opt.srr)
 			daddr = inet_opt->opt.faddr;
 
-		rcu_read_unlock();
-
 		/* If this fails, the retransmit mechanism of the transport
 		 * layer will keep trying until the route appears or the
 		 * connection itself times out.
@@ -503,12 +501,20 @@
 					   sk->sk_bound_dev_if);
 		if (IS_ERR(rt))
 			goto no_route;
-		sk_setup_caps(sk, &rt->dst);
+		if (connected)
+			sk_setup_caps(sk, &rt->dst);
+		else
+			dst_release(&rt->dst); /* safe since we hold rcu_read_lock */
 	}
-	skb_dst_set(skb, dst_clone(&rt->dst));
+
+	/* We don't need to clone dst here; it is guaranteed not to disappear.
+	 * __dev_xmit_skb() might force a refcount if needed.
+	 */
+	skb_dst_set_noref(skb, &rt->dst);
 
 	/* Queue the packet to IP for output */
 	rc = ip_queue_xmit(skb, &inet->cork.fl);
+	rcu_read_unlock();
 
 error:
 	/* Update stats */
@@ -525,6 +531,7 @@
 	return rc;
 
 no_route:
+	rcu_read_unlock();
 	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
 	kfree_skb(skb);
 	rc = -EHOSTUNREACH;
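
The reworked l2tp_ip_sendmsg() path above depends on one invariant worth spelling out: skb_dst_set_noref() is only legal while the route cannot go away, which is why rcu_read_lock() now spans the lookup through ip_queue_xmit(), and why every exit path (including no_route) pairs it with rcu_read_unlock(). A condensed sketch of the pattern, under that assumption (lookup_or_cached_route is a hypothetical helper standing in for the lookup block):

	rcu_read_lock();
	rt = lookup_or_cached_route(sk);	/* hypothetical helper name */
	if (connected)
		sk_setup_caps(sk, &rt->dst);	/* cache the route on the socket */
	else
		dst_release(&rt->dst);		/* drop ref; RCU keeps dst alive */
	skb_dst_set_noref(skb, &rt->dst);	/* no refcount taken on the skb */
	rc = ip_queue_xmit(skb, &inet->cork.fl);
	rcu_read_unlock();
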
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 9c0d76c..89b0b2c 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -100,6 +100,21 @@
 	mutex_unlock(&sta->ampdu_mlme.mtx);
 }
 
+void ieee80211_stop_rx_ba_session(struct ieee80211_vif *vif, u16 ba_rx_bitmap,
+				  const u8 *addr)
+{
+	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+	struct sta_info *sta = sta_info_get(sdata, addr);
+	int i;
+
+	if (!sta)	/* station may already be gone; avoid a NULL deref */
+		return;
+
+	for (i = 0; i < STA_TID_NUM; i++)
+		if (ba_rx_bitmap & BIT(i))
+			set_bit(i, sta->ampdu_mlme.tid_rx_stop_requested);
+
+	ieee80211_queue_work(&sta->local->hw, &sta->ampdu_mlme.work);
+}
+EXPORT_SYMBOL(ieee80211_stop_rx_ba_session);
+
 /*
  * After accepting the AddBA Request we activated a timer,
  * resetting it after each frame that arrives from the originator.
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index be70c70..6e56c6e 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1554,6 +1554,19 @@
 
 	return local->ops->testmode_cmd(&local->hw, data, len);
 }
+
+static int ieee80211_testmode_dump(struct wiphy *wiphy,
+				   struct sk_buff *skb,
+				   struct netlink_callback *cb,
+				   void *data, int len)
+{
+	struct ieee80211_local *local = wiphy_priv(wiphy);
+
+	if (!local->ops->testmode_dump)
+		return -EOPNOTSUPP;
+
+	return local->ops->testmode_dump(&local->hw, skb, cb, data, len);
+}
 #endif
 
 int __ieee80211_request_smps(struct ieee80211_sub_if_data *sdata,
@@ -2134,6 +2147,7 @@
 	.set_wds_peer = ieee80211_set_wds_peer,
 	.rfkill_poll = ieee80211_rfkill_poll,
 	CFG80211_TESTMODE_CMD(ieee80211_testmode_cmd)
+	CFG80211_TESTMODE_DUMP(ieee80211_testmode_dump)
 	.set_power_mgmt = ieee80211_set_power_mgmt,
 	.set_bitrate_mask = ieee80211_set_bitrate_mask,
 	.remain_on_channel = ieee80211_remain_on_channel,
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 591add2..7cfc286 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -140,6 +140,12 @@
 				sta, tid, WLAN_BACK_RECIPIENT,
 				WLAN_REASON_QSTA_TIMEOUT, true);
 
+		if (test_and_clear_bit(tid,
+				       sta->ampdu_mlme.tid_rx_stop_requested))
+			___ieee80211_stop_rx_ba_session(
+				sta, tid, WLAN_BACK_RECIPIENT,
+				WLAN_REASON_UNSPECIFIED, true);
+
 		tid_tx = sta->ampdu_mlme.tid_start_tx[tid];
 		if (tid_tx) {
 			/*
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index 8adac67..58a8955 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -532,12 +532,21 @@
 	mp->hw = hw;
 	mp->update_interval = 100;
 
+#ifdef CONFIG_MAC80211_DEBUGFS
+	mp->fixed_rate_idx = (u32) -1;
+	mp->dbg_fixed_rate = debugfs_create_u32("fixed_rate_idx",
+			S_IRUGO | S_IWUGO, debugfsdir, &mp->fixed_rate_idx);
+#endif
+
 	return mp;
 }
 
 static void
 minstrel_free(void *priv)
 {
+#ifdef CONFIG_MAC80211_DEBUGFS
+	debugfs_remove(((struct minstrel_priv *)priv)->dbg_fixed_rate);
+#endif
 	kfree(priv);
 }
 
diff --git a/net/mac80211/rc80211_minstrel.h b/net/mac80211/rc80211_minstrel.h
index 0f5a833..5d278ec 100644
--- a/net/mac80211/rc80211_minstrel.h
+++ b/net/mac80211/rc80211_minstrel.h
@@ -78,6 +78,18 @@
 	unsigned int update_interval;
 	unsigned int lookaround_rate;
 	unsigned int lookaround_rate_mrr;
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+	/*
+	 * enable fixed rate processing per RC
+	 *   - write static index to debugfs:ieee80211/phyX/rc/fixed_rate_idx
+	 *   - write -1 to enable RC processing again
+	 *   - setting will be applied on next update
+	 */
+	u32 fixed_rate_idx;
+	struct dentry *dbg_fixed_rate;
+#endif
+
 };
 
 struct minstrel_debugfs_info {
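
As a usage note for the fixed_rate_idx knob added above (the path is assumed from the comment): writing a rate index to ieee80211/phyX/rc/fixed_rate_idx under debugfs pins minstrel to that rate at the next statistics update, and writing back -1 (the u32 value 0xffffffff it is initialised to) restores normal rate-control decisions.
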
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 333b511..66a1eeb 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -609,6 +609,13 @@
 
 	info->flags |= mi->tx_flags;
 	sample_idx = minstrel_get_sample_rate(mp, mi);
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+	/* use fixed index if set */
+	if (mp->fixed_rate_idx != -1)
+		sample_idx = mp->fixed_rate_idx;
+#endif
+
 	if (sample_idx >= 0) {
 		sample = true;
 		minstrel_ht_set_rate(mp, mi, &ar[0], sample_idx,
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index c6ae871..a06d64e 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -158,6 +158,8 @@
  * @work: work struct for starting/stopping aggregation
  * @tid_rx_timer_expired: bitmap indicating on which TIDs the
  *	RX timer expired until the work for it runs
+ * @tid_rx_stop_requested: bitmap indicating which BA sessions per TID the
+ *	driver requested to close until the work for it runs
  * @mtx: mutex to protect all TX data (except non-NULL assignments
  *	to tid_tx[idx], which are protected by the sta spinlock)
  */
@@ -166,6 +168,7 @@
 	/* rx */
 	struct tid_ampdu_rx __rcu *tid_rx[STA_TID_NUM];
 	unsigned long tid_rx_timer_expired[BITS_TO_LONGS(STA_TID_NUM)];
+	unsigned long tid_rx_stop_requested[BITS_TO_LONGS(STA_TID_NUM)];
 	/* tx */
 	struct work_struct work;
 	struct tid_ampdu_tx __rcu *tid_tx[STA_TID_NUM];
diff --git a/net/netfilter/ipset/Kconfig b/net/netfilter/ipset/Kconfig
index 2c5b348..ba36c28 100644
--- a/net/netfilter/ipset/Kconfig
+++ b/net/netfilter/ipset/Kconfig
@@ -109,6 +109,16 @@
 
 	  To compile it as a module, choose M here.  If unsure, say N.
 
+config IP_SET_HASH_NETIFACE
+	tristate "hash:net,iface set support"
+	depends on IP_SET
+	help
+	  This option adds the hash:net,iface set type support, by which
+	  one can store IPv4/IPv6 network address/prefix and
+	  interface name pairs as elements in a set.
+
+	  To compile it as a module, choose M here.  If unsure, say N.
+
 config IP_SET_LIST_SET
 	tristate "list:set set support"
 	depends on IP_SET
diff --git a/net/netfilter/ipset/Makefile b/net/netfilter/ipset/Makefile
index 5adbdab..6e965ec 100644
--- a/net/netfilter/ipset/Makefile
+++ b/net/netfilter/ipset/Makefile
@@ -19,6 +19,7 @@
 obj-$(CONFIG_IP_SET_HASH_IPPORTNET) += ip_set_hash_ipportnet.o
 obj-$(CONFIG_IP_SET_HASH_NET) += ip_set_hash_net.o
 obj-$(CONFIG_IP_SET_HASH_NETPORT) += ip_set_hash_netport.o
+obj-$(CONFIG_IP_SET_HASH_NETIFACE) += ip_set_hash_netiface.o
 
 # list types
 obj-$(CONFIG_IP_SET_LIST_SET) += ip_set_list_set.o
diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c
index ba2d166..e3e7399 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ip.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ip.c
@@ -54,7 +54,7 @@
 }
 
 static int
-bitmap_ip_test(struct ip_set *set, void *value, u32 timeout)
+bitmap_ip_test(struct ip_set *set, void *value, u32 timeout, u32 flags)
 {
 	const struct bitmap_ip *map = set->data;
 	u16 id = *(u16 *)value;
@@ -63,7 +63,7 @@
 }
 
 static int
-bitmap_ip_add(struct ip_set *set, void *value, u32 timeout)
+bitmap_ip_add(struct ip_set *set, void *value, u32 timeout, u32 flags)
 {
 	struct bitmap_ip *map = set->data;
 	u16 id = *(u16 *)value;
@@ -75,7 +75,7 @@
 }
 
 static int
-bitmap_ip_del(struct ip_set *set, void *value, u32 timeout)
+bitmap_ip_del(struct ip_set *set, void *value, u32 timeout, u32 flags)
 {
 	struct bitmap_ip *map = set->data;
 	u16 id = *(u16 *)value;
@@ -131,7 +131,7 @@
 /* Timeout variant */
 
 static int
-bitmap_ip_ttest(struct ip_set *set, void *value, u32 timeout)
+bitmap_ip_ttest(struct ip_set *set, void *value, u32 timeout, u32 flags)
 {
 	const struct bitmap_ip *map = set->data;
 	const unsigned long *members = map->members;
@@ -141,13 +141,13 @@
 }
 
 static int
-bitmap_ip_tadd(struct ip_set *set, void *value, u32 timeout)
+bitmap_ip_tadd(struct ip_set *set, void *value, u32 timeout, u32 flags)
 {
 	struct bitmap_ip *map = set->data;
 	unsigned long *members = map->members;
 	u16 id = *(u16 *)value;
 
-	if (ip_set_timeout_test(members[id]))
+	if (ip_set_timeout_test(members[id]) && !(flags & IPSET_FLAG_EXIST))
 		return -IPSET_ERR_EXIST;
 
 	members[id] = ip_set_timeout_set(timeout);
@@ -156,7 +156,7 @@
 }
 
 static int
-bitmap_ip_tdel(struct ip_set *set, void *value, u32 timeout)
+bitmap_ip_tdel(struct ip_set *set, void *value, u32 timeout, u32 flags)
 {
 	struct bitmap_ip *map = set->data;
 	unsigned long *members = map->members;
@@ -219,24 +219,25 @@
 
 static int
 bitmap_ip_kadt(struct ip_set *set, const struct sk_buff *skb,
-	       enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+	       const struct xt_action_param *par,
+	       enum ipset_adt adt, const struct ip_set_adt_opt *opt)
 {
 	struct bitmap_ip *map = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	u32 ip;
 
-	ip = ntohl(ip4addr(skb, flags & IPSET_DIM_ONE_SRC));
+	ip = ntohl(ip4addr(skb, opt->flags & IPSET_DIM_ONE_SRC));
 	if (ip < map->first_ip || ip > map->last_ip)
 		return -IPSET_ERR_BITMAP_RANGE;
 
 	ip = ip_to_id(map, ip);
 
-	return adtfn(set, &ip, map->timeout);
+	return adtfn(set, &ip, opt_timeout(opt, map), opt->cmdflags);
 }
 
 static int
 bitmap_ip_uadt(struct ip_set *set, struct nlattr *tb[],
-	       enum ipset_adt adt, u32 *lineno, u32 flags)
+	       enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
 {
 	struct bitmap_ip *map = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
@@ -266,7 +267,7 @@
 
 	if (adt == IPSET_TEST) {
 		id = ip_to_id(map, ip);
-		return adtfn(set, &id, timeout);
+		return adtfn(set, &id, timeout, flags);
 	}
 
 	if (tb[IPSET_ATTR_IP_TO]) {
@@ -283,8 +284,7 @@
 
 		if (cidr > 32)
 			return -IPSET_ERR_INVALID_CIDR;
-		ip &= ip_set_hostmask(cidr);
-		ip_to = ip | ~ip_set_hostmask(cidr);
+		ip_set_mask_from_to(ip, ip_to, cidr);
 	} else
 		ip_to = ip;
 
@@ -293,7 +293,7 @@
 
 	for (; !before(ip_to, ip); ip += map->hosts) {
 		id = ip_to_id(map, ip);
-		ret = adtfn(set, &id, timeout);
+		ret = adtfn(set, &id, timeout, flags);
 
 		if (ret && !ip_set_eexist(ret, flags))
 			return ret;
@@ -478,7 +478,7 @@
 
 		if (cidr >= 32)
 			return -IPSET_ERR_INVALID_CIDR;
-		last_ip = first_ip | ~ip_set_hostmask(cidr);
+		ip_set_mask_from_to(first_ip, last_ip, cidr);
 	} else
 		return -IPSET_ERR_PROTOCOL;
 
@@ -551,7 +551,8 @@
 	.features	= IPSET_TYPE_IP,
 	.dimension	= IPSET_DIM_ONE,
 	.family		= AF_INET,
-	.revision	= 0,
+	.revision_min	= 0,
+	.revision_max	= 0,
 	.create		= bitmap_ip_create,
 	.create_policy	= {
 		[IPSET_ATTR_IP]		= { .type = NLA_NESTED },
diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
index a274300..56096f5 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
@@ -99,7 +99,7 @@
 /* Base variant */
 
 static int
-bitmap_ipmac_test(struct ip_set *set, void *value, u32 timeout)
+bitmap_ipmac_test(struct ip_set *set, void *value, u32 timeout, u32 flags)
 {
 	const struct bitmap_ipmac *map = set->data;
 	const struct ipmac *data = value;
@@ -117,7 +117,7 @@
 }
 
 static int
-bitmap_ipmac_add(struct ip_set *set, void *value, u32 timeout)
+bitmap_ipmac_add(struct ip_set *set, void *value, u32 timeout, u32 flags)
 {
 	struct bitmap_ipmac *map = set->data;
 	const struct ipmac *data = value;
@@ -146,7 +146,7 @@
 }
 
 static int
-bitmap_ipmac_del(struct ip_set *set, void *value, u32 timeout)
+bitmap_ipmac_del(struct ip_set *set, void *value, u32 timeout, u32 flags)
 {
 	struct bitmap_ipmac *map = set->data;
 	const struct ipmac *data = value;
@@ -212,7 +212,7 @@
 /* Timeout variant */
 
 static int
-bitmap_ipmac_ttest(struct ip_set *set, void *value, u32 timeout)
+bitmap_ipmac_ttest(struct ip_set *set, void *value, u32 timeout, u32 flags)
 {
 	const struct bitmap_ipmac *map = set->data;
 	const struct ipmac *data = value;
@@ -231,15 +231,16 @@
 }
 
 static int
-bitmap_ipmac_tadd(struct ip_set *set, void *value, u32 timeout)
+bitmap_ipmac_tadd(struct ip_set *set, void *value, u32 timeout, u32 flags)
 {
 	struct bitmap_ipmac *map = set->data;
 	const struct ipmac *data = value;
 	struct ipmac_telem *elem = bitmap_ipmac_elem(map, data->id);
+	bool flag_exist = flags & IPSET_FLAG_EXIST;
 
 	switch (elem->match) {
 	case MAC_UNSET:
-		if (!data->ether)
+		if (!(data->ether || flag_exist))
 			/* Already added without ethernet address */
 			return -IPSET_ERR_EXIST;
 		/* Fill the MAC address and activate the timer */
@@ -251,7 +252,7 @@
 		elem->timeout = ip_set_timeout_set(timeout);
 		break;
 	case MAC_FILLED:
-		if (!bitmap_expired(map, data->id))
+		if (!(bitmap_expired(map, data->id) || flag_exist))
 			return -IPSET_ERR_EXIST;
 		/* Fall through */
 	case MAC_EMPTY:
@@ -273,7 +274,7 @@
 }
 
 static int
-bitmap_ipmac_tdel(struct ip_set *set, void *value, u32 timeout)
+bitmap_ipmac_tdel(struct ip_set *set, void *value, u32 timeout, u32 flags)
 {
 	struct bitmap_ipmac *map = set->data;
 	const struct ipmac *data = value;
@@ -337,17 +338,18 @@
 
 static int
 bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb,
-		  enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+		  const struct xt_action_param *par,
+		  enum ipset_adt adt, const struct ip_set_adt_opt *opt)
 {
 	struct bitmap_ipmac *map = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct ipmac data;
 
 	/* MAC can be src only */
-	if (!(flags & IPSET_DIM_TWO_SRC))
+	if (!(opt->flags & IPSET_DIM_TWO_SRC))
 		return 0;
 
-	data.id = ntohl(ip4addr(skb, flags & IPSET_DIM_ONE_SRC));
+	data.id = ntohl(ip4addr(skb, opt->flags & IPSET_DIM_ONE_SRC));
 	if (data.id < map->first_ip || data.id > map->last_ip)
 		return -IPSET_ERR_BITMAP_RANGE;
 
@@ -359,12 +361,12 @@
 	data.id -= map->first_ip;
 	data.ether = eth_hdr(skb)->h_source;
 
-	return adtfn(set, &data, map->timeout);
+	return adtfn(set, &data, opt_timeout(opt, map), opt->cmdflags);
 }
 
 static int
 bitmap_ipmac_uadt(struct ip_set *set, struct nlattr *tb[],
-		  enum ipset_adt adt, u32 *lineno, u32 flags)
+		  enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
 {
 	const struct bitmap_ipmac *map = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
@@ -399,7 +401,7 @@
 
 	data.id -= map->first_ip;
 
-	ret = adtfn(set, &data, timeout);
+	ret = adtfn(set, &data, timeout, flags);
 
 	return ip_set_eexist(ret, flags) ? 0 : ret;
 }
@@ -577,7 +579,7 @@
 
 		if (cidr >= 32)
 			return -IPSET_ERR_INVALID_CIDR;
-		last_ip = first_ip | ~ip_set_hostmask(cidr);
+		ip_set_mask_from_to(first_ip, last_ip, cidr);
 	} else
 		return -IPSET_ERR_PROTOCOL;
 
@@ -622,7 +624,8 @@
 	.features	= IPSET_TYPE_IP | IPSET_TYPE_MAC,
 	.dimension	= IPSET_DIM_TWO,
 	.family		= AF_INET,
-	.revision	= 0,
+	.revision_min	= 0,
+	.revision_max	= 0,
 	.create		= bitmap_ipmac_create,
 	.create_policy	= {
 		[IPSET_ATTR_IP]		= { .type = NLA_NESTED },
@@ -632,7 +635,8 @@
 	},
 	.adt_policy	= {
 		[IPSET_ATTR_IP]		= { .type = NLA_NESTED },
-		[IPSET_ATTR_ETHER]	= { .type = NLA_BINARY, .len  = ETH_ALEN },
+		[IPSET_ATTR_ETHER]	= { .type = NLA_BINARY,
+					    .len  = ETH_ALEN },
 		[IPSET_ATTR_TIMEOUT]	= { .type = NLA_U32 },
 		[IPSET_ATTR_LINENO]	= { .type = NLA_U32 },
 	},
diff --git a/net/netfilter/ipset/ip_set_bitmap_port.c b/net/netfilter/ipset/ip_set_bitmap_port.c
index 6b38eb8..29ba93b 100644
--- a/net/netfilter/ipset/ip_set_bitmap_port.c
+++ b/net/netfilter/ipset/ip_set_bitmap_port.c
@@ -40,7 +40,7 @@
 /* Base variant */
 
 static int
-bitmap_port_test(struct ip_set *set, void *value, u32 timeout)
+bitmap_port_test(struct ip_set *set, void *value, u32 timeout, u32 flags)
 {
 	const struct bitmap_port *map = set->data;
 	u16 id = *(u16 *)value;
@@ -49,7 +49,7 @@
 }
 
 static int
-bitmap_port_add(struct ip_set *set, void *value, u32 timeout)
+bitmap_port_add(struct ip_set *set, void *value, u32 timeout, u32 flags)
 {
 	struct bitmap_port *map = set->data;
 	u16 id = *(u16 *)value;
@@ -61,7 +61,7 @@
 }
 
 static int
-bitmap_port_del(struct ip_set *set, void *value, u32 timeout)
+bitmap_port_del(struct ip_set *set, void *value, u32 timeout, u32 flags)
 {
 	struct bitmap_port *map = set->data;
 	u16 id = *(u16 *)value;
@@ -119,7 +119,7 @@
 /* Timeout variant */
 
 static int
-bitmap_port_ttest(struct ip_set *set, void *value, u32 timeout)
+bitmap_port_ttest(struct ip_set *set, void *value, u32 timeout, u32 flags)
 {
 	const struct bitmap_port *map = set->data;
 	const unsigned long *members = map->members;
@@ -129,13 +129,13 @@
 }
 
 static int
-bitmap_port_tadd(struct ip_set *set, void *value, u32 timeout)
+bitmap_port_tadd(struct ip_set *set, void *value, u32 timeout, u32 flags)
 {
 	struct bitmap_port *map = set->data;
 	unsigned long *members = map->members;
 	u16 id = *(u16 *)value;
 
-	if (ip_set_timeout_test(members[id]))
+	if (ip_set_timeout_test(members[id]) && !(flags & IPSET_FLAG_EXIST))
 		return -IPSET_ERR_EXIST;
 
 	members[id] = ip_set_timeout_set(timeout);
@@ -144,7 +144,7 @@
 }
 
 static int
-bitmap_port_tdel(struct ip_set *set, void *value, u32 timeout)
+bitmap_port_tdel(struct ip_set *set, void *value, u32 timeout, u32 flags)
 {
 	struct bitmap_port *map = set->data;
 	unsigned long *members = map->members;
@@ -208,14 +208,16 @@
 
 static int
 bitmap_port_kadt(struct ip_set *set, const struct sk_buff *skb,
-		 enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+		 const struct xt_action_param *par,
+		 enum ipset_adt adt, const struct ip_set_adt_opt *opt)
 {
 	struct bitmap_port *map = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	__be16 __port;
 	u16 port = 0;
 
-	if (!ip_set_get_ip_port(skb, pf, flags & IPSET_DIM_ONE_SRC, &__port))
+	if (!ip_set_get_ip_port(skb, opt->family,
+				opt->flags & IPSET_DIM_ONE_SRC, &__port))
 		return -EINVAL;
 
 	port = ntohs(__port);
@@ -225,12 +227,12 @@
 
 	port -= map->first_port;
 
-	return adtfn(set, &port, map->timeout);
+	return adtfn(set, &port, opt_timeout(opt, map), opt->cmdflags);
 }
 
 static int
 bitmap_port_uadt(struct ip_set *set, struct nlattr *tb[],
-		 enum ipset_adt adt, u32 *lineno, u32 flags)
+		 enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
 {
 	struct bitmap_port *map = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
@@ -259,7 +261,7 @@
 
 	if (adt == IPSET_TEST) {
 		id = port - map->first_port;
-		return adtfn(set, &id, timeout);
+		return adtfn(set, &id, timeout, flags);
 	}
 
 	if (tb[IPSET_ATTR_PORT_TO]) {
@@ -277,7 +279,7 @@
 
 	for (; port <= port_to; port++) {
 		id = port - map->first_port;
-		ret = adtfn(set, &id, timeout);
+		ret = adtfn(set, &id, timeout, flags);
 
 		if (ret && !ip_set_eexist(ret, flags))
 			return ret;
@@ -482,7 +484,8 @@
 	.features	= IPSET_TYPE_PORT,
 	.dimension	= IPSET_DIM_ONE,
 	.family		= AF_UNSPEC,
-	.revision	= 0,
+	.revision_min	= 0,
+	.revision_max	= 0,
 	.create		= bitmap_port_create,
 	.create_policy	= {
 		[IPSET_ATTR_PORT]	= { .type = NLA_U16 },
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 42aa64b..c012985 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -21,6 +21,7 @@
 #include <net/netlink.h>
 
 #include <linux/netfilter.h>
+#include <linux/netfilter/x_tables.h>
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/ipset/ip_set.h>
 
@@ -70,7 +71,8 @@
 	list_for_each_entry_rcu(type, &ip_set_type_list, list)
 		if (STREQ(type->name, name) &&
 		    (type->family == family || type->family == AF_UNSPEC) &&
-		    type->revision == revision)
+		    revision >= type->revision_min &&
+		    revision <= type->revision_max)
 			return type;
 	return NULL;
 }
@@ -135,10 +137,10 @@
 		if (STREQ(type->name, name) &&
 		    (type->family == family || type->family == AF_UNSPEC)) {
 			found = true;
-			if (type->revision < *min)
-				*min = type->revision;
-			if (type->revision > *max)
-				*max = type->revision;
+			if (type->revision_min < *min)
+				*min = type->revision_min;
+			if (type->revision_max > *max)
+				*max = type->revision_max;
 		}
 	rcu_read_unlock();
 	if (found)
@@ -159,25 +161,27 @@
 	int ret = 0;
 
 	if (type->protocol != IPSET_PROTOCOL) {
-		pr_warning("ip_set type %s, family %s, revision %u uses "
+		pr_warning("ip_set type %s, family %s, revision %u:%u uses "
 			   "wrong protocol version %u (want %u)\n",
 			   type->name, family_name(type->family),
-			   type->revision, type->protocol, IPSET_PROTOCOL);
+			   type->revision_min, type->revision_max,
+			   type->protocol, IPSET_PROTOCOL);
 		return -EINVAL;
 	}
 
 	ip_set_type_lock();
-	if (find_set_type(type->name, type->family, type->revision)) {
+	if (find_set_type(type->name, type->family, type->revision_min)) {
 		/* Duplicate! */
-		pr_warning("ip_set type %s, family %s, revision %u "
+		pr_warning("ip_set type %s, family %s with revision min %u "
 			   "already registered!\n", type->name,
-			   family_name(type->family), type->revision);
+			   family_name(type->family), type->revision_min);
 		ret = -EINVAL;
 		goto unlock;
 	}
 	list_add_rcu(&type->list, &ip_set_type_list);
-	pr_debug("type %s, family %s, revision %u registered.\n",
-		 type->name, family_name(type->family), type->revision);
+	pr_debug("type %s, family %s, revision %u:%u registered.\n",
+		 type->name, family_name(type->family),
+		 type->revision_min, type->revision_max);
 unlock:
 	ip_set_type_unlock();
 	return ret;
@@ -189,15 +193,15 @@
 ip_set_type_unregister(struct ip_set_type *type)
 {
 	ip_set_type_lock();
-	if (!find_set_type(type->name, type->family, type->revision)) {
-		pr_warning("ip_set type %s, family %s, revision %u "
+	if (!find_set_type(type->name, type->family, type->revision_min)) {
+		pr_warning("ip_set type %s, family %s with revision min %u "
 			   "not registered\n", type->name,
-			   family_name(type->family), type->revision);
+			   family_name(type->family), type->revision_min);
 		goto unlock;
 	}
 	list_del_rcu(&type->list);
-	pr_debug("type %s, family %s, revision %u unregistered.\n",
-		 type->name, family_name(type->family), type->revision);
+	pr_debug("type %s, family %s with revision min %u unregistered.\n",
+		 type->name, family_name(type->family), type->revision_min);
 unlock:
 	ip_set_type_unlock();
 
@@ -325,7 +329,8 @@
 
 int
 ip_set_test(ip_set_id_t index, const struct sk_buff *skb,
-	    u8 family, u8 dim, u8 flags)
+	    const struct xt_action_param *par,
+	    const struct ip_set_adt_opt *opt)
 {
 	struct ip_set *set = ip_set_list[index];
 	int ret = 0;
@@ -333,19 +338,19 @@
 	BUG_ON(set == NULL);
 	pr_debug("set %s, index %u\n", set->name, index);
 
-	if (dim < set->type->dimension ||
-	    !(family == set->family || set->family == AF_UNSPEC))
+	if (opt->dim < set->type->dimension ||
+	    !(opt->family == set->family || set->family == AF_UNSPEC))
 		return 0;
 
 	read_lock_bh(&set->lock);
-	ret = set->variant->kadt(set, skb, IPSET_TEST, family, dim, flags);
+	ret = set->variant->kadt(set, skb, par, IPSET_TEST, opt);
 	read_unlock_bh(&set->lock);
 
 	if (ret == -EAGAIN) {
 		/* Type requests element to be completed */
 		pr_debug("element must be competed, ADD is triggered\n");
 		write_lock_bh(&set->lock);
-		set->variant->kadt(set, skb, IPSET_ADD, family, dim, flags);
+		set->variant->kadt(set, skb, par, IPSET_ADD, opt);
 		write_unlock_bh(&set->lock);
 		ret = 1;
 	}
@@ -357,7 +362,8 @@
 
 int
 ip_set_add(ip_set_id_t index, const struct sk_buff *skb,
-	   u8 family, u8 dim, u8 flags)
+	   const struct xt_action_param *par,
+	   const struct ip_set_adt_opt *opt)
 {
 	struct ip_set *set = ip_set_list[index];
 	int ret;
@@ -365,12 +371,12 @@
 	BUG_ON(set == NULL);
 	pr_debug("set %s, index %u\n", set->name, index);
 
-	if (dim < set->type->dimension ||
-	    !(family == set->family || set->family == AF_UNSPEC))
+	if (opt->dim < set->type->dimension ||
+	    !(opt->family == set->family || set->family == AF_UNSPEC))
 		return 0;
 
 	write_lock_bh(&set->lock);
-	ret = set->variant->kadt(set, skb, IPSET_ADD, family, dim, flags);
+	ret = set->variant->kadt(set, skb, par, IPSET_ADD, opt);
 	write_unlock_bh(&set->lock);
 
 	return ret;
@@ -379,7 +385,8 @@
 
 int
 ip_set_del(ip_set_id_t index, const struct sk_buff *skb,
-	   u8 family, u8 dim, u8 flags)
+	   const struct xt_action_param *par,
+	   const struct ip_set_adt_opt *opt)
 {
 	struct ip_set *set = ip_set_list[index];
 	int ret = 0;
@@ -387,12 +394,12 @@
 	BUG_ON(set == NULL);
 	pr_debug("set %s, index %u\n", set->name, index);
 
-	if (dim < set->type->dimension ||
-	    !(family == set->family || set->family == AF_UNSPEC))
+	if (opt->dim < set->type->dimension ||
+	    !(opt->family == set->family || set->family == AF_UNSPEC))
 		return 0;
 
 	write_lock_bh(&set->lock);
-	ret = set->variant->kadt(set, skb, IPSET_DEL, family, dim, flags);
+	ret = set->variant->kadt(set, skb, par, IPSET_DEL, opt);
 	write_unlock_bh(&set->lock);
 
 	return ret;
@@ -656,6 +663,7 @@
 	rwlock_init(&set->lock);
 	strlcpy(set->name, name, IPSET_MAXNAMELEN);
 	set->family = family;
+	set->revision = revision;
 
 	/*
 	 * Next, check that we know the type, and take
@@ -675,8 +683,8 @@
 	if (attr[IPSET_ATTR_DATA] &&
 	    nla_parse_nested(tb, IPSET_ATTR_CREATE_MAX, attr[IPSET_ATTR_DATA],
 			     set->type->create_policy)) {
-	    	ret = -IPSET_ERR_PROTOCOL;
-	    	goto put_out;
+		ret = -IPSET_ERR_PROTOCOL;
+		goto put_out;
 	}
 
 	ret = set->type->create(set, tb, flags);
@@ -696,7 +704,8 @@
 		    (flags & IPSET_FLAG_EXIST) &&
 		    STREQ(set->type->name, clash->type->name) &&
 		    set->type->family == clash->type->family &&
-		    set->type->revision == clash->type->revision &&
+		    set->type->revision_min == clash->type->revision_min &&
+		    set->type->revision_max == clash->type->revision_max &&
 		    set->variant->same_set(set, clash))
 			ret = 0;
 		goto cleanup;
@@ -939,10 +948,13 @@
 
 /* List/save set data */
 
-#define DUMP_INIT	0L
-#define DUMP_ALL	1L
-#define DUMP_ONE	2L
-#define DUMP_LAST	3L
+#define DUMP_INIT	0
+#define DUMP_ALL	1
+#define DUMP_ONE	2
+#define DUMP_LAST	3
+
+#define DUMP_TYPE(arg)		(((u32)(arg)) & 0x0000FFFF)
+#define DUMP_FLAGS(arg)		(((u32)(arg)) >> 16)
 
 static int
 ip_set_dump_done(struct netlink_callback *cb)
@@ -973,6 +985,7 @@
 	int min_len = NLMSG_SPACE(sizeof(struct nfgenmsg));
 	struct nlattr *cda[IPSET_ATTR_CMD_MAX+1];
 	struct nlattr *attr = (void *)nlh + min_len;
+	u32 dump_type;
 	ip_set_id_t index;
 
 	/* Second pass, so parser can't fail */
@@ -984,17 +997,22 @@
 	 *         [..]: type specific
 	 */
 
-	if (!cda[IPSET_ATTR_SETNAME]) {
-		cb->args[0] = DUMP_ALL;
-		return 0;
+	if (cda[IPSET_ATTR_SETNAME]) {
+		index = find_set_id(nla_data(cda[IPSET_ATTR_SETNAME]));
+		if (index == IPSET_INVALID_ID)
+			return -ENOENT;
+
+		dump_type = DUMP_ONE;
+		cb->args[1] = index;
+	} else
+		dump_type = DUMP_ALL;
+
+	if (cda[IPSET_ATTR_FLAGS]) {
+		u32 f = ip_set_get_h32(cda[IPSET_ATTR_FLAGS]);
+		dump_type |= (f << 16);
 	}
+	cb->args[0] = dump_type;
 
-	index = find_set_id(nla_data(cda[IPSET_ATTR_SETNAME]));
-	if (index == IPSET_INVALID_ID)
-		return -ENOENT;
-
-	cb->args[0] = DUMP_ONE;
-	cb->args[1] = index;
 	return 0;
 }
 
@@ -1005,9 +1023,10 @@
 	struct ip_set *set = NULL;
 	struct nlmsghdr *nlh = NULL;
 	unsigned int flags = NETLINK_CB(cb->skb).pid ? NLM_F_MULTI : 0;
+	u32 dump_type, dump_flags;
 	int ret = 0;
 
-	if (cb->args[0] == DUMP_INIT) {
+	if (!cb->args[0]) {
 		ret = dump_init(cb);
 		if (ret < 0) {
 			nlh = nlmsg_hdr(cb->skb);
@@ -1022,14 +1041,17 @@
 	if (cb->args[1] >= ip_set_max)
 		goto out;
 
-	max = cb->args[0] == DUMP_ONE ? cb->args[1] + 1 : ip_set_max;
+	dump_type = DUMP_TYPE(cb->args[0]);
+	dump_flags = DUMP_FLAGS(cb->args[0]);
+	max = dump_type == DUMP_ONE ? cb->args[1] + 1 : ip_set_max;
 dump_last:
-	pr_debug("args[0]: %ld args[1]: %ld\n", cb->args[0], cb->args[1]);
+	pr_debug("args[0]: %u %u args[1]: %ld\n",
+		 dump_type, dump_flags, cb->args[1]);
 	for (; cb->args[1] < max; cb->args[1]++) {
 		index = (ip_set_id_t) cb->args[1];
 		set = ip_set_list[index];
 		if (set == NULL) {
-			if (cb->args[0] == DUMP_ONE) {
+			if (dump_type == DUMP_ONE) {
 				ret = -ENOENT;
 				goto out;
 			}
@@ -1038,8 +1060,8 @@
 		/* When dumping all sets, we must dump "sorted"
 		 * so that lists (unions of sets) are dumped last.
 		 */
-		if (cb->args[0] != DUMP_ONE &&
-		    ((cb->args[0] == DUMP_ALL) ==
+		if (dump_type != DUMP_ONE &&
+		    ((dump_type == DUMP_ALL) ==
 		     !!(set->type->features & IPSET_DUMP_LAST)))
 			continue;
 		pr_debug("List set: %s\n", set->name);
@@ -1057,6 +1079,8 @@
 		}
 		NLA_PUT_U8(skb, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL);
 		NLA_PUT_STRING(skb, IPSET_ATTR_SETNAME, set->name);
+		if (dump_flags & IPSET_FLAG_LIST_SETNAME)
+			goto next_set;
 		switch (cb->args[2]) {
 		case 0:
 			/* Core header data */
@@ -1065,28 +1089,27 @@
 			NLA_PUT_U8(skb, IPSET_ATTR_FAMILY,
 				   set->family);
 			NLA_PUT_U8(skb, IPSET_ATTR_REVISION,
-				   set->type->revision);
+				   set->revision);
 			ret = set->variant->head(set, skb);
 			if (ret < 0)
 				goto release_refcount;
+			if (dump_flags & IPSET_FLAG_LIST_HEADER)
+				goto next_set;
 			/* Fall through and add elements */
 		default:
 			read_lock_bh(&set->lock);
 			ret = set->variant->list(set, skb, cb);
 			read_unlock_bh(&set->lock);
-			if (!cb->args[2]) {
+			if (!cb->args[2])
 				/* Set is done, proceed with next one */
-				if (cb->args[0] == DUMP_ONE)
-					cb->args[1] = IPSET_INVALID_ID;
-				else
-					cb->args[1]++;
-			}
+				goto next_set;
 			goto release_refcount;
 		}
 	}
 	/* If we dump all sets, continue with dumping last ones */
-	if (cb->args[0] == DUMP_ALL) {
-		cb->args[0] = DUMP_LAST;
+	if (dump_type == DUMP_ALL) {
+		dump_type = DUMP_LAST;
+		cb->args[0] = dump_type | (dump_flags << 16);
 		cb->args[1] = 0;
 		goto dump_last;
 	}
@@ -1094,6 +1117,11 @@
 
 nla_put_failure:
 	ret = -EFAULT;
+next_set:
+	if (dump_type == DUMP_ONE)
+		cb->args[1] = IPSET_INVALID_ID;
+	else
+		cb->args[1]++;
 release_refcount:
 	/* If there was an error or set is done, release set */
 	if (ret || !cb->args[2]) {
@@ -1120,7 +1148,7 @@
 
 	return netlink_dump_start(ctnl, skb, nlh,
 				  ip_set_dump_start,
-				  ip_set_dump_done);
+				  ip_set_dump_done, 0);
 }
 
 /* Add, del and test */
@@ -1139,17 +1167,18 @@
 	struct nlattr *tb[], enum ipset_adt adt,
 	u32 flags, bool use_lineno)
 {
-	int ret, retried = 0;
+	int ret;
 	u32 lineno = 0;
-	bool eexist = flags & IPSET_FLAG_EXIST;
+	bool eexist = flags & IPSET_FLAG_EXIST, retried = false;
 
 	do {
 		write_lock_bh(&set->lock);
-		ret = set->variant->uadt(set, tb, adt, &lineno, flags);
+		ret = set->variant->uadt(set, tb, adt, &lineno, flags, retried);
 		write_unlock_bh(&set->lock);
+		retried = true;
 	} while (ret == -EAGAIN &&
 		 set->variant->resize &&
-		 (ret = set->variant->resize(set, retried++)) == 0);
+		 (ret = set->variant->resize(set, retried)) == 0);
 
 	if (!ret || (ret == -IPSET_ERR_EXIST && eexist))
 		return 0;
@@ -1322,7 +1351,7 @@
 		return -IPSET_ERR_PROTOCOL;
 
 	read_lock_bh(&set->lock);
-	ret = set->variant->uadt(set, tb, IPSET_TEST, NULL, 0);
+	ret = set->variant->uadt(set, tb, IPSET_TEST, NULL, 0, 0);
 	read_unlock_bh(&set->lock);
 	/* Userspace can't trigger element to be re-added */
 	if (ret == -EAGAIN)
@@ -1365,7 +1394,7 @@
 	NLA_PUT_STRING(skb2, IPSET_ATTR_SETNAME, set->name);
 	NLA_PUT_STRING(skb2, IPSET_ATTR_TYPENAME, set->type->name);
 	NLA_PUT_U8(skb2, IPSET_ATTR_FAMILY, set->family);
-	NLA_PUT_U8(skb2, IPSET_ATTR_REVISION, set->type->revision);
+	NLA_PUT_U8(skb2, IPSET_ATTR_REVISION, set->revision);
 	nlmsg_end(skb2, nlh2);
 
 	ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
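
Two details in the ip_set_core.c hunks above are easy to miss: set type revisions are now matched as a [revision_min, revision_max] range against the revision userspace asked for (stored per set in set->revision), and the netlink dump state packs both the dump type and the LIST flags into cb->args[0]. A sketch of the latter layout, as implied by the DUMP_TYPE()/DUMP_FLAGS() macros:

	/* cb->args[0] bit layout (sketch):
	 *   bits  0-15  dump type  (DUMP_INIT/ALL/ONE/LAST)
	 *   bits 16-31  dump flags (e.g. IPSET_FLAG_LIST_SETNAME/_HEADER)
	 */
	cb->args[0] = dump_type | (dump_flags << 16);
	dump_type   = DUMP_TYPE(cb->args[0]);	/* arg & 0x0000FFFF */
	dump_flags  = DUMP_FLAGS(cb->args[0]);	/* arg >> 16 */
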
diff --git a/net/netfilter/ipset/ip_set_hash_ip.c b/net/netfilter/ipset/ip_set_hash_ip.c
index 43bcce2..fa80bb9 100644
--- a/net/netfilter/ipset/ip_set_hash_ip.c
+++ b/net/netfilter/ipset/ip_set_hash_ip.c
@@ -108,25 +108,32 @@
 #define HOST_MASK	32
 #include <linux/netfilter/ipset/ip_set_ahash.h>
 
+static inline void
+hash_ip4_data_next(struct ip_set_hash *h, const struct hash_ip4_elem *d)
+{
+	h->next.ip = ntohl(d->ip);
+}
+
 static int
 hash_ip4_kadt(struct ip_set *set, const struct sk_buff *skb,
-	      enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+	      const struct xt_action_param *par,
+	      enum ipset_adt adt, const struct ip_set_adt_opt *opt)
 {
 	const struct ip_set_hash *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	__be32 ip;
 
-	ip4addrptr(skb, flags & IPSET_DIM_ONE_SRC, &ip);
+	ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &ip);
 	ip &= ip_set_netmask(h->netmask);
 	if (ip == 0)
 		return -EINVAL;
 
-	return adtfn(set, &ip, h->timeout);
+	return adtfn(set, &ip, opt_timeout(opt, h), opt->cmdflags);
 }
 
 static int
 hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
-	      enum ipset_adt adt, u32 *lineno, u32 flags)
+	      enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
 {
 	const struct ip_set_hash *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
@@ -157,7 +164,7 @@
 		nip = htonl(ip);
 		if (nip == 0)
 			return -IPSET_ERR_HASH_ELEM;
-		return adtfn(set, &nip, timeout);
+		return adtfn(set, &nip, timeout, flags);
 	}
 
 	if (tb[IPSET_ATTR_IP_TO]) {
@@ -171,18 +178,19 @@
 
 		if (cidr > 32)
 			return -IPSET_ERR_INVALID_CIDR;
-		ip &= ip_set_hostmask(cidr);
-		ip_to = ip | ~ip_set_hostmask(cidr);
+		ip_set_mask_from_to(ip, ip_to, cidr);
 	} else
 		ip_to = ip;
 
 	hosts = h->netmask == 32 ? 1 : 2 << (32 - h->netmask - 1);
 
+	if (retried)
+		ip = h->next.ip;
 	for (; !before(ip_to, ip); ip += hosts) {
 		nip = htonl(ip);
 		if (nip == 0)
 			return -IPSET_ERR_HASH_ELEM;
-		ret = adtfn(set, &nip, timeout);
+		ret = adtfn(set, &nip, timeout, flags);
 
 		if (ret && !ip_set_eexist(ret, flags))
 			return ret;
@@ -281,20 +289,26 @@
 #define HOST_MASK	128
 #include <linux/netfilter/ipset/ip_set_ahash.h>
 
+static inline void
+hash_ip6_data_next(struct ip_set_hash *h, const struct hash_ip6_elem *d)
+{
+}
+
 static int
 hash_ip6_kadt(struct ip_set *set, const struct sk_buff *skb,
-	      enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+	      const struct xt_action_param *par,
+	      enum ipset_adt adt, const struct ip_set_adt_opt *opt)
 {
 	const struct ip_set_hash *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	union nf_inet_addr ip;
 
-	ip6addrptr(skb, flags & IPSET_DIM_ONE_SRC, &ip.in6);
+	ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &ip.in6);
 	ip6_netmask(&ip, h->netmask);
 	if (ipv6_addr_any(&ip.in6))
 		return -EINVAL;
 
-	return adtfn(set, &ip, h->timeout);
+	return adtfn(set, &ip, opt_timeout(opt, h), opt->cmdflags);
 }
 
 static const struct nla_policy hash_ip6_adt_policy[IPSET_ATTR_ADT_MAX + 1] = {
@@ -305,7 +319,7 @@
 
 static int
 hash_ip6_uadt(struct ip_set *set, struct nlattr *tb[],
-	      enum ipset_adt adt, u32 *lineno, u32 flags)
+	      enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
 {
 	const struct ip_set_hash *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
@@ -336,7 +350,7 @@
 		timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
 	}
 
-	ret = adtfn(set, &ip, timeout);
+	ret = adtfn(set, &ip, timeout, flags);
 
 	return ip_set_eexist(ret, flags) ? 0 : ret;
 }
@@ -428,7 +442,8 @@
 	.features	= IPSET_TYPE_IP,
 	.dimension	= IPSET_DIM_ONE,
 	.family		= AF_UNSPEC,
-	.revision	= 0,
+	.revision_min	= 0,
+	.revision_max	= 0,
 	.create		= hash_ip_create,
 	.create_policy	= {
 		[IPSET_ATTR_HASHSIZE]	= { .type = NLA_U32 },
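
[Editor's note: the per-set h->next cursor and the new "retried" argument introduced above implement a resume protocol for bulk additions: when the hash runs full mid-range, the add path returns -EAGAIN, the core resizes the table and re-invokes the handler with retried set, and the handler restarts from the recorded cursor instead of from the start of the range. A minimal userspace sketch of that protocol follows; the toy table and the retry driver are illustrative assumptions, not kernel code, and error checks are omitted for brevity.]

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static unsigned int *tbl;
static unsigned int used, cap = 4;
static struct { unsigned int ip; } next;	/* plays the role of h->next */

static int add_range(unsigned int from, unsigned int to, bool retried)
{
	unsigned int ip = retried ? next.ip : from;

	for (; ip <= to; ip++) {
		next.ip = ip;			/* record resume point */
		if (used == cap)
			return -EAGAIN;		/* table full: resize and retry */
		tbl[used++] = ip;
	}
	return 0;
}

int main(void)
{
	bool retried = false;
	int ret;

	tbl = malloc(cap * sizeof(*tbl));
	while ((ret = add_range(1, 10, retried)) == -EAGAIN) {
		cap *= 2;			/* stands in for the hash resize */
		tbl = realloc(tbl, cap * sizeof(*tbl));
		retried = true;
	}
	printf("stored %u entries, table grew to %u slots\n", used, cap);
	free(tbl);
	return ret;
}
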
diff --git a/net/netfilter/ipset/ip_set_hash_ipport.c b/net/netfilter/ipset/ip_set_hash_ipport.c
index 14281b6..bbf51b6 100644
--- a/net/netfilter/ipset/ip_set_hash_ipport.c
+++ b/net/netfilter/ipset/ip_set_hash_ipport.c
@@ -124,31 +124,40 @@
 #define HOST_MASK	32
 #include <linux/netfilter/ipset/ip_set_ahash.h>
 
+static inline void
+hash_ipport4_data_next(struct ip_set_hash *h,
+		       const struct hash_ipport4_elem *d)
+{
+	h->next.ip = ntohl(d->ip);
+	h->next.port = ntohs(d->port);
+}
+
 static int
 hash_ipport4_kadt(struct ip_set *set, const struct sk_buff *skb,
-		  enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+		  const struct xt_action_param *par,
+		  enum ipset_adt adt, const struct ip_set_adt_opt *opt)
 {
 	const struct ip_set_hash *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_ipport4_elem data = { };
 
-	if (!ip_set_get_ip4_port(skb, flags & IPSET_DIM_TWO_SRC,
+	if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
 				 &data.port, &data.proto))
 		return -EINVAL;
 
-	ip4addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip);
+	ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip);
 
-	return adtfn(set, &data, h->timeout);
+	return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags);
 }
 
 static int
 hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
-		  enum ipset_adt adt, u32 *lineno, u32 flags)
+		  enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
 {
 	const struct ip_set_hash *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_ipport4_elem data = { };
-	u32 ip, ip_to, p, port, port_to;
+	u32 ip, ip_to, p = 0, port, port_to;
 	u32 timeout = h->timeout;
 	bool with_ports = false;
 	int ret;
@@ -192,7 +201,7 @@
 	if (adt == IPSET_TEST ||
 	    !(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR] ||
 	      tb[IPSET_ATTR_PORT_TO])) {
-		ret = adtfn(set, &data, timeout);
+		ret = adtfn(set, &data, timeout, flags);
 		return ip_set_eexist(ret, flags) ? 0 : ret;
 	}
 
@@ -208,8 +217,7 @@
 
 		if (cidr > 32)
 			return -IPSET_ERR_INVALID_CIDR;
-		ip &= ip_set_hostmask(cidr);
-		ip_to = ip | ~ip_set_hostmask(cidr);
+		ip_set_mask_from_to(ip, ip_to, cidr);
 	} else
 		ip_to = ip;
 
@@ -220,17 +228,21 @@
 			swap(port, port_to);
 	}
 
-	for (; !before(ip_to, ip); ip++)
-		for (p = port; p <= port_to; p++) {
+	if (retried)
+		ip = h->next.ip;
+	for (; !before(ip_to, ip); ip++) {
+		p = retried && ip == h->next.ip ? h->next.port : port;
+		for (; p <= port_to; p++) {
 			data.ip = htonl(ip);
 			data.port = htons(p);
-			ret = adtfn(set, &data, timeout);
+			ret = adtfn(set, &data, timeout, flags);
 
 			if (ret && !ip_set_eexist(ret, flags))
 				return ret;
 			else
 				ret = 0;
 		}
+	}
 	return ret;
 }
 
@@ -328,26 +340,34 @@
 #define HOST_MASK	128
 #include <linux/netfilter/ipset/ip_set_ahash.h>
 
+static inline void
+hash_ipport6_data_next(struct ip_set_hash *h,
+		       const struct hash_ipport6_elem *d)
+{
+	h->next.port = ntohs(d->port);
+}
+
 static int
 hash_ipport6_kadt(struct ip_set *set, const struct sk_buff *skb,
-		  enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+		  const struct xt_action_param *par,
+		  enum ipset_adt adt, const struct ip_set_adt_opt *opt)
 {
 	const struct ip_set_hash *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_ipport6_elem data = { };
 
-	if (!ip_set_get_ip6_port(skb, flags & IPSET_DIM_TWO_SRC,
+	if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
 				 &data.port, &data.proto))
 		return -EINVAL;
 
-	ip6addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip.in6);
+	ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip.in6);
 
-	return adtfn(set, &data, h->timeout);
+	return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags);
 }
 
 static int
 hash_ipport6_uadt(struct ip_set *set, struct nlattr *tb[],
-		  enum ipset_adt adt, u32 *lineno, u32 flags)
+		  enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
 {
 	const struct ip_set_hash *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
@@ -396,7 +416,7 @@
 	}
 
 	if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) {
-		ret = adtfn(set, &data, timeout);
+		ret = adtfn(set, &data, timeout, flags);
 		return ip_set_eexist(ret, flags) ? 0 : ret;
 	}
 
@@ -405,9 +425,11 @@
 	if (port > port_to)
 		swap(port, port_to);
 
+	if (retried)
+		port = h->next.port;
 	for (; port <= port_to; port++) {
 		data.port = htons(port);
-		ret = adtfn(set, &data, timeout);
+		ret = adtfn(set, &data, timeout, flags);
 
 		if (ret && !ip_set_eexist(ret, flags))
 			return ret;
@@ -491,7 +513,8 @@
 	.features	= IPSET_TYPE_IP | IPSET_TYPE_PORT,
 	.dimension	= IPSET_DIM_TWO,
 	.family		= AF_UNSPEC,
-	.revision	= 1,
+	.revision_min	= 0,
+	.revision_max	= 1,	/* SCTP and UDPLITE support added */
 	.create		= hash_ipport_create,
 	.create_policy	= {
 		[IPSET_ATTR_HASHSIZE]	= { .type = NLA_U32 },
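
[Editor's note: the kadt paths above now take the timeout and command flags from the per-invocation opt instead of always using the set defaults, so a SET target rule can override them. The opt_timeout() helper itself is not part of this diff; a plausible definition, consistent with how the call sites read, would be the following sketch.]

/* Assumed shape of the helper (it lives in the ipset headers, not in
 * this patch): prefer a timeout supplied with the rule, fall back to
 * the set's default otherwise. */
#define opt_timeout(opt, h)						\
	(with_timeout((opt)->timeout) ? (opt)->timeout : (h)->timeout)
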
diff --git a/net/netfilter/ipset/ip_set_hash_ipportip.c b/net/netfilter/ipset/ip_set_hash_ipportip.c
index 401c8a2..96525f5 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportip.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportip.c
@@ -127,32 +127,41 @@
 #define HOST_MASK	32
 #include <linux/netfilter/ipset/ip_set_ahash.h>
 
+static inline void
+hash_ipportip4_data_next(struct ip_set_hash *h,
+			 const struct hash_ipportip4_elem *d)
+{
+	h->next.ip = ntohl(d->ip);
+	h->next.port = ntohs(d->port);
+}
+
 static int
 hash_ipportip4_kadt(struct ip_set *set, const struct sk_buff *skb,
-		    enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+		    const struct xt_action_param *par,
+		    enum ipset_adt adt, const struct ip_set_adt_opt *opt)
 {
 	const struct ip_set_hash *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_ipportip4_elem data = { };
 
-	if (!ip_set_get_ip4_port(skb, flags & IPSET_DIM_TWO_SRC,
+	if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
 				 &data.port, &data.proto))
 		return -EINVAL;
 
-	ip4addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip);
-	ip4addrptr(skb, flags & IPSET_DIM_THREE_SRC, &data.ip2);
+	ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip);
+	ip4addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &data.ip2);
 
-	return adtfn(set, &data, h->timeout);
+	return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags);
 }
 
 static int
 hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
-		    enum ipset_adt adt, u32 *lineno, u32 flags)
+		    enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
 {
 	const struct ip_set_hash *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_ipportip4_elem data = { };
-	u32 ip, ip_to, p, port, port_to;
+	u32 ip, ip_to, p = 0, port, port_to;
 	u32 timeout = h->timeout;
 	bool with_ports = false;
 	int ret;
@@ -200,7 +209,7 @@
 	if (adt == IPSET_TEST ||
 	    !(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR] ||
 	      tb[IPSET_ATTR_PORT_TO])) {
-		ret = adtfn(set, &data, timeout);
+		ret = adtfn(set, &data, timeout, flags);
 		return ip_set_eexist(ret, flags) ? 0 : ret;
 	}
 
@@ -216,8 +225,7 @@
 
 		if (cidr > 32)
 			return -IPSET_ERR_INVALID_CIDR;
-		ip &= ip_set_hostmask(cidr);
-		ip_to = ip | ~ip_set_hostmask(cidr);
+		ip_set_mask_from_to(ip, ip_to, cidr);
 	} else
 		ip_to = ip;
 
@@ -228,17 +236,21 @@
 			swap(port, port_to);
 	}
 
-	for (; !before(ip_to, ip); ip++)
-		for (p = port; p <= port_to; p++) {
+	if (retried)
+		ip = h->next.ip;
+	for (; !before(ip_to, ip); ip++) {
+		p = retried && ip == h->next.ip ? h->next.port : port;
+		for (; p <= port_to; p++) {
 			data.ip = htonl(ip);
 			data.port = htons(p);
-			ret = adtfn(set, &data, timeout);
+			ret = adtfn(set, &data, timeout, flags);
 
 			if (ret && !ip_set_eexist(ret, flags))
 				return ret;
 			else
 				ret = 0;
 		}
+	}
 	return ret;
 }
 
@@ -341,27 +353,35 @@
 #define HOST_MASK	128
 #include <linux/netfilter/ipset/ip_set_ahash.h>
 
+static inline void
+hash_ipportip6_data_next(struct ip_set_hash *h,
+			 const struct hash_ipportip6_elem *d)
+{
+	h->next.port = ntohs(d->port);
+}
+
 static int
 hash_ipportip6_kadt(struct ip_set *set, const struct sk_buff *skb,
-		    enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+		    const struct xt_action_param *par,
+		    enum ipset_adt adt, const struct ip_set_adt_opt *opt)
 {
 	const struct ip_set_hash *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_ipportip6_elem data = { };
 
-	if (!ip_set_get_ip6_port(skb, flags & IPSET_DIM_TWO_SRC,
+	if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
 				 &data.port, &data.proto))
 		return -EINVAL;
 
-	ip6addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip.in6);
-	ip6addrptr(skb, flags & IPSET_DIM_THREE_SRC, &data.ip2.in6);
+	ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip.in6);
+	ip6addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &data.ip2.in6);
 
-	return adtfn(set, &data, h->timeout);
+	return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags);
 }
 
 static int
 hash_ipportip6_uadt(struct ip_set *set, struct nlattr *tb[],
-		    enum ipset_adt adt, u32 *lineno, u32 flags)
+		    enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
 {
 	const struct ip_set_hash *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
@@ -414,7 +434,7 @@
 	}
 
 	if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) {
-		ret = adtfn(set, &data, timeout);
+		ret = adtfn(set, &data, timeout, flags);
 		return ip_set_eexist(ret, flags) ? 0 : ret;
 	}
 
@@ -423,9 +443,11 @@
 	if (port > port_to)
 		swap(port, port_to);
 
+	if (retried)
+		port = h->next.port;
 	for (; port <= port_to; port++) {
 		data.port = htons(port);
-		ret = adtfn(set, &data, timeout);
+		ret = adtfn(set, &data, timeout, flags);
 
 		if (ret && !ip_set_eexist(ret, flags))
 			return ret;
@@ -509,7 +531,8 @@
 	.features	= IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2,
 	.dimension	= IPSET_DIM_THREE,
 	.family		= AF_UNSPEC,
-	.revision	= 1,
+	.revision_min	= 0,
+	.revision_max	= 1,	/* SCTP and UDPLITE support added */
 	.create		= hash_ipportip_create,
 	.create_policy	= {
 		[IPSET_ATTR_HASHSIZE]	= { .type = NLA_U32 },
diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c
index 565a7c5..d2d6ab8 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c
@@ -140,9 +140,19 @@
 #define HOST_MASK	32
 #include <linux/netfilter/ipset/ip_set_ahash.h>
 
+static inline void
+hash_ipportnet4_data_next(struct ip_set_hash *h,
+			  const struct hash_ipportnet4_elem *d)
+{
+	h->next.ip = ntohl(d->ip);
+	h->next.port = ntohs(d->port);
+	h->next.ip2 = ntohl(d->ip2);
+}
+
 static int
 hash_ipportnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
-		     enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+		     const struct xt_action_param *par,
+		     enum ipset_adt adt, const struct ip_set_adt_opt *opt)
 {
 	const struct ip_set_hash *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
@@ -155,25 +165,26 @@
 	if (adt == IPSET_TEST)
 		data.cidr = HOST_MASK;
 
-	if (!ip_set_get_ip4_port(skb, flags & IPSET_DIM_TWO_SRC,
+	if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
 				 &data.port, &data.proto))
 		return -EINVAL;
 
-	ip4addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip);
-	ip4addrptr(skb, flags & IPSET_DIM_THREE_SRC, &data.ip2);
+	ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip);
+	ip4addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &data.ip2);
 	data.ip2 &= ip_set_netmask(data.cidr);
 
-	return adtfn(set, &data, h->timeout);
+	return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags);
 }
 
 static int
 hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
-		     enum ipset_adt adt, u32 *lineno, u32 flags)
+		     enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
 {
 	const struct ip_set_hash *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_ipportnet4_elem data = { .cidr = HOST_MASK };
-	u32 ip, ip_to, p, port, port_to;
+	u32 ip, ip_to, p = 0, port, port_to;
+	u32 ip2_from = 0, ip2_to, ip2_last, ip2;
 	u32 timeout = h->timeout;
 	bool with_ports = false;
 	int ret;
@@ -187,21 +198,19 @@
 	if (tb[IPSET_ATTR_LINENO])
 		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
 
-	ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &data.ip);
+	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
 	if (ret)
 		return ret;
 
-	ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP2], &data.ip2);
+	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2], &ip2_from);
 	if (ret)
 		return ret;
 
-	if (tb[IPSET_ATTR_CIDR2])
+	if (tb[IPSET_ATTR_CIDR2]) {
 		data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
-
-	if (!data.cidr)
-		return -IPSET_ERR_INVALID_CIDR;
-
-	data.ip2 &= ip_set_netmask(data.cidr);
+		if (!data.cidr)
+			return -IPSET_ERR_INVALID_CIDR;
+	}
 
 	if (tb[IPSET_ATTR_PORT])
 		data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
@@ -226,14 +235,16 @@
 		timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
 	}
 
+	with_ports = with_ports && tb[IPSET_ATTR_PORT_TO];
 	if (adt == IPSET_TEST ||
-	    !(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR] ||
-	      tb[IPSET_ATTR_PORT_TO])) {
-		ret = adtfn(set, &data, timeout);
+	    !(tb[IPSET_ATTR_CIDR] || tb[IPSET_ATTR_IP_TO] || with_ports ||
+	      tb[IPSET_ATTR_IP2_TO])) {
+		data.ip = htonl(ip);
+		data.ip2 = htonl(ip2_from & ip_set_hostmask(data.cidr));
+		ret = adtfn(set, &data, timeout, flags);
 		return ip_set_eexist(ret, flags) ? 0 : ret;
 	}
 
-	ip = ntohl(data.ip);
 	if (tb[IPSET_ATTR_IP_TO]) {
 		ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
 		if (ret)
@@ -245,29 +256,50 @@
 
 		if (cidr > 32)
 			return -IPSET_ERR_INVALID_CIDR;
-		ip &= ip_set_hostmask(cidr);
-		ip_to = ip | ~ip_set_hostmask(cidr);
-	} else
-		ip_to = ip;
+		ip_set_mask_from_to(ip, ip_to, cidr);
+	}
 
 	port_to = port = ntohs(data.port);
-	if (with_ports && tb[IPSET_ATTR_PORT_TO]) {
+	if (tb[IPSET_ATTR_PORT_TO]) {
 		port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
 		if (port > port_to)
 			swap(port, port_to);
 	}
+	if (tb[IPSET_ATTR_IP2_TO]) {
+		ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2_TO], &ip2_to);
+		if (ret)
+			return ret;
+		if (ip2_from > ip2_to)
+			swap(ip2_from, ip2_to);
+		if (ip2_from + UINT_MAX == ip2_to)
+			return -IPSET_ERR_HASH_RANGE;
+	} else {
+		ip_set_mask_from_to(ip2_from, ip2_to, data.cidr);
+	}
 
-	for (; !before(ip_to, ip); ip++)
-		for (p = port; p <= port_to; p++) {
-			data.ip = htonl(ip);
+	if (retried)
+		ip = h->next.ip;
+	for (; !before(ip_to, ip); ip++) {
+		data.ip = htonl(ip);
+		p = retried && ip == h->next.ip ? h->next.port : port;
+		for (; p <= port_to; p++) {
 			data.port = htons(p);
-			ret = adtfn(set, &data, timeout);
+			ip2 = retried && ip == h->next.ip && p == h->next.port
+				? h->next.ip2 : ip2_from;
+			while (!after(ip2, ip2_to)) {
+				data.ip2 = htonl(ip2);
+				ip2_last = ip_set_range_to_cidr(ip2, ip2_to,
+								&data.cidr);
+				ret = adtfn(set, &data, timeout, flags);
 
-			if (ret && !ip_set_eexist(ret, flags))
-				return ret;
-			else
-				ret = 0;
+				if (ret && !ip_set_eexist(ret, flags))
+					return ret;
+				else
+					ret = 0;
+				ip2 = ip2_last + 1;
+			}
 		}
+	}
 	return ret;
 }
 
@@ -389,9 +421,17 @@
 #define HOST_MASK	128
 #include <linux/netfilter/ipset/ip_set_ahash.h>
 
+static inline void
+hash_ipportnet6_data_next(struct ip_set_hash *h,
+			  const struct hash_ipportnet6_elem *d)
+{
+	h->next.port = ntohs(d->port);
+}
+
 static int
 hash_ipportnet6_kadt(struct ip_set *set, const struct sk_buff *skb,
-		     enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+		     const struct xt_action_param *par,
+		     enum ipset_adt adt, const struct ip_set_adt_opt *opt)
 {
 	const struct ip_set_hash *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
@@ -404,20 +444,20 @@
 	if (adt == IPSET_TEST)
 		data.cidr = HOST_MASK;
 
-	if (!ip_set_get_ip6_port(skb, flags & IPSET_DIM_TWO_SRC,
+	if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
 				 &data.port, &data.proto))
 		return -EINVAL;
 
-	ip6addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip.in6);
-	ip6addrptr(skb, flags & IPSET_DIM_THREE_SRC, &data.ip2.in6);
+	ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip.in6);
+	ip6addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &data.ip2.in6);
 	ip6_netmask(&data.ip2, data.cidr);
 
-	return adtfn(set, &data, h->timeout);
+	return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags);
 }
 
 static int
 hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
-		     enum ipset_adt adt, u32 *lineno, u32 flags)
+		     enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
 {
 	const struct ip_set_hash *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
@@ -434,6 +474,8 @@
 		     tb[IPSET_ATTR_IP_TO] ||
 		     tb[IPSET_ATTR_CIDR]))
 		return -IPSET_ERR_PROTOCOL;
+	if (unlikely(tb[IPSET_ATTR_IP_TO]))
+		return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
 
 	if (tb[IPSET_ATTR_LINENO])
 		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
@@ -478,7 +520,7 @@
 	}
 
 	if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) {
-		ret = adtfn(set, &data, timeout);
+		ret = adtfn(set, &data, timeout, flags);
 		return ip_set_eexist(ret, flags) ? 0 : ret;
 	}
 
@@ -487,9 +529,11 @@
 	if (port > port_to)
 		swap(port, port_to);
 
+	if (retried)
+		port = h->next.port;
 	for (; port <= port_to; port++) {
 		data.port = htons(port);
-		ret = adtfn(set, &data, timeout);
+		ret = adtfn(set, &data, timeout, flags);
 
 		if (ret && !ip_set_eexist(ret, flags))
 			return ret;
@@ -576,7 +620,9 @@
 	.features	= IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2,
 	.dimension	= IPSET_DIM_THREE,
 	.family		= AF_UNSPEC,
-	.revision	= 1,
+	.revision_min	= 0,
+	/*		  1	   SCTP and UDPLITE support added */
+	.revision_max	= 2,	/* Range as input support for IPv4 added */
 	.create		= hash_ipportnet_create,
 	.create_policy	= {
 		[IPSET_ATTR_HASHSIZE]	= { .type = NLA_U32 },
@@ -589,6 +635,7 @@
 		[IPSET_ATTR_IP]		= { .type = NLA_NESTED },
 		[IPSET_ATTR_IP_TO]	= { .type = NLA_NESTED },
 		[IPSET_ATTR_IP2]	= { .type = NLA_NESTED },
+		[IPSET_ATTR_IP2_TO]	= { .type = NLA_NESTED },
 		[IPSET_ATTR_PORT]	= { .type = NLA_U16 },
 		[IPSET_ATTR_PORT_TO]	= { .type = NLA_U16 },
 		[IPSET_ATTR_CIDR]	= { .type = NLA_U8 },
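
[Editor's note: the new -IPSET_ERR_HASH_RANGE guard above, "ip2_from + UINT_MAX == ip2_to", holds exactly when the range spans all 2^32 addresses, which the hash types refuse. A small compile-and-run check of that arithmetic, in plain userspace C with hypothetical addresses:]

#include <assert.h>
#include <limits.h>

int main(void)
{
	unsigned int from = 0x00000000, to = 0xffffffffu;

	/* from + UINT_MAX wraps to from - 1, so equality with "to"
	 * holds only when to - from == UINT_MAX, i.e. 2^32 addresses */
	assert(from + UINT_MAX == to);		/* full space: rejected */

	from = 0x0a000000;			/* 10.0.0.0 */
	to   = 0x0affffff;			/* 10.255.255.255, a /8 */
	assert(from + UINT_MAX != to);		/* ordinary range: fine */
	return 0;
}
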
diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c
index 2aeeabc..2d4b1f4 100644
--- a/net/netfilter/ipset/ip_set_hash_net.c
+++ b/net/netfilter/ipset/ip_set_hash_net.c
@@ -125,9 +125,17 @@
 #define HOST_MASK	32
 #include <linux/netfilter/ipset/ip_set_ahash.h>
 
+static inline void
+hash_net4_data_next(struct ip_set_hash *h,
+		    const struct hash_net4_elem *d)
+{
+	h->next.ip = ntohl(d->ip);
+}
+
 static int
 hash_net4_kadt(struct ip_set *set, const struct sk_buff *skb,
-	       enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+	       const struct xt_action_param *par,
+	       enum ipset_adt adt, const struct ip_set_adt_opt *opt)
 {
 	const struct ip_set_hash *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
@@ -140,20 +148,21 @@
 	if (adt == IPSET_TEST)
 		data.cidr = HOST_MASK;
 
-	ip4addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip);
+	ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip);
 	data.ip &= ip_set_netmask(data.cidr);
 
-	return adtfn(set, &data, h->timeout);
+	return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags);
 }
 
 static int
 hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
-	       enum ipset_adt adt, u32 *lineno, u32 flags)
+	       enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
 {
 	const struct ip_set_hash *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_net4_elem data = { .cidr = HOST_MASK };
 	u32 timeout = h->timeout;
+	u32 ip = 0, ip_to, last;
 	int ret;
 
 	if (unlikely(!tb[IPSET_ATTR_IP] ||
@@ -163,17 +172,15 @@
 	if (tb[IPSET_ATTR_LINENO])
 		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
 
-	ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &data.ip);
+	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
 	if (ret)
 		return ret;
 
-	if (tb[IPSET_ATTR_CIDR])
+	if (tb[IPSET_ATTR_CIDR]) {
 		data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
-
-	if (!data.cidr)
-		return -IPSET_ERR_INVALID_CIDR;
-
-	data.ip &= ip_set_netmask(data.cidr);
+		if (!data.cidr)
+			return -IPSET_ERR_INVALID_CIDR;
+	}
 
 	if (tb[IPSET_ATTR_TIMEOUT]) {
 		if (!with_timeout(h->timeout))
@@ -181,9 +188,35 @@
 		timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
 	}
 
-	ret = adtfn(set, &data, timeout);
+	if (adt == IPSET_TEST || !tb[IPSET_ATTR_IP_TO]) {
+		data.ip = htonl(ip & ip_set_hostmask(data.cidr));
+		ret = adtfn(set, &data, timeout, flags);
+		return ip_set_eexist(ret, flags) ? 0 : ret;
+	}
 
-	return ip_set_eexist(ret, flags) ? 0 : ret;
+	ip_to = ip;
+	if (tb[IPSET_ATTR_IP_TO]) {
+		ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
+		if (ret)
+			return ret;
+		if (ip_to < ip)
+			swap(ip, ip_to);
+		if (ip + UINT_MAX == ip_to)
+			return -IPSET_ERR_HASH_RANGE;
+	}
+	if (retried)
+		ip = h->next.ip;
+	while (!after(ip, ip_to)) {
+		data.ip = htonl(ip);
+		last = ip_set_range_to_cidr(ip, ip_to, &data.cidr);
+		ret = adtfn(set, &data, timeout, flags);
+		if (ret && !ip_set_eexist(ret, flags))
+			return ret;
+		else
+			ret = 0;
+		ip = last + 1;
+	}
+	return ret;
 }
 
 static bool
@@ -292,9 +325,16 @@
 #define HOST_MASK	128
 #include <linux/netfilter/ipset/ip_set_ahash.h>
 
+static inline void
+hash_net6_data_next(struct ip_set_hash *h,
+		    const struct hash_net6_elem *d)
+{
+}
+
 static int
 hash_net6_kadt(struct ip_set *set, const struct sk_buff *skb,
-	       enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+	       const struct xt_action_param *par,
+	       enum ipset_adt adt, const struct ip_set_adt_opt *opt)
 {
 	const struct ip_set_hash *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
@@ -307,15 +347,15 @@
 	if (adt == IPSET_TEST)
 		data.cidr = HOST_MASK;
 
-	ip6addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip.in6);
+	ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip.in6);
 	ip6_netmask(&data.ip, data.cidr);
 
-	return adtfn(set, &data, h->timeout);
+	return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags);
 }
 
 static int
 hash_net6_uadt(struct ip_set *set, struct nlattr *tb[],
-	       enum ipset_adt adt, u32 *lineno, u32 flags)
+	       enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
 {
 	const struct ip_set_hash *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
@@ -326,6 +366,8 @@
 	if (unlikely(!tb[IPSET_ATTR_IP] ||
 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
 		return -IPSET_ERR_PROTOCOL;
+	if (unlikely(tb[IPSET_ATTR_IP_TO]))
+		return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
 
 	if (tb[IPSET_ATTR_LINENO])
 		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
@@ -348,7 +390,7 @@
 		timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
 	}
 
-	ret = adtfn(set, &data, timeout);
+	ret = adtfn(set, &data, timeout, flags);
 
 	return ip_set_eexist(ret, flags) ? 0 : ret;
 }
@@ -429,7 +471,8 @@
 	.features	= IPSET_TYPE_IP,
 	.dimension	= IPSET_DIM_ONE,
 	.family		= AF_UNSPEC,
-	.revision	= 0,
+	.revision_min	= 0,
+	.revision_max	= 1,	/* Range as input support for IPv4 added */
 	.create		= hash_net_create,
 	.create_policy	= {
 		[IPSET_ATTR_HASHSIZE]	= { .type = NLA_U32 },
@@ -440,6 +483,7 @@
 	},
 	.adt_policy	= {
 		[IPSET_ATTR_IP]		= { .type = NLA_NESTED },
+		[IPSET_ATTR_IP_TO]	= { .type = NLA_NESTED },
 		[IPSET_ATTR_CIDR]	= { .type = NLA_U8 },
 		[IPSET_ATTR_TIMEOUT]	= { .type = NLA_U32 },
 	},
diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c
new file mode 100644
index 0000000..3d6c53b
--- /dev/null
+++ b/net/netfilter/ipset/ip_set_hash_netiface.c
@@ -0,0 +1,763 @@
+/* Copyright (C) 2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the hash:net,iface type */
+
+#include <linux/jhash.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/random.h>
+#include <linux/rbtree.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/netlink.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/ipset/pfxlen.h>
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+#include <linux/netfilter/ipset/ip_set_hash.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("hash:net,iface type of IP sets");
+MODULE_ALIAS("ip_set_hash:net,iface");
+
+/* Interface name rbtree */
+
+struct iface_node {
+	struct rb_node node;
+	char iface[IFNAMSIZ];
+};
+
+#define iface_data(n)	(rb_entry(n, struct iface_node, node)->iface)
+
+static inline long
+ifname_compare(const char *_a, const char *_b)
+{
+	const long *a = (const long *)_a;
+	const long *b = (const long *)_b;
+
+	BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long));
+	if (a[0] != b[0])
+		return a[0] - b[0];
+	if (IFNAMSIZ > sizeof(long)) {
+		if (a[1] != b[1])
+			return a[1] - b[1];
+	}
+	if (IFNAMSIZ > 2 * sizeof(long)) {
+		if (a[2] != b[2])
+			return a[2] - b[2];
+	}
+	if (IFNAMSIZ > 3 * sizeof(long)) {
+		if (a[3] != b[3])
+			return a[3] - b[3];
+	}
+	return 0;
+}
+
+static void
+rbtree_destroy(struct rb_root *root)
+{
+	struct rb_node *p, *n = root->rb_node;
+	struct iface_node *node;
+
+	/* Non-recursive destroy, like in ext3 */
+	while (n) {
+		if (n->rb_left) {
+			n = n->rb_left;
+			continue;
+		}
+		if (n->rb_right) {
+			n = n->rb_right;
+			continue;
+		}
+		p = rb_parent(n);
+		node = rb_entry(n, struct iface_node, node);
+		if (!p)
+			*root = RB_ROOT;
+		else if (p->rb_left == n)
+			p->rb_left = NULL;
+		else if (p->rb_right == n)
+			p->rb_right = NULL;
+
+		kfree(node);
+		n = p;
+	}
+}
+
+static int
+iface_test(struct rb_root *root, const char **iface)
+{
+	struct rb_node *n = root->rb_node;
+
+	while (n) {
+		const char *d = iface_data(n);
+		int res = ifname_compare(*iface, d);
+
+		if (res < 0)
+			n = n->rb_left;
+		else if (res > 0)
+			n = n->rb_right;
+		else {
+			*iface = d;
+			return 1;
+		}
+	}
+	return 0;
+}
+
+static int
+iface_add(struct rb_root *root, const char **iface)
+{
+	struct rb_node **n = &(root->rb_node), *p = NULL;
+	struct iface_node *d;
+
+	while (*n) {
+		char *ifname = iface_data(*n);
+		int res = ifname_compare(*iface, ifname);
+
+		p = *n;
+		if (res < 0)
+			n = &((*n)->rb_left);
+		else if (res > 0)
+			n = &((*n)->rb_right);
+		else {
+			*iface = ifname;
+			return 0;
+		}
+	}
+
+	d = kzalloc(sizeof(*d), GFP_ATOMIC);
+	if (!d)
+		return -ENOMEM;
+	strcpy(d->iface, *iface);
+
+	rb_link_node(&d->node, p, n);
+	rb_insert_color(&d->node, root);
+
+	*iface = d->iface;
+	return 0;
+}
+
+/* Type specific function prefix */
+#define TYPE		hash_netiface
+
+static bool
+hash_netiface_same_set(const struct ip_set *a, const struct ip_set *b);
+
+#define hash_netiface4_same_set	hash_netiface_same_set
+#define hash_netiface6_same_set	hash_netiface_same_set
+
+#define STREQ(a, b)	(strcmp(a, b) == 0)
+
+/* The type variant functions: IPv4 */
+
+/* Member elements without timeout */
+struct hash_netiface4_elem {
+	__be32 ip;
+	const char *iface;
+	u8 physdev;
+	u8 cidr;
+	u16 padding;
+};
+
+/* Member elements with timeout support */
+struct hash_netiface4_telem {
+	__be32 ip;
+	const char *iface;
+	u8 physdev;
+	u8 cidr;
+	u16 padding;
+	unsigned long timeout;
+};
+
+static inline bool
+hash_netiface4_data_equal(const struct hash_netiface4_elem *ip1,
+			  const struct hash_netiface4_elem *ip2)
+{
+	return ip1->ip == ip2->ip &&
+	       ip1->cidr == ip2->cidr &&
+	       ip1->physdev == ip2->physdev &&
+	       ip1->iface == ip2->iface;
+}
+
+static inline bool
+hash_netiface4_data_isnull(const struct hash_netiface4_elem *elem)
+{
+	return elem->cidr == 0;
+}
+
+static inline void
+hash_netiface4_data_copy(struct hash_netiface4_elem *dst,
+			 const struct hash_netiface4_elem *src)
+{
+	dst->ip = src->ip;
+	dst->cidr = src->cidr;
+	dst->physdev = src->physdev;
+	dst->iface = src->iface;
+}
+
+static inline void
+hash_netiface4_data_netmask(struct hash_netiface4_elem *elem, u8 cidr)
+{
+	elem->ip &= ip_set_netmask(cidr);
+	elem->cidr = cidr;
+}
+
+static inline void
+hash_netiface4_data_zero_out(struct hash_netiface4_elem *elem)
+{
+	elem->cidr = 0;
+}
+
+static bool
+hash_netiface4_data_list(struct sk_buff *skb,
+			 const struct hash_netiface4_elem *data)
+{
+	u32 flags = data->physdev ? IPSET_FLAG_PHYSDEV : 0;
+
+	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
+	NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
+	NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface);
+	if (flags)
+		NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, flags);
+	return 0;
+
+nla_put_failure:
+	return 1;
+}
+
+static bool
+hash_netiface4_data_tlist(struct sk_buff *skb,
+			  const struct hash_netiface4_elem *data)
+{
+	const struct hash_netiface4_telem *tdata =
+		(const struct hash_netiface4_telem *)data;
+	u32 flags = data->physdev ? IPSET_FLAG_PHYSDEV : 0;
+
+	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
+	NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
+	NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface);
+	if (flags)
+		NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, flags);
+	NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+		      htonl(ip_set_timeout_get(tdata->timeout)));
+
+	return 0;
+
+nla_put_failure:
+	return 1;
+}
+
+#define IP_SET_HASH_WITH_NETS
+#define IP_SET_HASH_WITH_RBTREE
+
+#define PF		4
+#define HOST_MASK	32
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static inline void
+hash_netiface4_data_next(struct ip_set_hash *h,
+			 const struct hash_netiface4_elem *d)
+{
+	h->next.ip = ntohl(d->ip);
+}
+
+static int
+hash_netiface4_kadt(struct ip_set *set, const struct sk_buff *skb,
+		    const struct xt_action_param *par,
+		    enum ipset_adt adt, const struct ip_set_adt_opt *opt)
+{
+	struct ip_set_hash *h = set->data;
+	ipset_adtfn adtfn = set->variant->adt[adt];
+	struct hash_netiface4_elem data = {
+		.cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK
+	};
+	int ret;
+
+	if (data.cidr == 0)
+		return -EINVAL;
+	if (adt == IPSET_TEST)
+		data.cidr = HOST_MASK;
+
+	ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip);
+	data.ip &= ip_set_netmask(data.cidr);
+
+#define IFACE(dir)	(par->dir ? par->dir->name : NULL)
+#define PHYSDEV(dir)	(nf_bridge->dir ? nf_bridge->dir->name : NULL)
+#define SRCDIR		(opt->flags & IPSET_DIM_TWO_SRC)
+
+	if (opt->cmdflags & IPSET_FLAG_PHYSDEV) {
+#ifdef CONFIG_BRIDGE_NETFILTER
+		const struct nf_bridge_info *nf_bridge = skb->nf_bridge;
+
+		if (!nf_bridge)
+			return -EINVAL;
+		data.iface = SRCDIR ? PHYSDEV(physindev) : PHYSDEV(physoutdev);
+		data.physdev = 1;
+#else
+		data.iface = NULL;
+#endif
+	} else
+		data.iface = SRCDIR ? IFACE(in) : IFACE(out);
+
+	if (!data.iface)
+		return -EINVAL;
+	ret = iface_test(&h->rbtree, &data.iface);
+	if (adt == IPSET_ADD) {
+		if (!ret) {
+			ret = iface_add(&h->rbtree, &data.iface);
+			if (ret)
+				return ret;
+		}
+	} else if (!ret)
+		return ret;
+
+	return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags);
+}
+
+static int
+hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
+		    enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
+{
+	struct ip_set_hash *h = set->data;
+	ipset_adtfn adtfn = set->variant->adt[adt];
+	struct hash_netiface4_elem data = { .cidr = HOST_MASK };
+	u32 ip = 0, ip_to, last;
+	u32 timeout = h->timeout;
+	char iface[IFNAMSIZ] = {};
+	int ret;
+
+	if (unlikely(!tb[IPSET_ATTR_IP] ||
+		     !tb[IPSET_ATTR_IFACE] ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
+		return -IPSET_ERR_PROTOCOL;
+
+	if (tb[IPSET_ATTR_LINENO])
+		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
+	if (ret)
+		return ret;
+
+	if (tb[IPSET_ATTR_CIDR]) {
+		data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+		if (!data.cidr)
+			return -IPSET_ERR_INVALID_CIDR;
+	}
+
+	if (tb[IPSET_ATTR_TIMEOUT]) {
+		if (!with_timeout(h->timeout))
+			return -IPSET_ERR_TIMEOUT;
+		timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+	}
+
+	strcpy(iface, nla_data(tb[IPSET_ATTR_IFACE]));
+	data.iface = iface;
+	ret = iface_test(&h->rbtree, &data.iface);
+	if (adt == IPSET_ADD) {
+		if (!ret) {
+			ret = iface_add(&h->rbtree, &data.iface);
+			if (ret)
+				return ret;
+		}
+	} else if (!ret)
+		return ret;
+
+	if (tb[IPSET_ATTR_CADT_FLAGS]) {
+		u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
+		if (cadt_flags & IPSET_FLAG_PHYSDEV)
+			data.physdev = 1;
+	}
+
+	if (adt == IPSET_TEST || !tb[IPSET_ATTR_IP_TO]) {
+		data.ip = htonl(ip & ip_set_hostmask(data.cidr));
+		ret = adtfn(set, &data, timeout, flags);
+		return ip_set_eexist(ret, flags) ? 0 : ret;
+	}
+
+	if (tb[IPSET_ATTR_IP_TO]) {
+		ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
+		if (ret)
+			return ret;
+		if (ip_to < ip)
+			swap(ip, ip_to);
+		if (ip + UINT_MAX == ip_to)
+			return -IPSET_ERR_HASH_RANGE;
+	} else {
+		ip_set_mask_from_to(ip, ip_to, data.cidr);
+	}
+
+	if (retried)
+		ip = h->next.ip;
+	while (!after(ip, ip_to)) {
+		data.ip = htonl(ip);
+		last = ip_set_range_to_cidr(ip, ip_to, &data.cidr);
+		ret = adtfn(set, &data, timeout, flags);
+
+		if (ret && !ip_set_eexist(ret, flags))
+			return ret;
+		else
+			ret = 0;
+		ip = last + 1;
+	}
+	return ret;
+}
+
+static bool
+hash_netiface_same_set(const struct ip_set *a, const struct ip_set *b)
+{
+	const struct ip_set_hash *x = a->data;
+	const struct ip_set_hash *y = b->data;
+
+	/* Resizing changes htable_bits, so we ignore it */
+	return x->maxelem == y->maxelem &&
+	       x->timeout == y->timeout;
+}
+
+/* The type variant functions: IPv6 */
+
+struct hash_netiface6_elem {
+	union nf_inet_addr ip;
+	const char *iface;
+	u8 physdev;
+	u8 cidr;
+	u16 padding;
+};
+
+struct hash_netiface6_telem {
+	union nf_inet_addr ip;
+	const char *iface;
+	u8 physdev;
+	u8 cidr;
+	u16 padding;
+	unsigned long timeout;
+};
+
+static inline bool
+hash_netiface6_data_equal(const struct hash_netiface6_elem *ip1,
+			  const struct hash_netiface6_elem *ip2)
+{
+	return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 &&
+	       ip1->cidr == ip2->cidr &&
+	       ip1->physdev == ip2->physdev &&
+	       ip1->iface == ip2->iface;
+}
+
+static inline bool
+hash_netiface6_data_isnull(const struct hash_netiface6_elem *elem)
+{
+	return elem->cidr == 0;
+}
+
+static inline void
+hash_netiface6_data_copy(struct hash_netiface6_elem *dst,
+			 const struct hash_netiface6_elem *src)
+{
+	memcpy(dst, src, sizeof(*dst));
+}
+
+static inline void
+hash_netiface6_data_zero_out(struct hash_netiface6_elem *elem)
+{
+}
+
+static inline void
+ip6_netmask(union nf_inet_addr *ip, u8 prefix)
+{
+	ip->ip6[0] &= ip_set_netmask6(prefix)[0];
+	ip->ip6[1] &= ip_set_netmask6(prefix)[1];
+	ip->ip6[2] &= ip_set_netmask6(prefix)[2];
+	ip->ip6[3] &= ip_set_netmask6(prefix)[3];
+}
+
+static inline void
+hash_netiface6_data_netmask(struct hash_netiface6_elem *elem, u8 cidr)
+{
+	ip6_netmask(&elem->ip, cidr);
+	elem->cidr = cidr;
+}
+
+static bool
+hash_netiface6_data_list(struct sk_buff *skb,
+			 const struct hash_netiface6_elem *data)
+{
+	u32 flags = data->physdev ? IPSET_FLAG_PHYSDEV : 0;
+
+	NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
+	NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
+	NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface);
+	if (flags)
+		NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, flags);
+	return 0;
+
+nla_put_failure:
+	return 1;
+}
+
+static bool
+hash_netiface6_data_tlist(struct sk_buff *skb,
+			  const struct hash_netiface6_elem *data)
+{
+	const struct hash_netiface6_telem *e =
+		(const struct hash_netiface6_telem *)data;
+	u32 flags = data->physdev ? IPSET_FLAG_PHYSDEV : 0;
+
+	NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
+	NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
+	NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface);
+	if (flags)
+		NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, flags);
+	NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+		      htonl(ip_set_timeout_get(e->timeout)));
+	return 0;
+
+nla_put_failure:
+	return 1;
+}
+
+#undef PF
+#undef HOST_MASK
+
+#define PF		6
+#define HOST_MASK	128
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static inline void
+hash_netiface6_data_next(struct ip_set_hash *h,
+			 const struct hash_netiface6_elem *d)
+{
+}
+
+static int
+hash_netiface6_kadt(struct ip_set *set, const struct sk_buff *skb,
+		    const struct xt_action_param *par,
+		    enum ipset_adt adt, const struct ip_set_adt_opt *opt)
+{
+	struct ip_set_hash *h = set->data;
+	ipset_adtfn adtfn = set->variant->adt[adt];
+	struct hash_netiface6_elem data = {
+		.cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK
+	};
+	int ret;
+
+	if (data.cidr == 0)
+		return -EINVAL;
+	if (adt == IPSET_TEST)
+		data.cidr = HOST_MASK;
+
+	ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip.in6);
+	ip6_netmask(&data.ip, data.cidr);
+
+	if (opt->cmdflags & IPSET_FLAG_PHYSDEV) {
+#ifdef CONFIG_BRIDGE_NETFILTER
+		const struct nf_bridge_info *nf_bridge = skb->nf_bridge;
+
+		if (!nf_bridge)
+			return -EINVAL;
+		data.iface = SRCDIR ? PHYSDEV(physindev) : PHYSDEV(physoutdev);
+		data.physdev = 1;
+#else
+		data.iface = NULL;
+#endif
+	} else
+		data.iface = SRCDIR ? IFACE(in) : IFACE(out);
+
+	if (!data.iface)
+		return -EINVAL;
+	ret = iface_test(&h->rbtree, &data.iface);
+	if (adt == IPSET_ADD) {
+		if (!ret) {
+			ret = iface_add(&h->rbtree, &data.iface);
+			if (ret)
+				return ret;
+		}
+	} else if (!ret)
+		return ret;
+
+	return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags);
+}
+
+static int
+hash_netiface6_uadt(struct ip_set *set, struct nlattr *tb[],
+		    enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
+{
+	struct ip_set_hash *h = set->data;
+	ipset_adtfn adtfn = set->variant->adt[adt];
+	struct hash_netiface6_elem data = { .cidr = HOST_MASK };
+	u32 timeout = h->timeout;
+	char iface[IFNAMSIZ] = {};
+	int ret;
+
+	if (unlikely(!tb[IPSET_ATTR_IP] ||
+		     !tb[IPSET_ATTR_IFACE] ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
+		return -IPSET_ERR_PROTOCOL;
+	if (unlikely(tb[IPSET_ATTR_IP_TO]))
+		return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
+
+	if (tb[IPSET_ATTR_LINENO])
+		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &data.ip);
+	if (ret)
+		return ret;
+
+	if (tb[IPSET_ATTR_CIDR])
+		data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+	if (!data.cidr)
+		return -IPSET_ERR_INVALID_CIDR;
+	ip6_netmask(&data.ip, data.cidr);
+
+	if (tb[IPSET_ATTR_TIMEOUT]) {
+		if (!with_timeout(h->timeout))
+			return -IPSET_ERR_TIMEOUT;
+		timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+	}
+
+	strcpy(iface, nla_data(tb[IPSET_ATTR_IFACE]));
+	data.iface = iface;
+	ret = iface_test(&h->rbtree, &data.iface);
+	if (adt == IPSET_ADD) {
+		if (!ret) {
+			ret = iface_add(&h->rbtree, &data.iface);
+			if (ret)
+				return ret;
+		}
+	} else if (!ret)
+		return ret;
+
+	if (tb[IPSET_ATTR_CADT_FLAGS]) {
+		u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
+		if (cadt_flags & IPSET_FLAG_PHYSDEV)
+			data.physdev = 1;
+	}
+
+	ret = adtfn(set, &data, timeout, flags);
+
+	return ip_set_eexist(ret, flags) ? 0 : ret;
+}
+
+/* Create hash:net,iface type of sets */
+
+static int
+hash_netiface_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
+{
+	struct ip_set_hash *h;
+	u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
+	u8 hbits;
+
+	if (!(set->family == AF_INET || set->family == AF_INET6))
+		return -IPSET_ERR_INVALID_FAMILY;
+
+	if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+		return -IPSET_ERR_PROTOCOL;
+
+	if (tb[IPSET_ATTR_HASHSIZE]) {
+		hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]);
+		if (hashsize < IPSET_MIMINAL_HASHSIZE)
+			hashsize = IPSET_MIMINAL_HASHSIZE;
+	}
+
+	if (tb[IPSET_ATTR_MAXELEM])
+		maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]);
+
+	h = kzalloc(sizeof(*h)
+		    + sizeof(struct ip_set_hash_nets)
+		      * (set->family == AF_INET ? 32 : 128), GFP_KERNEL);
+	if (!h)
+		return -ENOMEM;
+
+	h->maxelem = maxelem;
+	get_random_bytes(&h->initval, sizeof(h->initval));
+	h->timeout = IPSET_NO_TIMEOUT;
+
+	hbits = htable_bits(hashsize);
+	h->table = ip_set_alloc(
+			sizeof(struct htable)
+			+ jhash_size(hbits) * sizeof(struct hbucket));
+	if (!h->table) {
+		kfree(h);
+		return -ENOMEM;
+	}
+	h->table->htable_bits = hbits;
+	h->rbtree = RB_ROOT;
+
+	set->data = h;
+
+	if (tb[IPSET_ATTR_TIMEOUT]) {
+		h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+
+		set->variant = set->family == AF_INET
+			? &hash_netiface4_tvariant : &hash_netiface6_tvariant;
+
+		if (set->family == AF_INET)
+			hash_netiface4_gc_init(set);
+		else
+			hash_netiface6_gc_init(set);
+	} else {
+		set->variant = set->family == AF_INET
+			? &hash_netiface4_variant : &hash_netiface6_variant;
+	}
+
+	pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
+		 set->name, jhash_size(h->table->htable_bits),
+		 h->table->htable_bits, h->maxelem, set->data, h->table);
+
+	return 0;
+}
+
+static struct ip_set_type hash_netiface_type __read_mostly = {
+	.name		= "hash:net,iface",
+	.protocol	= IPSET_PROTOCOL,
+	.features	= IPSET_TYPE_IP | IPSET_TYPE_IFACE,
+	.dimension	= IPSET_DIM_TWO,
+	.family		= AF_UNSPEC,
+	.revision_min	= 0,
+	.create		= hash_netiface_create,
+	.create_policy	= {
+		[IPSET_ATTR_HASHSIZE]	= { .type = NLA_U32 },
+		[IPSET_ATTR_MAXELEM]	= { .type = NLA_U32 },
+		[IPSET_ATTR_PROBES]	= { .type = NLA_U8 },
+		[IPSET_ATTR_RESIZE]	= { .type = NLA_U8  },
+		[IPSET_ATTR_PROTO]	= { .type = NLA_U8 },
+		[IPSET_ATTR_TIMEOUT]	= { .type = NLA_U32 },
+	},
+	.adt_policy	= {
+		[IPSET_ATTR_IP]		= { .type = NLA_NESTED },
+		[IPSET_ATTR_IP_TO]	= { .type = NLA_NESTED },
+		[IPSET_ATTR_IFACE]	= { .type = NLA_NUL_STRING,
+					    .len = IPSET_MAXNAMELEN - 1 },
+		[IPSET_ATTR_CADT_FLAGS]	= { .type = NLA_U32 },
+		[IPSET_ATTR_CIDR]	= { .type = NLA_U8 },
+		[IPSET_ATTR_TIMEOUT]	= { .type = NLA_U32 },
+		[IPSET_ATTR_LINENO]	= { .type = NLA_U32 },
+	},
+	.me		= THIS_MODULE,
+};
+
+static int __init
+hash_netiface_init(void)
+{
+	return ip_set_type_register(&hash_netiface_type);
+}
+
+static void __exit
+hash_netiface_fini(void)
+{
+	ip_set_type_unregister(&hash_netiface_type);
+}
+
+module_init(hash_netiface_init);
+module_exit(hash_netiface_fini);
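
[Editor's note: hash_netiface*_data_equal() above compares interfaces with a plain pointer test (ip1->iface == ip2->iface); that only works because every name is interned through the per-set rbtree (iface_test()/iface_add()), so equal strings always share one pointer. A userspace sketch of the interning idea, with a linear array standing in for the rbtree:]

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NAMES_MAX 64

static const char *interned[NAMES_MAX];
static int ninterned;

static const char *intern(const char *name)
{
	int i;

	for (i = 0; i < ninterned; i++)
		if (strcmp(interned[i], name) == 0)
			return interned[i];	/* reuse the stored copy */
	if (ninterned == NAMES_MAX)
		return NULL;
	return interned[ninterned++] = strdup(name);
}

int main(void)
{
	const char *a = intern("eth0");
	const char *b = intern("eth0");
	const char *c = intern("eth1");

	/* equal names collapse to one pointer, so set elements can
	 * compare interfaces with ==, as the *_data_equal helpers do */
	printf("%d %d\n", a == b, a == c);	/* prints: 1 0 */
	return 0;
}
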
diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c
index e50d9bb..fe203d1 100644
--- a/net/netfilter/ipset/ip_set_hash_netport.c
+++ b/net/netfilter/ipset/ip_set_hash_netport.c
@@ -137,9 +137,18 @@
 #define HOST_MASK	32
 #include <linux/netfilter/ipset/ip_set_ahash.h>
 
+static inline void
+hash_netport4_data_next(struct ip_set_hash *h,
+			const struct hash_netport4_elem *d)
+{
+	h->next.ip = ntohl(d->ip);
+	h->next.port = ntohs(d->port);
+}
+
 static int
 hash_netport4_kadt(struct ip_set *set, const struct sk_buff *skb,
-		   enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+		   const struct xt_action_param *par,
+		   enum ipset_adt adt, const struct ip_set_adt_opt *opt)
 {
 	const struct ip_set_hash *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
@@ -152,24 +161,24 @@
 	if (adt == IPSET_TEST)
 		data.cidr = HOST_MASK;
 
-	if (!ip_set_get_ip4_port(skb, flags & IPSET_DIM_TWO_SRC,
+	if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
 				 &data.port, &data.proto))
 		return -EINVAL;
 
-	ip4addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip);
+	ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip);
 	data.ip &= ip_set_netmask(data.cidr);
 
-	return adtfn(set, &data, h->timeout);
+	return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags);
 }
 
 static int
 hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
-		   enum ipset_adt adt, u32 *lineno, u32 flags)
+		   enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
 {
 	const struct ip_set_hash *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_netport4_elem data = { .cidr = HOST_MASK };
-	u32 port, port_to;
+	u32 port, port_to, p = 0, ip = 0, ip_to, last;
 	u32 timeout = h->timeout;
 	bool with_ports = false;
 	int ret;
@@ -183,15 +192,15 @@
 	if (tb[IPSET_ATTR_LINENO])
 		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
 
-	ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &data.ip);
+	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
 	if (ret)
 		return ret;
 
-	if (tb[IPSET_ATTR_CIDR])
+	if (tb[IPSET_ATTR_CIDR]) {
 		data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
-	if (!data.cidr)
-		return -IPSET_ERR_INVALID_CIDR;
-	data.ip &= ip_set_netmask(data.cidr);
+		if (!data.cidr)
+			return -IPSET_ERR_INVALID_CIDR;
+	}
 
 	if (tb[IPSET_ATTR_PORT])
 		data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
@@ -216,24 +225,47 @@
 		timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
 	}
 
-	if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) {
-		ret = adtfn(set, &data, timeout);
+	with_ports = with_ports && tb[IPSET_ATTR_PORT_TO];
+	if (adt == IPSET_TEST || !(with_ports || tb[IPSET_ATTR_IP_TO])) {
+		data.ip = htonl(ip & ip_set_hostmask(data.cidr));
+		ret = adtfn(set, &data, timeout, flags);
 		return ip_set_eexist(ret, flags) ? 0 : ret;
 	}
 
-	port = ntohs(data.port);
-	port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
-	if (port > port_to)
-		swap(port, port_to);
-
-	for (; port <= port_to; port++) {
-		data.port = htons(port);
-		ret = adtfn(set, &data, timeout);
-
-		if (ret && !ip_set_eexist(ret, flags))
+	port = port_to = ntohs(data.port);
+	if (tb[IPSET_ATTR_PORT_TO]) {
+		port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+		if (port_to < port)
+			swap(port, port_to);
+	}
+	if (tb[IPSET_ATTR_IP_TO]) {
+		ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
+		if (ret)
 			return ret;
-		else
-			ret = 0;
+		if (ip_to < ip)
+			swap(ip, ip_to);
+		if (ip + UINT_MAX == ip_to)
+			return -IPSET_ERR_HASH_RANGE;
+	} else {
+		ip_set_mask_from_to(ip, ip_to, data.cidr);
+	}
+
+	if (retried)
+		ip = h->next.ip;
+	while (!after(ip, ip_to)) {
+		data.ip = htonl(ip);
+		last = ip_set_range_to_cidr(ip, ip_to, &data.cidr);
+		p = retried && ip == h->next.ip ? h->next.port : port;
+		for (; p <= port_to; p++) {
+			data.port = htons(p);
+			ret = adtfn(set, &data, timeout, flags);
+
+			if (ret && !ip_set_eexist(ret, flags))
+				return ret;
+			else
+				ret = 0;
+		}
+		ip = last + 1;
 	}
 	return ret;
 }
@@ -351,9 +383,17 @@
 #define HOST_MASK	128
 #include <linux/netfilter/ipset/ip_set_ahash.h>
 
+static inline void
+hash_netport6_data_next(struct ip_set_hash *h,
+			const struct hash_netport6_elem *d)
+{
+	h->next.port = ntohs(d->port);
+}
+
 static int
 hash_netport6_kadt(struct ip_set *set, const struct sk_buff *skb,
-		   enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+		   const struct xt_action_param *par,
+		   enum ipset_adt adt, const struct ip_set_adt_opt *opt)
 {
 	const struct ip_set_hash *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
@@ -366,19 +406,19 @@
 	if (adt == IPSET_TEST)
 		data.cidr = HOST_MASK;
 
-	if (!ip_set_get_ip6_port(skb, flags & IPSET_DIM_TWO_SRC,
+	if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
 				 &data.port, &data.proto))
 		return -EINVAL;
 
-	ip6addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip.in6);
+	ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip.in6);
 	ip6_netmask(&data.ip, data.cidr);
 
-	return adtfn(set, &data, h->timeout);
+	return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags);
 }
 
 static int
 hash_netport6_uadt(struct ip_set *set, struct nlattr *tb[],
-		   enum ipset_adt adt, u32 *lineno, u32 flags)
+		   enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
 {
 	const struct ip_set_hash *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
@@ -393,6 +433,8 @@
 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
 		return -IPSET_ERR_PROTOCOL;
+	if (unlikely(tb[IPSET_ATTR_IP_TO]))
+		return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
 
 	if (tb[IPSET_ATTR_LINENO])
 		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
@@ -431,7 +473,7 @@
 	}
 
 	if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) {
-		ret = adtfn(set, &data, timeout);
+		ret = adtfn(set, &data, timeout, flags);
 		return ip_set_eexist(ret, flags) ? 0 : ret;
 	}
 
@@ -440,9 +482,11 @@
 	if (port > port_to)
 		swap(port, port_to);
 
+	if (retried)
+		port = h->next.port;
 	for (; port <= port_to; port++) {
 		data.port = htons(port);
-		ret = adtfn(set, &data, timeout);
+		ret = adtfn(set, &data, timeout, flags);
 
 		if (ret && !ip_set_eexist(ret, flags))
 			return ret;
@@ -528,7 +572,9 @@
 	.features	= IPSET_TYPE_IP | IPSET_TYPE_PORT,
 	.dimension	= IPSET_DIM_TWO,
 	.family		= AF_UNSPEC,
-	.revision	= 1,
+	.revision_min	= 0,
+	/*		  1	   SCTP and UDPLITE support added */
+	.revision_max	= 2,	/* Range as input support for IPv4 added */
 	.create		= hash_netport_create,
 	.create_policy	= {
 		[IPSET_ATTR_HASHSIZE]	= { .type = NLA_U32 },
@@ -540,6 +586,7 @@
 	},
 	.adt_policy	= {
 		[IPSET_ATTR_IP]		= { .type = NLA_NESTED },
+		[IPSET_ATTR_IP_TO]	= { .type = NLA_NESTED },
 		[IPSET_ATTR_PORT]	= { .type = NLA_U16 },
 		[IPSET_ATTR_PORT_TO]	= { .type = NLA_U16 },
 		[IPSET_ATTR_PROTO]	= { .type = NLA_U8 },
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
index e9159e9..4d10819 100644
--- a/net/netfilter/ipset/ip_set_list_set.c
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -72,7 +72,8 @@
 
 static int
 list_set_kadt(struct ip_set *set, const struct sk_buff *skb,
-	      enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+	      const struct xt_action_param *par,
+	      enum ipset_adt adt, const struct ip_set_adt_opt *opt)
 {
 	struct list_set *map = set->data;
 	struct set_elem *elem;
@@ -87,17 +88,17 @@
 			continue;
 		switch (adt) {
 		case IPSET_TEST:
-			ret = ip_set_test(elem->id, skb, pf, dim, flags);
+			ret = ip_set_test(elem->id, skb, par, opt);
 			if (ret > 0)
 				return ret;
 			break;
 		case IPSET_ADD:
-			ret = ip_set_add(elem->id, skb, pf, dim, flags);
+			ret = ip_set_add(elem->id, skb, par, opt);
 			if (ret == 0)
 				return ret;
 			break;
 		case IPSET_DEL:
-			ret = ip_set_del(elem->id, skb, pf, dim, flags);
+			ret = ip_set_del(elem->id, skb, par, opt);
 			if (ret == 0)
 				return ret;
 			break;
@@ -109,15 +110,28 @@
 }
 
 static bool
-next_id_eq(const struct list_set *map, u32 i, ip_set_id_t id)
+id_eq(const struct list_set *map, u32 i, ip_set_id_t id)
 {
 	const struct set_elem *elem;
 
-	if (i + 1 < map->size) {
-		elem = list_set_elem(map, i + 1);
+	if (i < map->size) {
+		elem = list_set_elem(map, i);
+		return elem->id == id;
+	}
+
+	return 0;
+}
+
+static bool
+id_eq_timeout(const struct list_set *map, u32 i, ip_set_id_t id)
+{
+	const struct set_elem *elem;
+
+	if (i < map->size) {
+		elem = list_set_elem(map, i);
 		return !!(elem->id == id &&
 			  !(with_timeout(map->timeout) &&
-			    list_set_expired(map, i + 1)));
+			    list_set_expired(map, i)));
 	}
 
 	return 0;
@@ -190,12 +204,26 @@
 	return 0;
 }
 
+static void
+cleanup_entries(struct list_set *map)
+{
+	struct set_telem *e;
+	u32 i;
+
+	for (i = 0; i < map->size; i++) {
+		e = list_set_telem(map, i);
+		if (e->id != IPSET_INVALID_ID && list_set_expired(map, i))
+			list_set_del(map, i);
+	}
+}
+
 static int
 list_set_uadt(struct ip_set *set, struct nlattr *tb[],
-	      enum ipset_adt adt, u32 *lineno, u32 flags)
+	      enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
 {
 	struct list_set *map = set->data;
 	bool with_timeout = with_timeout(map->timeout);
+	bool flag_exist = flags & IPSET_FLAG_EXIST;
 	int before = 0;
 	u32 timeout = map->timeout;
 	ip_set_id_t id, refid = IPSET_INVALID_ID;
@@ -248,6 +276,8 @@
 		}
 		timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
 	}
+	if (with_timeout && adt != IPSET_TEST)
+		cleanup_entries(map);
 
 	switch (adt) {
 	case IPSET_TEST:
@@ -259,22 +289,37 @@
 			else if (with_timeout && list_set_expired(map, i))
 				continue;
 			else if (before > 0 && elem->id == id)
-				ret = next_id_eq(map, i, refid);
+				ret = id_eq_timeout(map, i + 1, refid);
 			else if (before < 0 && elem->id == refid)
-				ret = next_id_eq(map, i, id);
+				ret = id_eq_timeout(map, i + 1, id);
 			else if (before == 0 && elem->id == id)
 				ret = 1;
 		}
 		break;
 	case IPSET_ADD:
-		for (i = 0; i < map->size && !ret; i++) {
+		for (i = 0; i < map->size; i++) {
 			elem = list_set_elem(map, i);
-			if (elem->id == id &&
-			    !(with_timeout && list_set_expired(map, i)))
+			if (elem->id != id)
+				continue;
+			if (!(with_timeout && flag_exist)) {
 				ret = -IPSET_ERR_EXIST;
+				goto finish;
+			} else {
+				struct set_telem *e = list_set_telem(map, i);
+
+				if ((before > 0 &&
+				     !id_eq(map, i + 1, refid)) ||
+				    (before < 0 &&
+				     (i == 0 || !id_eq(map, i - 1, refid)))) {
+					ret = -IPSET_ERR_EXIST;
+					goto finish;
+				}
+				e->timeout = ip_set_timeout_set(timeout);
+				ip_set_put_byindex(id);
+				ret = 0;
+				goto finish;
+			}
 		}
-		if (ret == -IPSET_ERR_EXIST)
-			break;
 		ret = -IPSET_ERR_LIST_FULL;
 		for (i = 0; i < map->size && ret == -IPSET_ERR_LIST_FULL; i++) {
 			elem = list_set_elem(map, i);
@@ -283,9 +328,7 @@
 					: list_set_add(map, i, id, timeout);
 			else if (elem->id != refid)
 				continue;
-			else if (with_timeout && list_set_expired(map, i))
-				ret = -IPSET_ERR_REF_EXIST;
-			else if (before)
+			else if (before > 0)
 				ret = list_set_add(map, i, id, timeout);
 			else if (i + 1 < map->size)
 				ret = list_set_add(map, i + 1, id, timeout);
@@ -299,16 +342,12 @@
 				ret = before != 0 ? -IPSET_ERR_REF_EXIST
 						  : -IPSET_ERR_EXIST;
 				break;
-			} else if (with_timeout && list_set_expired(map, i))
-				continue;
-			else if (elem->id == id &&
-				 (before == 0 ||
-				  (before > 0 &&
-				   next_id_eq(map, i, refid))))
+			} else if (elem->id == id &&
+				   (before == 0 ||
+				    (before > 0 && id_eq(map, i + 1, refid))))
 				ret = list_set_del(map, i);
-			else if (before < 0 &&
-				 elem->id == refid &&
-				 next_id_eq(map, i, id))
+			else if (elem->id == refid &&
+				 before < 0 && id_eq(map, i + 1, id))
 				ret = list_set_del(map, i + 1);
 		}
 		break;
@@ -454,15 +493,9 @@
 {
 	struct ip_set *set = (struct ip_set *) ul_set;
 	struct list_set *map = set->data;
-	struct set_telem *e;
-	u32 i;
 
 	write_lock_bh(&set->lock);
-	for (i = 0; i < map->size; i++) {
-		e = list_set_telem(map, i);
-		if (e->id != IPSET_INVALID_ID && list_set_expired(map, i))
-			list_set_del(map, i);
-	}
+	cleanup_entries(map);
 	write_unlock_bh(&set->lock);
 
 	map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
@@ -543,7 +576,8 @@
 	.features	= IPSET_TYPE_NAME | IPSET_DUMP_LAST,
 	.dimension	= IPSET_DIM_ONE,
 	.family		= AF_UNSPEC,
-	.revision	= 0,
+	.revision_min	= 0,
+	.revision_max	= 0,
 	.create		= list_set_create,
 	.create_policy	= {
 		[IPSET_ATTR_SIZE]	= { .type = NLA_U32 },
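
[Editor's note: in the list:set changes above, "before" encodes the placement request: positive means insert before the referenced set, negative means after it, zero means plain append; id_eq()/id_eq_timeout() then verify the neighbour at i+1 or i-1. A self-contained userspace sketch of that placement logic, using a fixed-size array instead of the kernel's element vector:]

#include <stdio.h>

#define SZ 8
#define NONE (-1)

static int list[SZ] = { 10, 20, 30, NONE, NONE, NONE, NONE, NONE };

static int add_at(int i, int id)
{
	int j;

	if (i >= SZ || list[SZ - 1] != NONE)
		return -1;			/* list full */
	for (j = SZ - 1; j > i; j--)		/* shift the tail right */
		list[j] = list[j - 1];
	list[i] = id;
	return 0;
}

/* before > 0: insert before ref; before < 0: after ref; 0: append */
static int add(int id, int ref, int before)
{
	int i;

	for (i = 0; i < SZ; i++) {
		if (before == 0 && list[i] == NONE)
			return add_at(i, id);
		if (before != 0 && list[i] == ref)
			return add_at(before > 0 ? i : i + 1, id);
	}
	return -1;
}

int main(void)
{
	int i;

	add(15, 20, 1);		/* "before 20" */
	add(35, 30, -1);	/* "after 30" */
	add(40, 0, 0);		/* plain append */
	for (i = 0; i < SZ && list[i] != NONE; i++)
		printf("%d ", list[i]);
	printf("\n");		/* 10 15 20 30 35 40 */
	return 0;
}
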
diff --git a/net/netfilter/ipset/pfxlen.c b/net/netfilter/ipset/pfxlen.c
index 23f8c81..bd13d66 100644
--- a/net/netfilter/ipset/pfxlen.c
+++ b/net/netfilter/ipset/pfxlen.c
@@ -148,7 +148,7 @@
 EXPORT_SYMBOL_GPL(ip_set_netmask_map);
 
 #undef  E
-#define E(a, b, c, d) 						\
+#define E(a, b, c, d)						\
 	{.ip6 = { (__force __be32) a, (__force __be32) b,	\
 		  (__force __be32) c, (__force __be32) d,	\
 	} }
@@ -289,3 +289,24 @@
 	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF),
 };
 EXPORT_SYMBOL_GPL(ip_set_hostmask_map);
+
+/* Find the largest network starting at "from" that fits within the range, in host byte order. */
+u32
+ip_set_range_to_cidr(u32 from, u32 to, u8 *cidr)
+{
+	u32 last;
+	u8 i;
+
+	for (i = 1; i < 32; i++) {
+		if ((from & ip_set_hostmask(i)) != from)
+			continue;
+		last = from | ~ip_set_hostmask(i);
+		if (!after(last, to)) {
+			*cidr = i;
+			return last;
+		}
+	}
+	*cidr = 32;
+	return from;
+}
+EXPORT_SYMBOL_GPL(ip_set_range_to_cidr);
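
[Editor's note: a worked example of the helper just added. ip_set_range_to_cidr() greedily peels off the largest CIDR block that starts at "from" and still fits below "to"; the hash types loop it to cover an arbitrary range. The userspace restatement below replaces ip_set_hostmask() and the wrap-safe after() with plain arithmetic; decomposing 192.168.0.3-192.168.0.10 yields a /32, a /30, a /31 and a /32.]

#include <stdio.h>

static unsigned int hostmask(unsigned char cidr)	/* ip_set_hostmask() */
{
	return ~0u << (32 - cidr);	/* valid for 1 <= cidr <= 32 */
}

static unsigned int range_to_cidr(unsigned int from, unsigned int to,
				  unsigned char *cidr)
{
	unsigned int last;
	unsigned char i;

	for (i = 1; i < 32; i++) {
		if ((from & hostmask(i)) != from)
			continue;		/* from not aligned to /i */
		last = from | ~hostmask(i);
		if (last <= to) {		/* whole /i block fits */
			*cidr = i;
			return last;
		}
	}
	*cidr = 32;				/* fall back to one host */
	return from;
}

int main(void)
{
	unsigned int from = 0xc0a80003, to = 0xc0a8000a; /* .0.3 - .0.10 */
	unsigned char cidr;

	while (from <= to) {
		unsigned int last = range_to_cidr(from, to, &cidr);

		printf("%u.%u.%u.%u/%u\n", from >> 24, (from >> 16) & 0xff,
		       (from >> 8) & 0xff, from & 0xff, cidr);
		if (last == 0xffffffffu)
			break;			/* avoid wrap at the top */
		from = last + 1;
	}
	return 0;
}
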
diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c
index 059af31..fe6cb43 100644
--- a/net/netfilter/ipvs/ip_vs_app.c
+++ b/net/netfilter/ipvs/ip_vs_app.c
@@ -576,7 +576,7 @@
 };
 #endif
 
-int __net_init __ip_vs_app_init(struct net *net)
+int __net_init ip_vs_app_net_init(struct net *net)
 {
 	struct netns_ipvs *ipvs = net_ipvs(net);
 
@@ -585,17 +585,7 @@
 	return 0;
 }
 
-void __net_exit __ip_vs_app_cleanup(struct net *net)
+void __net_exit ip_vs_app_net_cleanup(struct net *net)
 {
 	proc_net_remove(net, "ip_vs_app");
 }
-
-int __init ip_vs_app_init(void)
-{
-	return 0;
-}
-
-
-void ip_vs_app_cleanup(void)
-{
-}
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index 782db27..12571fb 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -1255,7 +1255,7 @@
 /*
  * per netns init and exit
  */
-int __net_init __ip_vs_conn_init(struct net *net)
+int __net_init ip_vs_conn_net_init(struct net *net)
 {
 	struct netns_ipvs *ipvs = net_ipvs(net);
 
@@ -1266,7 +1266,7 @@
 	return 0;
 }
 
-void __net_exit __ip_vs_conn_cleanup(struct net *net)
+void __net_exit ip_vs_conn_net_cleanup(struct net *net)
 {
 	/* flush all the connection entries first */
 	ip_vs_conn_flush(net);
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 24c28d2..e33d48c 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1384,7 +1384,7 @@
 		offset += 2 * sizeof(__u16);
 	verdict = ip_vs_icmp_xmit(skb, cp, pp, offset, hooknum);
 
-  out:
+out:
 	__ip_vs_conn_put(cp);
 
 	return verdict;
@@ -1891,22 +1891,22 @@
 	atomic_inc(&ipvs_netns_cnt);
 	net->ipvs = ipvs;
 
-	if (__ip_vs_estimator_init(net) < 0)
+	if (ip_vs_estimator_net_init(net) < 0)
 		goto estimator_fail;
 
-	if (__ip_vs_control_init(net) < 0)
+	if (ip_vs_control_net_init(net) < 0)
 		goto control_fail;
 
-	if (__ip_vs_protocol_init(net) < 0)
+	if (ip_vs_protocol_net_init(net) < 0)
 		goto protocol_fail;
 
-	if (__ip_vs_app_init(net) < 0)
+	if (ip_vs_app_net_init(net) < 0)
 		goto app_fail;
 
-	if (__ip_vs_conn_init(net) < 0)
+	if (ip_vs_conn_net_init(net) < 0)
 		goto conn_fail;
 
-	if (__ip_vs_sync_init(net) < 0)
+	if (ip_vs_sync_net_init(net) < 0)
 		goto sync_fail;
 
 	printk(KERN_INFO "IPVS: Creating netns size=%zu id=%d\n",
@@ -1917,27 +1917,27 @@
  */
 
 sync_fail:
-	__ip_vs_conn_cleanup(net);
+	ip_vs_conn_net_cleanup(net);
 conn_fail:
-	__ip_vs_app_cleanup(net);
+	ip_vs_app_net_cleanup(net);
 app_fail:
-	__ip_vs_protocol_cleanup(net);
+	ip_vs_protocol_net_cleanup(net);
 protocol_fail:
-	__ip_vs_control_cleanup(net);
+	ip_vs_control_net_cleanup(net);
 control_fail:
-	__ip_vs_estimator_cleanup(net);
+	ip_vs_estimator_net_cleanup(net);
 estimator_fail:
 	return -ENOMEM;
 }
 
 static void __net_exit __ip_vs_cleanup(struct net *net)
 {
-	__ip_vs_service_cleanup(net);	/* ip_vs_flush() with locks */
-	__ip_vs_conn_cleanup(net);
-	__ip_vs_app_cleanup(net);
-	__ip_vs_protocol_cleanup(net);
-	__ip_vs_control_cleanup(net);
-	__ip_vs_estimator_cleanup(net);
+	ip_vs_service_net_cleanup(net);	/* ip_vs_flush() with locks */
+	ip_vs_conn_net_cleanup(net);
+	ip_vs_app_net_cleanup(net);
+	ip_vs_protocol_net_cleanup(net);
+	ip_vs_control_net_cleanup(net);
+	ip_vs_estimator_net_cleanup(net);
 	IP_VS_DBG(2, "ipvs netns %d released\n", net_ipvs(net)->gen);
 }
 
@@ -1946,7 +1946,7 @@
 	EnterFunction(2);
 	net_ipvs(net)->enable = 0;	/* Disable packet reception */
 	smp_wmb();
-	__ip_vs_sync_cleanup(net);
+	ip_vs_sync_net_cleanup(net);
 	LeaveFunction(2);
 }
 
@@ -1968,36 +1968,23 @@
 {
 	int ret;
 
-	ip_vs_estimator_init();
 	ret = ip_vs_control_init();
 	if (ret < 0) {
 		pr_err("can't setup control.\n");
-		goto cleanup_estimator;
+		goto exit;
 	}
 
 	ip_vs_protocol_init();
 
-	ret = ip_vs_app_init();
-	if (ret < 0) {
-		pr_err("can't setup application helper.\n");
-		goto cleanup_protocol;
-	}
-
 	ret = ip_vs_conn_init();
 	if (ret < 0) {
 		pr_err("can't setup connection table.\n");
-		goto cleanup_app;
-	}
-
-	ret = ip_vs_sync_init();
-	if (ret < 0) {
-		pr_err("can't setup sync data.\n");
-		goto cleanup_conn;
+		goto cleanup_protocol;
 	}
 
 	ret = register_pernet_subsys(&ipvs_core_ops);	/* Alloc ip_vs struct */
 	if (ret < 0)
-		goto cleanup_sync;
+		goto cleanup_conn;
 
 	ret = register_pernet_device(&ipvs_core_dev_ops);
 	if (ret < 0)
@@ -2017,17 +2004,12 @@
 	unregister_pernet_device(&ipvs_core_dev_ops);
 cleanup_sub:
 	unregister_pernet_subsys(&ipvs_core_ops);
-cleanup_sync:
-	ip_vs_sync_cleanup();
-  cleanup_conn:
+cleanup_conn:
 	ip_vs_conn_cleanup();
-  cleanup_app:
-	ip_vs_app_cleanup();
-  cleanup_protocol:
+cleanup_protocol:
 	ip_vs_protocol_cleanup();
 	ip_vs_control_cleanup();
-  cleanup_estimator:
-	ip_vs_estimator_cleanup();
+exit:
 	return ret;
 }
 
@@ -2036,12 +2018,9 @@
 	nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
 	unregister_pernet_device(&ipvs_core_dev_ops);
 	unregister_pernet_subsys(&ipvs_core_ops);	/* free ip_vs struct */
-	ip_vs_sync_cleanup();
 	ip_vs_conn_cleanup();
-	ip_vs_app_cleanup();
 	ip_vs_protocol_cleanup();
 	ip_vs_control_cleanup();
-	ip_vs_estimator_cleanup();
 	pr_info("ipvs unloaded.\n");
 }
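
The per-netns setup above keeps the kernel's standard error-unwinding shape: init stages run in order, and a failure jumps to a label that tears down only the stages already completed, in reverse. A minimal sketch of the idiom:

	#include <stdio.h>

	static int step_a(void) { puts("a up"); return 0; }
	static int step_b(void) { puts("b up"); return -1; }	/* simulate failure */
	static void undo_a(void) { puts("a down"); }

	/* Initialize stages in order; on error, unwind completed stages in reverse. */
	static int subsys_init(void)
	{
		if (step_a() < 0)
			goto a_fail;
		if (step_b() < 0)
			goto b_fail;
		return 0;

	b_fail:
		undo_a();
	a_fail:
		return -1;
	}

	int main(void)
	{
		return subsys_init() ? 1 : 0;
	}
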
 
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 699c79a..be43fd8 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -1334,9 +1334,9 @@
 		ip_vs_bind_pe(svc, pe);
 	}
 
-  out_unlock:
+out_unlock:
 	write_unlock_bh(&__ip_vs_svc_lock);
-  out:
+out:
 	ip_vs_scheduler_put(old_sched);
 	ip_vs_pe_put(old_pe);
 	return ret;
@@ -1483,7 +1483,7 @@
  *	Delete service by {netns} in the service table.
  *	Called by __ip_vs_cleanup()
  */
-void __ip_vs_service_cleanup(struct net *net)
+void ip_vs_service_net_cleanup(struct net *net)
 {
 	EnterFunction(2);
 	/* Check for "full" addressed entries */
@@ -1662,7 +1662,7 @@
 /*
  *	IPVS sysctl table (under the /proc/sys/net/ipv4/vs/)
  *	Do not change order or insert new entries without
- *	align with netns init in __ip_vs_control_init()
+ *	aligning with the netns init in ip_vs_control_net_init()
  */
 
 static struct ctl_table vs_vars[] = {
@@ -2469,7 +2469,7 @@
 			count++;
 		}
 	}
-  out:
+out:
 	return ret;
 }
 
@@ -2707,7 +2707,7 @@
 		ret = -EINVAL;
 	}
 
-  out:
+out:
 	mutex_unlock(&__ip_vs_mutex);
 	return ret;
 }
@@ -3595,7 +3595,7 @@
  * per netns intit/exit func.
  */
 #ifdef CONFIG_SYSCTL
-int __net_init __ip_vs_control_init_sysctl(struct net *net)
+int __net_init ip_vs_control_net_init_sysctl(struct net *net)
 {
 	int idx;
 	struct netns_ipvs *ipvs = net_ipvs(net);
@@ -3654,7 +3654,7 @@
 	return 0;
 }
 
-void __net_init __ip_vs_control_cleanup_sysctl(struct net *net)
+void __net_init ip_vs_control_net_cleanup_sysctl(struct net *net)
 {
 	struct netns_ipvs *ipvs = net_ipvs(net);
 
@@ -3665,8 +3665,8 @@
 
 #else
 
-int __net_init __ip_vs_control_init_sysctl(struct net *net) { return 0; }
-void __net_init __ip_vs_control_cleanup_sysctl(struct net *net) { }
+int __net_init ip_vs_control_net_init_sysctl(struct net *net) { return 0; }
+void __net_init ip_vs_control_net_cleanup_sysctl(struct net *net) { }
 
 #endif
 
@@ -3674,7 +3674,7 @@
 	.notifier_call = ip_vs_dst_event,
 };
 
-int __net_init __ip_vs_control_init(struct net *net)
+int __net_init ip_vs_control_net_init(struct net *net)
 {
 	int idx;
 	struct netns_ipvs *ipvs = net_ipvs(net);
@@ -3702,7 +3702,7 @@
 	proc_net_fops_create(net, "ip_vs_stats_percpu", 0,
 			     &ip_vs_stats_percpu_fops);
 
-	if (__ip_vs_control_init_sysctl(net))
+	if (ip_vs_control_net_init_sysctl(net))
 		goto err;
 
 	return 0;
@@ -3712,13 +3712,13 @@
 	return -ENOMEM;
 }
 
-void __net_exit __ip_vs_control_cleanup(struct net *net)
+void __net_exit ip_vs_control_net_cleanup(struct net *net)
 {
 	struct netns_ipvs *ipvs = net_ipvs(net);
 
 	ip_vs_trash_cleanup(net);
 	ip_vs_stop_estimator(net, &ipvs->tot_stats);
-	__ip_vs_control_cleanup_sysctl(net);
+	ip_vs_control_net_cleanup_sysctl(net);
 	proc_net_remove(net, "ip_vs_stats_percpu");
 	proc_net_remove(net, "ip_vs_stats");
 	proc_net_remove(net, "ip_vs");
diff --git a/net/netfilter/ipvs/ip_vs_est.c b/net/netfilter/ipvs/ip_vs_est.c
index 508cce9..0fac601 100644
--- a/net/netfilter/ipvs/ip_vs_est.c
+++ b/net/netfilter/ipvs/ip_vs_est.c
@@ -192,7 +192,7 @@
 	dst->outbps = (e->outbps + 0xF) >> 5;
 }
 
-int __net_init __ip_vs_estimator_init(struct net *net)
+int __net_init ip_vs_estimator_net_init(struct net *net)
 {
 	struct netns_ipvs *ipvs = net_ipvs(net);
 
@@ -203,16 +203,7 @@
 	return 0;
 }
 
-void __net_exit __ip_vs_estimator_cleanup(struct net *net)
+void __net_exit ip_vs_estimator_net_cleanup(struct net *net)
 {
 	del_timer_sync(&net_ipvs(net)->est_timer);
 }
-
-int __init ip_vs_estimator_init(void)
-{
-	return 0;
-}
-
-void ip_vs_estimator_cleanup(void)
-{
-}
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
index af63553..4490a32 100644
--- a/net/netfilter/ipvs/ip_vs_ftp.c
+++ b/net/netfilter/ipvs/ip_vs_ftp.c
@@ -44,8 +44,8 @@
 #include <net/ip_vs.h>
 
 
-#define SERVER_STRING "227 Entering Passive Mode ("
-#define CLIENT_STRING "PORT "
+#define SERVER_STRING "227 "
+#define CLIENT_STRING "PORT"
 
 
 /*
@@ -79,14 +79,17 @@
 
 /*
  * Get <addr,port> from the string "xxx.xxx.xxx.xxx,ppp,ppp", started
- * with the "pattern" and terminated with the "term" character.
+ * with the "pattern", ignoring everything up to the "skip" delimiter,
+ * and terminated with the "term" character.
  * <addr,port> is in network order.
  */
 static int ip_vs_ftp_get_addrport(char *data, char *data_limit,
-				  const char *pattern, size_t plen, char term,
+				  const char *pattern, size_t plen,
+				  char skip, char term,
 				  __be32 *addr, __be16 *port,
 				  char **start, char **end)
 {
+	char *s, c;
 	unsigned char p[6];
 	int i = 0;
 
@@ -101,19 +104,38 @@
 	if (strnicmp(data, pattern, plen) != 0) {
 		return 0;
 	}
-	*start = data + plen;
+	s = data + plen;
+	if (skip) {
+		int found = 0;
 
-	for (data = *start; *data != term; data++) {
+		for (;; s++) {
+			if (s == data_limit)
+				return -1;
+			if (!found) {
+				if (*s == skip)
+					found = 1;
+			} else if (*s != skip) {
+				break;
+			}
+		}
+	}
+
+	for (data = s; ; data++) {
 		if (data == data_limit)
 			return -1;
+		if (*data == term)
+			break;
 	}
 	*end = data;
 
 	memset(p, 0, sizeof(p));
-	for (data = *start; data != *end; data++) {
-		if (*data >= '0' && *data <= '9') {
-			p[i] = p[i]*10 + *data - '0';
-		} else if (*data == ',' && i < 5) {
+	for (data = s; ; data++) {
+		c = *data;
+		if (c == term)
+			break;
+		if (c >= '0' && c <= '9') {
+			p[i] = p[i]*10 + c - '0';
+		} else if (c == ',' && i < 5) {
 			i++;
 		} else {
 			/* unexpected character */
@@ -124,8 +146,9 @@
 	if (i != 5)
 		return -1;
 
-	*addr = get_unaligned((__be32 *)p);
-	*port = get_unaligned((__be16 *)(p + 4));
+	*start = s;
+	*addr = get_unaligned((__be32 *) p);
+	*port = get_unaligned((__be16 *) (p + 4));
 	return 1;
 }
 
@@ -185,7 +208,8 @@
 
 		if (ip_vs_ftp_get_addrport(data, data_limit,
 					   SERVER_STRING,
-					   sizeof(SERVER_STRING)-1, ')',
+					   sizeof(SERVER_STRING)-1,
+					   '(', ')',
 					   &from.ip, &port,
 					   &start, &end) != 1)
 			return 1;
@@ -345,7 +369,7 @@
 	 */
 	if (ip_vs_ftp_get_addrport(data_start, data_limit,
 				   CLIENT_STRING, sizeof(CLIENT_STRING)-1,
-				   '\r', &to.ip, &port,
+				   ' ', '\r', &to.ip, &port,
 				   &start, &end) != 1)
 		return 1;
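
With SERVER_STRING relaxed to "227 ", the parser can no longer assume the address starts right after the pattern: it first skips ahead to (and past) the "skip" delimiter, then reads six comma-separated decimal fields up to "term"; the first four bytes form the IPv4 address and the last two the port, in network order. A user-space sketch of that parse, with hypothetical names and a single combined scan instead of the kernel's two passes:

	#include <stdio.h>
	#include <string.h>

	/* Parse "a,b,c,d,p1,p2" between 'skip' and 'term'; returns 1 on success. */
	static int parse_addrport(const char *data, const char *limit,
				  char skip, char term, unsigned char p[6])
	{
		const char *s = data;
		int i = 0;

		if (skip) {			/* advance past the first 'skip' run */
			int found = 0;

			for (;; s++) {
				if (s == limit)
					return -1;
				if (!found) {
					if (*s == skip)
						found = 1;
				} else if (*s != skip) {
					break;
				}
			}
		}
		memset(p, 0, 6);
		for (;; s++) {
			if (s == limit)
				return -1;
			if (*s == term)
				break;
			if (*s >= '0' && *s <= '9')
				p[i] = p[i] * 10 + (*s - '0');
			else if (*s == ',' && i < 5)
				i++;
			else
				return -1;	/* unexpected character */
		}
		return i == 5 ? 1 : -1;
	}

	int main(void)
	{
		const char *msg = "227 Entering Passive Mode (10,0,0,1,19,137)\r\n";
		unsigned char p[6];

		if (parse_addrport(msg, msg + strlen(msg), '(', ')', p) == 1)
			printf("%u.%u.%u.%u port %u\n",
			       p[0], p[1], p[2], p[3], (p[4] << 8) | p[5]);
		return 0;
	}
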
 
diff --git a/net/netfilter/ipvs/ip_vs_proto.c b/net/netfilter/ipvs/ip_vs_proto.c
index eb86028..52d073c 100644
--- a/net/netfilter/ipvs/ip_vs_proto.c
+++ b/net/netfilter/ipvs/ip_vs_proto.c
@@ -316,7 +316,7 @@
 /*
  * per network name-space init
  */
-int __net_init __ip_vs_protocol_init(struct net *net)
+int __net_init ip_vs_protocol_net_init(struct net *net)
 {
 #ifdef CONFIG_IP_VS_PROTO_TCP
 	register_ip_vs_proto_netns(net, &ip_vs_protocol_tcp);
@@ -336,7 +336,7 @@
 	return 0;
 }
 
-void __net_exit __ip_vs_protocol_cleanup(struct net *net)
+void __net_exit ip_vs_protocol_net_cleanup(struct net *net)
 {
 	struct netns_ipvs *ipvs = net_ipvs(net);
 	struct ip_vs_proto_data *pd;
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index e292e5b..7ee7215 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -1663,7 +1663,7 @@
 /*
  * Initialize data struct for each netns
  */
-int __net_init __ip_vs_sync_init(struct net *net)
+int __net_init ip_vs_sync_net_init(struct net *net)
 {
 	struct netns_ipvs *ipvs = net_ipvs(net);
 
@@ -1677,7 +1677,7 @@
 	return 0;
 }
 
-void __ip_vs_sync_cleanup(struct net *net)
+void ip_vs_sync_net_cleanup(struct net *net)
 {
 	int retc;
 
@@ -1689,12 +1689,3 @@
 	if (retc && retc != -ESRCH)
 		pr_err("Failed to stop Backup Daemon\n");
 }
-
-int __init ip_vs_sync_init(void)
-{
-	return 0;
-}
-
-void ip_vs_sync_cleanup(void)
-{
-}
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 482e90c..7dec88a 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -970,7 +970,7 @@
 
 	if (nlh->nlmsg_flags & NLM_F_DUMP)
 		return netlink_dump_start(ctnl, skb, nlh, ctnetlink_dump_table,
-					  ctnetlink_done);
+					  ctnetlink_done, 0);
 
 	err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
 	if (err < 0)
@@ -1840,7 +1840,7 @@
 	if (nlh->nlmsg_flags & NLM_F_DUMP) {
 		return netlink_dump_start(ctnl, skb, nlh,
 					  ctnetlink_exp_dump_table,
-					  ctnetlink_exp_done);
+					  ctnetlink_exp_done, 0);
 	}
 
 	err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index 782e519..0221d10 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -5,7 +5,7 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 #include <linux/module.h>
 #include <linux/gfp.h>
 #include <linux/skbuff.h>
@@ -95,8 +95,11 @@
 	if (info->helper[0]) {
 		ret = -ENOENT;
 		proto = xt_ct_find_proto(par);
-		if (!proto)
+		if (!proto) {
+			pr_info("You must specify an L4 protocol, "
+				"and not use inversions on it.\n");
 			goto err3;
+		}
 
 		ret = -ENOMEM;
 		help = nf_ct_helper_ext_add(ct, GFP_KERNEL);
@@ -107,8 +110,10 @@
 		help->helper = nf_conntrack_helper_try_module_get(info->helper,
 								  par->family,
 								  proto);
-		if (help->helper == NULL)
+		if (help->helper == NULL) {
+			pr_info("No such helper \"%s\"\n", info->helper);
 			goto err3;
+		}
 	}
 
 	__set_bit(IPS_TEMPLATE_BIT, &ct->status);
diff --git a/net/netfilter/xt_set.c b/net/netfilter/xt_set.c
index b3babae..19461c4 100644
--- a/net/netfilter/xt_set.c
+++ b/net/netfilter/xt_set.c
@@ -29,23 +29,33 @@
 
 static inline int
 match_set(ip_set_id_t index, const struct sk_buff *skb,
-	  u8 pf, u8 dim, u8 flags, int inv)
+	  const struct xt_action_param *par,
+	  const struct ip_set_adt_opt *opt, int inv)
 {
-	if (ip_set_test(index, skb, pf, dim, flags))
+	if (ip_set_test(index, skb, par, opt))
 		inv = !inv;
 	return inv;
 }
 
+#define ADT_OPT(n, f, d, fs, cfs, t)	\
+const struct ip_set_adt_opt n = {	\
+	.family	= f,			\
+	.dim = d,			\
+	.flags = fs,			\
+	.cmdflags = cfs,		\
+	.timeout = t,			\
+}
+
 /* Revision 0 interface: backward compatible with netfilter/iptables */
 
 static bool
 set_match_v0(const struct sk_buff *skb, struct xt_action_param *par)
 {
 	const struct xt_set_info_match_v0 *info = par->matchinfo;
+	ADT_OPT(opt, par->family, info->match_set.u.compat.dim,
+		info->match_set.u.compat.flags, 0, UINT_MAX);
 
-	return match_set(info->match_set.index, skb, par->family,
-			 info->match_set.u.compat.dim,
-			 info->match_set.u.compat.flags,
+	return match_set(info->match_set.index, skb, par, &opt,
 			 info->match_set.u.compat.flags & IPSET_INV_MATCH);
 }
 
@@ -103,15 +113,15 @@
 set_target_v0(struct sk_buff *skb, const struct xt_action_param *par)
 {
 	const struct xt_set_info_target_v0 *info = par->targinfo;
+	ADT_OPT(add_opt, par->family, info->add_set.u.compat.dim,
+		info->add_set.u.compat.flags, 0, UINT_MAX);
+	ADT_OPT(del_opt, par->family, info->del_set.u.compat.dim,
+		info->del_set.u.compat.flags, 0, UINT_MAX);
 
 	if (info->add_set.index != IPSET_INVALID_ID)
-		ip_set_add(info->add_set.index, skb, par->family,
-			   info->add_set.u.compat.dim,
-			   info->add_set.u.compat.flags);
+		ip_set_add(info->add_set.index, skb, par, &add_opt);
 	if (info->del_set.index != IPSET_INVALID_ID)
-		ip_set_del(info->del_set.index, skb, par->family,
-			   info->del_set.u.compat.dim,
-			   info->del_set.u.compat.flags);
+		ip_set_del(info->del_set.index, skb, par, &del_opt);
 
 	return XT_CONTINUE;
 }
@@ -170,23 +180,23 @@
 		ip_set_nfnl_put(info->del_set.index);
 }
 
-/* Revision 1: current interface to netfilter/iptables */
+/* Revision 1 match and target */
 
 static bool
-set_match(const struct sk_buff *skb, struct xt_action_param *par)
+set_match_v1(const struct sk_buff *skb, struct xt_action_param *par)
 {
-	const struct xt_set_info_match *info = par->matchinfo;
+	const struct xt_set_info_match_v1 *info = par->matchinfo;
+	ADT_OPT(opt, par->family, info->match_set.dim,
+		info->match_set.flags, 0, UINT_MAX);
 
-	return match_set(info->match_set.index, skb, par->family,
-			 info->match_set.dim,
-			 info->match_set.flags,
+	return match_set(info->match_set.index, skb, par, &opt,
 			 info->match_set.flags & IPSET_INV_MATCH);
 }
 
 static int
-set_match_checkentry(const struct xt_mtchk_param *par)
+set_match_v1_checkentry(const struct xt_mtchk_param *par)
 {
-	struct xt_set_info_match *info = par->matchinfo;
+	struct xt_set_info_match_v1 *info = par->matchinfo;
 	ip_set_id_t index;
 
 	index = ip_set_nfnl_get_byindex(info->match_set.index);
@@ -207,36 +217,34 @@
 }
 
 static void
-set_match_destroy(const struct xt_mtdtor_param *par)
+set_match_v1_destroy(const struct xt_mtdtor_param *par)
 {
-	struct xt_set_info_match *info = par->matchinfo;
+	struct xt_set_info_match_v1 *info = par->matchinfo;
 
 	ip_set_nfnl_put(info->match_set.index);
 }
 
 static unsigned int
-set_target(struct sk_buff *skb, const struct xt_action_param *par)
+set_target_v1(struct sk_buff *skb, const struct xt_action_param *par)
 {
-	const struct xt_set_info_target *info = par->targinfo;
+	const struct xt_set_info_target_v1 *info = par->targinfo;
+	ADT_OPT(add_opt, par->family, info->add_set.dim,
+		info->add_set.flags, 0, UINT_MAX);
+	ADT_OPT(del_opt, par->family, info->del_set.dim,
+		info->del_set.flags, 0, UINT_MAX);
 
 	if (info->add_set.index != IPSET_INVALID_ID)
-		ip_set_add(info->add_set.index,
-			   skb, par->family,
-			   info->add_set.dim,
-			   info->add_set.flags);
+		ip_set_add(info->add_set.index, skb, par, &add_opt);
 	if (info->del_set.index != IPSET_INVALID_ID)
-		ip_set_del(info->del_set.index,
-			   skb, par->family,
-			   info->del_set.dim,
-			   info->del_set.flags);
+		ip_set_del(info->del_set.index, skb, par, &del_opt);
 
 	return XT_CONTINUE;
 }
 
 static int
-set_target_checkentry(const struct xt_tgchk_param *par)
+set_target_v1_checkentry(const struct xt_tgchk_param *par)
 {
-	const struct xt_set_info_target *info = par->targinfo;
+	const struct xt_set_info_target_v1 *info = par->targinfo;
 	ip_set_id_t index;
 
 	if (info->add_set.index != IPSET_INVALID_ID) {
@@ -273,9 +281,9 @@
 }
 
 static void
-set_target_destroy(const struct xt_tgdtor_param *par)
+set_target_v1_destroy(const struct xt_tgdtor_param *par)
 {
-	const struct xt_set_info_target *info = par->targinfo;
+	const struct xt_set_info_target_v1 *info = par->targinfo;
 
 	if (info->add_set.index != IPSET_INVALID_ID)
 		ip_set_nfnl_put(info->add_set.index);
@@ -283,6 +291,28 @@
 		ip_set_nfnl_put(info->del_set.index);
 }
 
+/* Revision 2 target */
+
+static unsigned int
+set_target_v2(struct sk_buff *skb, const struct xt_action_param *par)
+{
+	const struct xt_set_info_target_v2 *info = par->targinfo;
+	ADT_OPT(add_opt, par->family, info->add_set.dim,
+		info->add_set.flags, info->flags, info->timeout);
+	ADT_OPT(del_opt, par->family, info->del_set.dim,
+		info->del_set.flags, 0, UINT_MAX);
+
+	if (info->add_set.index != IPSET_INVALID_ID)
+		ip_set_add(info->add_set.index, skb, par, &add_opt);
+	if (info->del_set.index != IPSET_INVALID_ID)
+		ip_set_del(info->del_set.index, skb, par, &del_opt);
+
+	return XT_CONTINUE;
+}
+
+#define set_target_v2_checkentry	set_target_v1_checkentry
+#define set_target_v2_destroy		set_target_v1_destroy
+
 static struct xt_match set_matches[] __read_mostly = {
 	{
 		.name		= "set",
@@ -298,20 +328,20 @@
 		.name		= "set",
 		.family		= NFPROTO_IPV4,
 		.revision	= 1,
-		.match		= set_match,
-		.matchsize	= sizeof(struct xt_set_info_match),
-		.checkentry	= set_match_checkentry,
-		.destroy	= set_match_destroy,
+		.match		= set_match_v1,
+		.matchsize	= sizeof(struct xt_set_info_match_v1),
+		.checkentry	= set_match_v1_checkentry,
+		.destroy	= set_match_v1_destroy,
 		.me		= THIS_MODULE
 	},
 	{
 		.name		= "set",
 		.family		= NFPROTO_IPV6,
 		.revision	= 1,
-		.match		= set_match,
-		.matchsize	= sizeof(struct xt_set_info_match),
-		.checkentry	= set_match_checkentry,
-		.destroy	= set_match_destroy,
+		.match		= set_match_v1,
+		.matchsize	= sizeof(struct xt_set_info_match_v1),
+		.checkentry	= set_match_v1_checkentry,
+		.destroy	= set_match_v1_destroy,
 		.me		= THIS_MODULE
 	},
 };
@@ -331,20 +361,40 @@
 		.name		= "SET",
 		.revision	= 1,
 		.family		= NFPROTO_IPV4,
-		.target		= set_target,
-		.targetsize	= sizeof(struct xt_set_info_target),
-		.checkentry	= set_target_checkentry,
-		.destroy	= set_target_destroy,
+		.target		= set_target_v1,
+		.targetsize	= sizeof(struct xt_set_info_target_v1),
+		.checkentry	= set_target_v1_checkentry,
+		.destroy	= set_target_v1_destroy,
 		.me		= THIS_MODULE
 	},
 	{
 		.name		= "SET",
 		.revision	= 1,
 		.family		= NFPROTO_IPV6,
-		.target		= set_target,
-		.targetsize	= sizeof(struct xt_set_info_target),
-		.checkentry	= set_target_checkentry,
-		.destroy	= set_target_destroy,
+		.target		= set_target_v1,
+		.targetsize	= sizeof(struct xt_set_info_target_v1),
+		.checkentry	= set_target_v1_checkentry,
+		.destroy	= set_target_v1_destroy,
+		.me		= THIS_MODULE
+	},
+	{
+		.name		= "SET",
+		.revision	= 2,
+		.family		= NFPROTO_IPV4,
+		.target		= set_target_v2,
+		.targetsize	= sizeof(struct xt_set_info_target_v2),
+		.checkentry	= set_target_v2_checkentry,
+		.destroy	= set_target_v2_destroy,
+		.me		= THIS_MODULE
+	},
+	{
+		.name		= "SET",
+		.revision	= 2,
+		.family		= NFPROTO_IPV6,
+		.target		= set_target_v2,
+		.targetsize	= sizeof(struct xt_set_info_target_v2),
+		.checkentry	= set_target_v2_checkentry,
+		.destroy	= set_target_v2_destroy,
 		.me		= THIS_MODULE
 	},
 };
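
Revisions let old and new userspace coexist: the same "SET" name is registered several times, each entry pairing a revision number with the binary layout (targetsize) and handlers that understand it, and revision 2 adds the per-rule timeout and cmdflags carried via ADT_OPT(). A sketch of that lookup-by-revision idea — the sizes and handler bodies here are placeholders:

	#include <stdio.h>
	#include <string.h>

	struct target {
		const char *name;
		unsigned int revision;
		size_t targetsize;	/* placeholder sizes, not the real structs */
		void (*fn)(void);
	};

	static void target_v1(void) { puts("v1: dim/flags only"); }
	static void target_v2(void) { puts("v2: adds timeout and cmdflags"); }

	static const struct target targets[] = {
		{ "SET", 1, 8,  target_v1 },
		{ "SET", 2, 16, target_v2 },
	};

	/* Pick the entry whose revision matches what userspace asked for. */
	static const struct target *lookup(const char *name, unsigned int rev)
	{
		size_t i;

		for (i = 0; i < sizeof(targets) / sizeof(targets[0]); i++)
			if (!strcmp(targets[i].name, name) &&
			    targets[i].revision == rev)
				return &targets[i];
		return NULL;
	}

	int main(void)
	{
		const struct target *t = lookup("SET", 2);

		if (t)
			t->fn();
		return 0;
	}
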
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index 9c38658..8efd061 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -426,10 +426,9 @@
 					      audit_info);
 	switch (addr_len) {
 	case sizeof(struct in_addr): {
-		struct in_addr *addr4, *mask4;
+		const struct in_addr *addr4 = addr;
+		const struct in_addr *mask4 = mask;
 
-		addr4 = (struct in_addr *)addr;
-		mask4 = (struct in_addr *)mask;
 		ret_val = netlbl_unlhsh_add_addr4(iface, addr4, mask4, secid);
 		if (audit_buf != NULL)
 			netlbl_af4list_audit_addr(audit_buf, 1,
@@ -440,10 +439,9 @@
 	}
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 	case sizeof(struct in6_addr): {
-		struct in6_addr *addr6, *mask6;
+		const struct in6_addr *addr6 = addr;
+		const struct in6_addr *mask6 = mask;
 
-		addr6 = (struct in6_addr *)addr;
-		mask6 = (struct in6_addr *)mask;
 		ret_val = netlbl_unlhsh_add_addr6(iface, addr6, mask6, secid);
 		if (audit_buf != NULL)
 			netlbl_af6list_audit_addr(audit_buf, 1,
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 6ef64ad..ca5276c 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1659,13 +1659,10 @@
 {
 	struct netlink_sock *nlk = nlk_sk(sk);
 	struct netlink_callback *cb;
-	struct sk_buff *skb;
+	struct sk_buff *skb = NULL;
 	struct nlmsghdr *nlh;
 	int len, err = -ENOBUFS;
-
-	skb = sock_rmalloc(sk, NLMSG_GOODSIZE, 0, GFP_KERNEL);
-	if (!skb)
-		goto errout;
+	int alloc_size;
 
 	mutex_lock(nlk->cb_mutex);
 
@@ -1675,6 +1672,12 @@
 		goto errout_skb;
 	}
 
+	alloc_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
+
+	skb = sock_rmalloc(sk, alloc_size, 0, GFP_KERNEL);
+	if (!skb)
+		goto errout_skb;
+
 	len = cb->dump(skb, cb);
 
 	if (len > 0) {
@@ -1713,7 +1716,6 @@
 errout_skb:
 	mutex_unlock(nlk->cb_mutex);
 	kfree_skb(skb);
-errout:
 	return err;
 }
 
@@ -1721,7 +1723,8 @@
 		       const struct nlmsghdr *nlh,
 		       int (*dump)(struct sk_buff *skb,
 				   struct netlink_callback *),
-		       int (*done)(struct netlink_callback *))
+		       int (*done)(struct netlink_callback *),
+		       u16 min_dump_alloc)
 {
 	struct netlink_callback *cb;
 	struct sock *sk;
@@ -1735,6 +1738,7 @@
 	cb->dump = dump;
 	cb->done = done;
 	cb->nlh = nlh;
+	cb->min_dump_alloc = min_dump_alloc;
 	atomic_inc(&skb->users);
 	cb->skb = skb;
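
The effect of min_dump_alloc: netlink_dump() now sizes its skb as the larger of the requester's hint and NLMSG_GOODSIZE, so a dump whose single record exceeds the default buffer can still be delivered in one piece. The sizing rule, sketched with a stand-in constant:

	#include <stdio.h>

	#define NLMSG_GOODSIZE 4096	/* stand-in; the real value is page-size based */

	/* Dump buffer size: at least NLMSG_GOODSIZE, more if the caller asked. */
	static unsigned int dump_alloc_size(unsigned short min_dump_alloc)
	{
		return min_dump_alloc > NLMSG_GOODSIZE ? min_dump_alloc
						       : NLMSG_GOODSIZE;
	}

	int main(void)
	{
		printf("%u\n", dump_alloc_size(0));	/* 4096 */
		printf("%u\n", dump_alloc_size(16384));	/* 16384 */
		return 0;
	}
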
 
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 1781d99..482fa57 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -525,7 +525,7 @@
 
 		genl_unlock();
 		err = netlink_dump_start(net->genl_sock, skb, nlh,
-					 ops->dumpit, ops->done);
+					 ops->dumpit, ops->done, 0);
 		genl_lock();
 		return err;
 	}
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index c0c3cda..461b16f 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -975,7 +975,8 @@
 	struct sk_buff *skb;
 	struct net_device *dev;
 	__be16 proto;
-	int ifindex, err, reserve = 0;
+	bool need_rls_dev = false;
+	int err, reserve = 0;
 	void *ph;
 	struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
 	int tp_len, size_max;
@@ -987,7 +988,7 @@
 
 	err = -EBUSY;
 	if (saddr == NULL) {
-		ifindex	= po->ifindex;
+		dev = po->prot_hook.dev;
 		proto	= po->num;
 		addr	= NULL;
 	} else {
@@ -998,12 +999,12 @@
 					+ offsetof(struct sockaddr_ll,
 						sll_addr)))
 			goto out;
-		ifindex	= saddr->sll_ifindex;
 		proto	= saddr->sll_protocol;
 		addr	= saddr->sll_addr;
+		dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
+		need_rls_dev = true;
 	}
 
-	dev = dev_get_by_index(sock_net(&po->sk), ifindex);
 	err = -ENXIO;
 	if (unlikely(dev == NULL))
 		goto out;
@@ -1089,7 +1090,8 @@
 	__packet_set_status(po, ph, status);
 	kfree_skb(skb);
 out_put:
-	dev_put(dev);
+	if (need_rls_dev)
+		dev_put(dev);
 out:
 	mutex_unlock(&po->pg_vec_lock);
 	return err;
@@ -1127,8 +1129,9 @@
 	struct sk_buff *skb;
 	struct net_device *dev;
 	__be16 proto;
+	bool need_rls_dev = false;
 	unsigned char *addr;
-	int ifindex, err, reserve = 0;
+	int err, reserve = 0;
 	struct virtio_net_hdr vnet_hdr = { 0 };
 	int offset = 0;
 	int vnet_hdr_len;
@@ -1140,7 +1143,7 @@
 	 */
 
 	if (saddr == NULL) {
-		ifindex	= po->ifindex;
+		dev = po->prot_hook.dev;
 		proto	= po->num;
 		addr	= NULL;
 	} else {
@@ -1149,13 +1152,12 @@
 			goto out;
 		if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
 			goto out;
-		ifindex	= saddr->sll_ifindex;
 		proto	= saddr->sll_protocol;
 		addr	= saddr->sll_addr;
+		dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
+		need_rls_dev = true;
 	}
 
-
-	dev = dev_get_by_index(sock_net(sk), ifindex);
 	err = -ENXIO;
 	if (dev == NULL)
 		goto out_unlock;
@@ -1286,14 +1288,15 @@
 	if (err > 0 && (err = net_xmit_errno(err)) != 0)
 		goto out_unlock;
 
-	dev_put(dev);
+	if (need_rls_dev)
+		dev_put(dev);
 
 	return len;
 
 out_free:
 	kfree_skb(skb);
 out_unlock:
-	if (dev)
+	if (dev && need_rls_dev)
 		dev_put(dev);
 out:
 	return err;
@@ -1343,6 +1346,10 @@
 		__dev_remove_pack(&po->prot_hook);
 		__sock_put(sk);
 	}
+	if (po->prot_hook.dev) {
+		dev_put(po->prot_hook.dev);
+		po->prot_hook.dev = NULL;
+	}
 	spin_unlock(&po->bind_lock);
 
 	packet_flush_mclist(sk);
@@ -1396,6 +1403,8 @@
 
 	po->num = protocol;
 	po->prot_hook.type = protocol;
+	if (po->prot_hook.dev)
+		dev_put(po->prot_hook.dev);
 	po->prot_hook.dev = dev;
 
 	po->ifindex = dev ? dev->ifindex : 0;
@@ -1440,10 +1449,8 @@
 	strlcpy(name, uaddr->sa_data, sizeof(name));
 
 	dev = dev_get_by_name(sock_net(sk), name);
-	if (dev) {
+	if (dev)
 		err = packet_do_bind(sk, dev, pkt_sk(sk)->num);
-		dev_put(dev);
-	}
 	return err;
 }
 
@@ -1471,8 +1478,6 @@
 			goto out;
 	}
 	err = packet_do_bind(sk, dev, sll->sll_protocol ? : pkt_sk(sk)->num);
-	if (dev)
-		dev_put(dev);
 
 out:
 	return err;
@@ -1681,6 +1686,8 @@
 			vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
 			vnet_hdr.csum_start = skb_checksum_start_offset(skb);
 			vnet_hdr.csum_offset = skb->csum_offset;
+		} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+			vnet_hdr.flags = VIRTIO_NET_HDR_F_DATA_VALID;
 		} /* else everything is zero */
 
 		err = memcpy_toiovec(msg->msg_iov, (void *)&vnet_hdr,
@@ -2242,6 +2249,8 @@
 				}
 				if (msg == NETDEV_UNREGISTER) {
 					po->ifindex = -1;
+					if (po->prot_hook.dev)
+						dev_put(po->prot_hook.dev);
 					po->prot_hook.dev = NULL;
 				}
 				spin_unlock(&po->bind_lock);
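
The af_packet changes switch the send paths to a borrow-or-own reference scheme: when no address is given, the cached po->prot_hook.dev is used under the reference the bind path already holds; only an explicit dev_get_by_index() lookup takes (and must release) its own reference, tracked by need_rls_dev. A sketch of that pattern:

	#include <stdio.h>

	struct device {
		int refcnt;
	};

	static struct device cached_dev = { .refcnt = 1 };	/* ref held by bind */

	static struct device *dev_get(struct device *dev)
	{
		dev->refcnt++;
		return dev;
	}

	static void dev_put(struct device *dev)
	{
		dev->refcnt--;
	}

	/* Use the cached device without touching its refcount, or take our own
	 * reference when we resolve the device ourselves. */
	static void send_one(struct device *explicit)
	{
		struct device *dev;
		int need_rls_dev = 0;

		if (!explicit) {
			dev = &cached_dev;		/* borrowed reference */
		} else {
			dev = dev_get(explicit);	/* our own reference */
			need_rls_dev = 1;
		}

		/* ... transmit via dev ... */

		if (need_rls_dev)
			dev_put(dev);
	}

	int main(void)
	{
		send_one(NULL);
		printf("cached refcnt %d\n", cached_dev.refcnt);	/* still 1 */
		return 0;
	}
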
diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c
index 438accb..d61f676 100644
--- a/net/phonet/pn_netlink.c
+++ b/net/phonet/pn_netlink.c
@@ -289,15 +289,16 @@
 
 int __init phonet_netlink_register(void)
 {
-	int err = __rtnl_register(PF_PHONET, RTM_NEWADDR, addr_doit, NULL);
+	int err = __rtnl_register(PF_PHONET, RTM_NEWADDR, addr_doit,
+				  NULL, NULL);
 	if (err)
 		return err;
 
 	/* Further __rtnl_register() cannot fail */
-	__rtnl_register(PF_PHONET, RTM_DELADDR, addr_doit, NULL);
-	__rtnl_register(PF_PHONET, RTM_GETADDR, NULL, getaddr_dumpit);
-	__rtnl_register(PF_PHONET, RTM_NEWROUTE, route_doit, NULL);
-	__rtnl_register(PF_PHONET, RTM_DELROUTE, route_doit, NULL);
-	__rtnl_register(PF_PHONET, RTM_GETROUTE, NULL, route_dumpit);
+	__rtnl_register(PF_PHONET, RTM_DELADDR, addr_doit, NULL, NULL);
+	__rtnl_register(PF_PHONET, RTM_GETADDR, NULL, getaddr_dumpit, NULL);
+	__rtnl_register(PF_PHONET, RTM_NEWROUTE, route_doit, NULL, NULL);
+	__rtnl_register(PF_PHONET, RTM_DELROUTE, route_doit, NULL, NULL);
+	__rtnl_register(PF_PHONET, RTM_GETROUTE, NULL, route_dumpit, NULL);
 	return 0;
 }
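
__rtnl_register()/rtnl_register() gained a third callback slot alongside doit/dumpit — this pairs with the min_dump_alloc plumbing in af_netlink.c above, letting a message type report how large its dump records can get. Callers with no sizing callback, like every site in this patch, pass NULL. A sketch of a handler table with an optional third slot (the "calcit" name and signature are assumptions of this example):

	#include <stdio.h>

	typedef int (*doit_t)(int);
	typedef int (*dumpit_t)(int);
	typedef unsigned short (*calcit_t)(int);	/* optional dump-size hint */

	struct handler {
		doit_t doit;
		dumpit_t dumpit;
		calcit_t calcit;
	};

	static struct handler table[8];

	/* Register a message type; any callback slot may be NULL. */
	static void reg(int msgtype, doit_t doit, dumpit_t dumpit, calcit_t calcit)
	{
		table[msgtype].doit = doit;
		table[msgtype].dumpit = dumpit;
		table[msgtype].calcit = calcit;
	}

	static unsigned short dump_size(int msgtype, int arg)
	{
		calcit_t c = table[msgtype].calcit;

		return c ? c(arg) : 0;	/* 0 means "use the default buffer size" */
	}

	int main(void)
	{
		reg(0, NULL, NULL, NULL);		/* like the calls in this patch */
		printf("%u\n", dump_size(0, 0));	/* 0 -> default */
		return 0;
	}
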
diff --git a/net/rds/bind.c b/net/rds/bind.c
index 2f6b3fc..637bde5 100644
--- a/net/rds/bind.c
+++ b/net/rds/bind.c
@@ -35,6 +35,7 @@
 #include <linux/in.h>
 #include <linux/if_arp.h>
 #include <linux/jhash.h>
+#include <linux/ratelimit.h>
 #include "rds.h"
 
 #define BIND_HASH_SIZE 1024
@@ -185,8 +186,7 @@
 	if (!trans) {
 		ret = -EADDRNOTAVAIL;
 		rds_remove_bound(rs);
-		if (printk_ratelimit())
-			printk(KERN_INFO "RDS: rds_bind() could not find a transport, "
+		printk_ratelimited(KERN_INFO "RDS: rds_bind() could not find a transport, "
 				"load rds_tcp or rds_rdma?\n");
 		goto out;
 	}
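
printk_ratelimited() collapses the old "if (printk_ratelimit()) printk(...)" pair into one statement with per-call-site rate-limit state, which is why each converted site above loses its surrounding if. A crude user-space stand-in — one message per second per call site; the kernel's real version keeps a ratelimit_state with burst accounting:

	#include <stdio.h>
	#include <time.h>

	/* Crude stand-in: allow one message per second per call site. */
	#define printk_ratelimited(fmt, ...)			\
	do {							\
		static time_t last;				\
		time_t now = time(NULL);			\
		if (now != last) {				\
			last = now;				\
			printf(fmt, ##__VA_ARGS__);		\
		}						\
	} while (0)

	int main(void)
	{
		int i;

		for (i = 0; i < 1000; i++)
			printk_ratelimited("bind failed, load rds_tcp?\n");
		/* prints once (or twice across a second boundary) */
		return 0;
	}
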
diff --git a/net/rds/ib.h b/net/rds/ib.h
index 4297d92..edfaaaf 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -3,6 +3,7 @@
 
 #include <rdma/ib_verbs.h>
 #include <rdma/rdma_cm.h>
+#include <linux/interrupt.h>
 #include <linux/pci.h>
 #include <linux/slab.h>
 #include "rds.h"
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index fd453dd..cd67026 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -34,6 +34,7 @@
 #include <linux/in.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
+#include <linux/ratelimit.h>
 
 #include "rds.h"
 #include "ib.h"
@@ -435,13 +436,12 @@
 		version = RDS_PROTOCOL_3_0;
 		while ((common >>= 1) != 0)
 			version++;
-	} else if (printk_ratelimit()) {
-		printk(KERN_NOTICE "RDS: Connection from %pI4 using "
+	}
+	printk_ratelimited(KERN_NOTICE "RDS: Connection from %pI4 using "
 			"incompatible protocol version %u.%u\n",
 			&dp->dp_saddr,
 			dp->dp_protocol_major,
 			dp->dp_protocol_minor);
-	}
 	return version;
 }
 
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index 7c4dce8..e590949 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -34,6 +34,7 @@
 #include <linux/in.h>
 #include <linux/device.h>
 #include <linux/dmapool.h>
+#include <linux/ratelimit.h>
 
 #include "rds.h"
 #include "ib.h"
@@ -207,8 +208,7 @@
 		}
 		break;
 	default:
-		if (printk_ratelimit())
-			printk(KERN_NOTICE
+		printk_ratelimited(KERN_NOTICE
 			       "RDS/IB: %s: unexpected opcode 0x%x in WR!\n",
 			       __func__, send->s_wr.opcode);
 		break;
diff --git a/net/rds/iw.h b/net/rds/iw.h
index 9015192..04ce3b1 100644
--- a/net/rds/iw.h
+++ b/net/rds/iw.h
@@ -1,6 +1,7 @@
 #ifndef _RDS_IW_H
 #define _RDS_IW_H
 
+#include <linux/interrupt.h>
 #include <rdma/ib_verbs.h>
 #include <rdma/rdma_cm.h>
 #include "rds.h"
diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
index c12db66..9556d28 100644
--- a/net/rds/iw_cm.c
+++ b/net/rds/iw_cm.c
@@ -34,6 +34,7 @@
 #include <linux/in.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
+#include <linux/ratelimit.h>
 
 #include "rds.h"
 #include "iw.h"
@@ -258,8 +259,7 @@
 	 */
 	rds_iwdev = ib_get_client_data(dev, &rds_iw_client);
 	if (!rds_iwdev) {
-		if (printk_ratelimit())
-			printk(KERN_NOTICE "RDS/IW: No client_data for device %s\n",
+		printk_ratelimited(KERN_NOTICE "RDS/IW: No client_data for device %s\n",
 					dev->name);
 		return -EOPNOTSUPP;
 	}
@@ -365,13 +365,12 @@
 		version = RDS_PROTOCOL_3_0;
 		while ((common >>= 1) != 0)
 			version++;
-	} else if (printk_ratelimit()) {
-		printk(KERN_NOTICE "RDS: Connection from %pI4 using "
+	}
+	printk_ratelimited(KERN_NOTICE "RDS: Connection from %pI4 using "
 			"incompatible protocol version %u.%u\n",
 			&dp->dp_saddr,
 			dp->dp_protocol_major,
 			dp->dp_protocol_minor);
-	}
 	return version;
 }
 
diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c
index 6deaa77..8b77edb 100644
--- a/net/rds/iw_rdma.c
+++ b/net/rds/iw_rdma.c
@@ -32,6 +32,7 @@
  */
 #include <linux/kernel.h>
 #include <linux/slab.h>
+#include <linux/ratelimit.h>
 
 #include "rds.h"
 #include "iw.h"
@@ -729,8 +730,8 @@
 	failed_wr = &f_wr;
 	ret = ib_post_send(ibmr->cm_id->qp, &f_wr, &failed_wr);
 	BUG_ON(failed_wr != &f_wr);
-	if (ret && printk_ratelimit())
-		printk(KERN_WARNING "RDS/IW: %s:%d ib_post_send returned %d\n",
+	if (ret)
+		printk_ratelimited(KERN_WARNING "RDS/IW: %s:%d ib_post_send returned %d\n",
 			__func__, __LINE__, ret);
 	return ret;
 }
@@ -751,8 +752,8 @@
 
 	failed_wr = &s_wr;
 	ret = ib_post_send(ibmr->cm_id->qp, &s_wr, &failed_wr);
-	if (ret && printk_ratelimit()) {
-		printk(KERN_WARNING "RDS/IW: %s:%d ib_post_send returned %d\n",
+	if (ret) {
+		printk_ratelimited(KERN_WARNING "RDS/IW: %s:%d ib_post_send returned %d\n",
 			__func__, __LINE__, ret);
 		goto out;
 	}
diff --git a/net/rds/iw_send.c b/net/rds/iw_send.c
index 545d8ee..e40c3c5 100644
--- a/net/rds/iw_send.c
+++ b/net/rds/iw_send.c
@@ -34,6 +34,7 @@
 #include <linux/in.h>
 #include <linux/device.h>
 #include <linux/dmapool.h>
+#include <linux/ratelimit.h>
 
 #include "rds.h"
 #include "iw.h"
@@ -258,8 +259,7 @@
 				 * when the SEND completes. */
 				break;
 			default:
-				if (printk_ratelimit())
-					printk(KERN_NOTICE
+				printk_ratelimited(KERN_NOTICE
 						"RDS/IW: %s: unexpected opcode 0x%x in WR!\n",
 						__func__, send->s_wr.opcode);
 				break;
diff --git a/net/rds/send.c b/net/rds/send.c
index d58ae5f..aa57e22 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -35,6 +35,7 @@
 #include <net/sock.h>
 #include <linux/in.h>
 #include <linux/list.h>
+#include <linux/ratelimit.h>
 
 #include "rds.h"
 
@@ -1006,16 +1007,14 @@
 		goto out;
 
 	if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
-		if (printk_ratelimit())
-			printk(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
+		printk_ratelimited(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
 			       &rm->rdma, conn->c_trans->xmit_rdma);
 		ret = -EOPNOTSUPP;
 		goto out;
 	}
 
 	if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) {
-		if (printk_ratelimit())
-			printk(KERN_NOTICE "atomic_op %p conn xmit_atomic %p\n",
+		printk_ratelimited(KERN_NOTICE "atomic_op %p conn xmit_atomic %p\n",
 			       &rm->atomic, conn->c_trans->xmit_atomic);
 		ret = -EOPNOTSUPP;
 		goto out;
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index a606025..2f64262 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -1115,9 +1115,10 @@
 
 static int __init tc_action_init(void)
 {
-	rtnl_register(PF_UNSPEC, RTM_NEWACTION, tc_ctl_action, NULL);
-	rtnl_register(PF_UNSPEC, RTM_DELACTION, tc_ctl_action, NULL);
-	rtnl_register(PF_UNSPEC, RTM_GETACTION, tc_ctl_action, tc_dump_action);
+	rtnl_register(PF_UNSPEC, RTM_NEWACTION, tc_ctl_action, NULL, NULL);
+	rtnl_register(PF_UNSPEC, RTM_DELACTION, tc_ctl_action, NULL, NULL);
+	rtnl_register(PF_UNSPEC, RTM_GETACTION, tc_ctl_action, tc_dump_action,
+		      NULL);
 
 	return 0;
 }
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index bb2c523..9563887 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -610,10 +610,10 @@
 
 static int __init tc_filter_init(void)
 {
-	rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_ctl_tfilter, NULL);
-	rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_ctl_tfilter, NULL);
+	rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_ctl_tfilter, NULL, NULL);
+	rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_ctl_tfilter, NULL, NULL);
 	rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_ctl_tfilter,
-						 tc_dump_tfilter);
+		      tc_dump_tfilter, NULL);
 
 	return 0;
 }
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 6b86276..8182aef 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1792,12 +1792,12 @@
 	register_qdisc(&pfifo_head_drop_qdisc_ops);
 	register_qdisc(&mq_qdisc_ops);
 
-	rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL);
-	rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL);
-	rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc);
-	rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL);
-	rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL);
-	rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass);
+	rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, NULL);
+	rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, NULL);
+	rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc, NULL);
+	rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL, NULL);
+	rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL, NULL);
+	rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass, NULL);
 
 	return 0;
 }
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 3f08158..e25e490 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -5,6 +5,7 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/string.h>
 #include <linux/errno.h>
 #include <linux/skbuff.h>
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 4a62888..dc16b90 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -280,6 +280,8 @@
 	asoc->peer.asconf_capable = 0;
 	if (sctp_addip_noauth)
 		asoc->peer.asconf_capable = 1;
+	asoc->asconf_addr_del_pending = NULL;
+	asoc->src_out_of_asoc_ok = 0;
 
 	/* Create an input queue.  */
 	sctp_inq_init(&asoc->base.inqueue);
@@ -446,6 +448,10 @@
 
 	sctp_asconf_queue_teardown(asoc);
 
+	/* Free the pending address entry slated for deletion */
+	if (asoc->asconf_addr_del_pending != NULL)
+		kfree(asoc->asconf_addr_del_pending);
+
 	/* AUTH - Free the endpoint shared keys */
 	sctp_auth_destroy_keys(&asoc->endpoint_shared_keys);
 
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
index 83e3011..4ece451 100644
--- a/net/sctp/bind_addr.c
+++ b/net/sctp/bind_addr.c
@@ -430,7 +430,7 @@
 	list_for_each_entry(laddr, &bp->address_list, list) {
 		addr_buf = (union sctp_addr *)addrs;
 		for (i = 0; i < addrcnt; i++) {
-			addr = (union sctp_addr *)addr_buf;
+			addr = addr_buf;
 			af = sctp_get_af_specific(addr->v4.sin_family);
 			if (!af)
 				break;
@@ -534,6 +534,21 @@
 	return 0;
 }
 
+int sctp_is_ep_boundall(struct sock *sk)
+{
+	struct sctp_bind_addr *bp;
+	struct sctp_sockaddr_entry *addr;
+
+	bp = &sctp_sk(sk)->ep->base.bind_addr;
+	if (sctp_list_single_entry(&bp->address_list)) {
+		addr = list_entry(bp->address_list.next,
+				  struct sctp_sockaddr_entry, list);
+		if (sctp_is_any(sk, &addr->a))
+			return 1;
+	}
+	return 0;
+}
+
 /********************************************************************
  * 3rd Level Abstractions
  ********************************************************************/
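
sctp_is_ep_boundall() reports whether the endpoint is wildcard-bound: the bind list must hold exactly one entry and that entry must be the ANY address — only such sockets take part in automatic ASCONF. The same test over a plain singly linked list, as a sketch:

	#include <stdio.h>
	#include <string.h>

	struct addr_entry {
		char addr[16];
		struct addr_entry *next;
	};

	/* True when the list has exactly one entry and it is the wildcard. */
	static int is_boundall(const struct addr_entry *head)
	{
		return head && !head->next && !strcmp(head->addr, "0.0.0.0");
	}

	int main(void)
	{
		struct addr_entry any = { "0.0.0.0", NULL };
		struct addr_entry one = { "192.0.2.1", NULL };

		printf("%d %d\n", is_boundall(&any), is_boundall(&one));	/* 1 0 */
		return 0;
	}
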
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 741ed16..b7692aa 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -510,8 +510,7 @@
 	 * discard the packet.
 	 */
 	if (vtag == 0) {
-		chunkhdr = (struct sctp_init_chunk *)((void *)sctphdr
-				+ sizeof(struct sctphdr));
+		chunkhdr = (void *)sctphdr + sizeof(struct sctphdr);
 		if (len < sizeof(struct sctphdr) + sizeof(sctp_chunkhdr_t)
 			  + sizeof(__be32) ||
 		    chunkhdr->chunk_hdr.type != SCTP_CID_INIT ||
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 0bb0d7c..aabaee4 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -112,6 +112,7 @@
 			addr->valid = 1;
 			spin_lock_bh(&sctp_local_addr_lock);
 			list_add_tail_rcu(&addr->list, &sctp_local_addr_list);
+			sctp_addr_wq_mgmt(addr, SCTP_ADDR_NEW);
 			spin_unlock_bh(&sctp_local_addr_lock);
 		}
 		break;
@@ -122,6 +123,7 @@
 			if (addr->a.sa.sa_family == AF_INET6 &&
 					ipv6_addr_equal(&addr->a.v6.sin6_addr,
 						&ifa->addr)) {
+				sctp_addr_wq_mgmt(addr, SCTP_ADDR_DEL);
 				found = 1;
 				addr->valid = 0;
 				list_del_rcu(&addr->list);
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 1c88c89..edc7532 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -754,6 +754,16 @@
 	 */
 
 	list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
+		/* RFC 5061, 5.3
+		 * F1) This means that until such time as the ASCONF
+		 * containing the add is acknowledged, the sender MUST
+		 * NOT use the new IP address as a source for ANY SCTP
+		 * packet except on carrying an ASCONF Chunk.
+		 */
+		if (asoc->src_out_of_asoc_ok &&
+		    chunk->chunk_hdr->type != SCTP_CID_ASCONF)
+			continue;
+
 		list_del_init(&chunk->list);
 
 		/* Pick the right transport to use. */
@@ -881,6 +891,9 @@
 		}
 	}
 
+	if (q->asoc->src_out_of_asoc_ok)
+		goto sctp_flush_out;
+
 	/* Is it OK to send data chunks?  */
 	switch (asoc->state) {
 	case SCTP_STATE_COOKIE_ECHOED:
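
The two outqueue hooks enforce RFC 5061 5.3 F1: while src_out_of_asoc_ok is set (an ASCONF add not yet acknowledged), control chunks other than ASCONF are held back and the data flush is skipped entirely. The gate, reduced to a predicate with stand-in chunk types:

	#include <stdio.h>

	enum { CID_DATA = 0, CID_ASCONF = 0xc1 };

	struct chunk {
		int type;
	};

	/* While an ASCONF add is unacknowledged, only ASCONF chunks may be sent
	 * from the new source address (RFC 5061, 5.3 F1). */
	static int may_send(const struct chunk *c, int src_out_of_asoc_ok)
	{
		if (src_out_of_asoc_ok && c->type != CID_ASCONF)
			return 0;
		return 1;
	}

	int main(void)
	{
		struct chunk data = { CID_DATA }, asconf = { CID_ASCONF };

		printf("%d %d\n", may_send(&data, 1), may_send(&asconf, 1)); /* 0 1 */
		return 0;
	}
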
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 67380a2..ab5ded2 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -503,7 +503,9 @@
 		sctp_v4_dst_saddr(&dst_saddr, fl4, htons(bp->port));
 		rcu_read_lock();
 		list_for_each_entry_rcu(laddr, &bp->address_list, list) {
-			if (!laddr->valid || (laddr->state != SCTP_ADDR_SRC))
+			if (!laddr->valid || (laddr->state == SCTP_ADDR_DEL) ||
+			    (laddr->state != SCTP_ADDR_SRC &&
+			    !asoc->src_out_of_asoc_ok))
 				continue;
 			if (sctp_v4_cmp_addr(&dst_saddr, &laddr->a))
 				goto out_unlock;
@@ -623,6 +625,143 @@
 	INET_ECN_xmit(sk);
 }
 
+void sctp_addr_wq_timeout_handler(unsigned long arg)
+{
+	struct sctp_sockaddr_entry *addrw, *temp;
+	struct sctp_sock *sp;
+
+	spin_lock_bh(&sctp_addr_wq_lock);
+
+	list_for_each_entry_safe(addrw, temp, &sctp_addr_waitq, list) {
+		SCTP_DEBUG_PRINTK_IPADDR("sctp_addrwq_timo_handler: the first ent in wq %p is ",
+		    " for cmd %d at entry %p\n", &sctp_addr_waitq, &addrw->a, addrw->state,
+		    addrw);
+
+#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+		/* Now we send an ASCONF for each association */
+		/* Note: we currently don't handle link-local IPv6 addresses */
+		if (addrw->a.sa.sa_family == AF_INET6) {
+			struct in6_addr *in6;
+
+			if (ipv6_addr_type(&addrw->a.v6.sin6_addr) &
+			    IPV6_ADDR_LINKLOCAL)
+				goto free_next;
+
+			in6 = (struct in6_addr *)&addrw->a.v6.sin6_addr;
+			if (ipv6_chk_addr(&init_net, in6, NULL, 0) == 0 &&
+			    addrw->state == SCTP_ADDR_NEW) {
+				unsigned long timeo_val;
+
+				SCTP_DEBUG_PRINTK("sctp_timo_handler: this is on DAD, trying %d sec later\n",
+				    SCTP_ADDRESS_TICK_DELAY);
+				timeo_val = jiffies;
+				timeo_val += msecs_to_jiffies(SCTP_ADDRESS_TICK_DELAY);
+				mod_timer(&sctp_addr_wq_timer, timeo_val);
+				break;
+			}
+		}
+#endif
+		list_for_each_entry(sp, &sctp_auto_asconf_splist, auto_asconf_list) {
+			struct sock *sk;
+
+			sk = sctp_opt2sk(sp);
+			/* ignore bound-specific endpoints */
+			if (!sctp_is_ep_boundall(sk))
+				continue;
+			sctp_bh_lock_sock(sk);
+			if (sctp_asconf_mgmt(sp, addrw) < 0)
+				SCTP_DEBUG_PRINTK("sctp_addrwq_timo_handler: sctp_asconf_mgmt failed\n");
+			sctp_bh_unlock_sock(sk);
+		}
+free_next:
+		list_del(&addrw->list);
+		kfree(addrw);
+	}
+	spin_unlock_bh(&sctp_addr_wq_lock);
+}
+
+static void sctp_free_addr_wq(void)
+{
+	struct sctp_sockaddr_entry *addrw;
+	struct sctp_sockaddr_entry *temp;
+
+	spin_lock_bh(&sctp_addr_wq_lock);
+	del_timer(&sctp_addr_wq_timer);
+	list_for_each_entry_safe(addrw, temp, &sctp_addr_waitq, list) {
+		list_del(&addrw->list);
+		kfree(addrw);
+	}
+	spin_unlock_bh(&sctp_addr_wq_lock);
+}
+
+/* Look up the entry for the same address in the addr_waitq;
+ * sctp_addr_wq_lock MUST be held.
+ */
+static struct sctp_sockaddr_entry *sctp_addr_wq_lookup(struct sctp_sockaddr_entry *addr)
+{
+	struct sctp_sockaddr_entry *addrw;
+
+	list_for_each_entry(addrw, &sctp_addr_waitq, list) {
+		if (addrw->a.sa.sa_family != addr->a.sa.sa_family)
+			continue;
+		if (addrw->a.sa.sa_family == AF_INET) {
+			if (addrw->a.v4.sin_addr.s_addr ==
+			    addr->a.v4.sin_addr.s_addr)
+				return addrw;
+		} else if (addrw->a.sa.sa_family == AF_INET6) {
+			if (ipv6_addr_equal(&addrw->a.v6.sin6_addr,
+			    &addr->a.v6.sin6_addr))
+				return addrw;
+		}
+	}
+	return NULL;
+}
+
+void sctp_addr_wq_mgmt(struct sctp_sockaddr_entry *addr, int cmd)
+{
+	struct sctp_sockaddr_entry *addrw;
+	unsigned long timeo_val;
+
+	/* First, check whether an opposite message already exists in the
+	 * queue; if so, remove it.  This looks wasteful, but DHCP clients
+	 * commonly settle on an address only after a couple of additions
+	 * and deletions of that same address.
+	 */
+
+	spin_lock_bh(&sctp_addr_wq_lock);
+	/* Cancel out an opposite event already queued in addr_wq */
+	addrw = sctp_addr_wq_lookup(addr);
+	if (addrw) {
+		if (addrw->state != cmd) {
+			SCTP_DEBUG_PRINTK_IPADDR("sctp_addr_wq_mgmt offsets existing entry for %d ",
+			    " in wq %p\n", addrw->state, &addrw->a,
+			    &sctp_addr_waitq);
+			list_del(&addrw->list);
+			kfree(addrw);
+		}
+		spin_unlock_bh(&sctp_addr_wq_lock);
+		return;
+	}
+
+	/* OK, we have to add the new address to the wait queue */
+	addrw = kmemdup(addr, sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
+	if (addrw == NULL) {
+		spin_unlock_bh(&sctp_addr_wq_lock);
+		return;
+	}
+	addrw->state = cmd;
+	list_add_tail(&addrw->list, &sctp_addr_waitq);
+	SCTP_DEBUG_PRINTK_IPADDR("sctp_addr_wq_mgmt add new entry for cmd:%d ",
+	    " in wq %p\n", addrw->state, &addrw->a, &sctp_addr_waitq);
+
+	if (!timer_pending(&sctp_addr_wq_timer)) {
+		timeo_val = jiffies;
+		timeo_val += msecs_to_jiffies(SCTP_ADDRESS_TICK_DELAY);
+		mod_timer(&sctp_addr_wq_timer, timeo_val);
+	}
+	spin_unlock_bh(&sctp_addr_wq_lock);
+}
+
 /* Event handler for inet address addition/deletion events.
 * The sctp_local_addr_list needs to be protected by a spin lock since
  * multiple notifiers (say IPv4 and IPv6) may be running at the same
@@ -650,6 +789,7 @@
 			addr->valid = 1;
 			spin_lock_bh(&sctp_local_addr_lock);
 			list_add_tail_rcu(&addr->list, &sctp_local_addr_list);
+			sctp_addr_wq_mgmt(addr, SCTP_ADDR_NEW);
 			spin_unlock_bh(&sctp_local_addr_lock);
 		}
 		break;
@@ -660,6 +800,7 @@
 			if (addr->a.sa.sa_family == AF_INET &&
 					addr->a.v4.sin_addr.s_addr ==
 					ifa->ifa_local) {
+				sctp_addr_wq_mgmt(addr, SCTP_ADDR_DEL);
 				found = 1;
 				addr->valid = 0;
 				list_del_rcu(&addr->list);
@@ -1242,6 +1383,7 @@
 	/* Disable ADDIP by default. */
 	sctp_addip_enable = 0;
 	sctp_addip_noauth = 0;
+	sctp_default_auto_asconf = 0;
 
 	/* Enable PR-SCTP by default. */
 	sctp_prsctp_enable = 1;
@@ -1266,6 +1408,13 @@
 	spin_lock_init(&sctp_local_addr_lock);
 	sctp_get_local_addr_list();
 
+	/* Initialize the address event list */
+	INIT_LIST_HEAD(&sctp_addr_waitq);
+	INIT_LIST_HEAD(&sctp_auto_asconf_splist);
+	spin_lock_init(&sctp_addr_wq_lock);
+	sctp_addr_wq_timer.expires = 0;
+	setup_timer(&sctp_addr_wq_timer, sctp_addr_wq_timeout_handler, 0);
+
 	status = sctp_v4_protosw_init();
 
 	if (status)
@@ -1337,6 +1486,7 @@
 	/* Unregister with inet6/inet layers. */
 	sctp_v6_del_protocol();
 	sctp_v4_del_protocol();
+	sctp_free_addr_wq();
 
 	/* Free the control endpoint.  */
 	inet_ctl_sock_destroy(sctp_ctl_sock);
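
The new address work queue coalesces interface churn: sctp_addr_wq_mgmt() drops a queued event when the opposite event for the same address arrives (so a DHCP add/del/add flurry collapses to a single entry), and the timer later turns each surviving entry into ASCONFs for every bound-all socket. A sketch of the cancel-out rule over a fixed-size table, with simplified addressing:

	#include <stdio.h>
	#include <string.h>

	#define WQ_MAX 16

	enum { ADDR_NEW = 1, ADDR_DEL = 2 };

	struct wq_ent {
		char addr[16];
		int state;
		int used;
	};

	static struct wq_ent wq[WQ_MAX];

	/* Queue an event; an opposite event for the same address cancels it. */
	static void wq_mgmt(const char *addr, int cmd)
	{
		int i, free_slot = -1;

		for (i = 0; i < WQ_MAX; i++) {
			if (!wq[i].used) {
				if (free_slot < 0)
					free_slot = i;
				continue;
			}
			if (strcmp(wq[i].addr, addr))
				continue;
			if (wq[i].state != cmd)
				wq[i].used = 0;	/* opposite event: cancel out */
			return;			/* same event: nothing to do */
		}
		if (free_slot < 0)
			return;			/* queue full; drop (sketch only) */
		strcpy(wq[free_slot].addr, addr);
		wq[free_slot].state = cmd;
		wq[free_slot].used = 1;
	}

	int main(void)
	{
		int i;

		wq_mgmt("10.0.0.2", ADDR_NEW);
		wq_mgmt("10.0.0.2", ADDR_DEL);	/* cancels the NEW */
		wq_mgmt("10.0.0.3", ADDR_NEW);
		for (i = 0; i < WQ_MAX; i++)
			if (wq[i].used)
				printf("%s: %s\n", wq[i].addr,
				       wq[i].state == ADDR_NEW ? "add" : "del");
		return 0;
	}
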
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 58eb27f..81db4e3 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -2768,11 +2768,12 @@
 	int			addr_param_len = 0;
 	int 			totallen = 0;
 	int 			i;
+	int			del_pickup = 0;
 
 	/* Get total length of all the address parameters. */
 	addr_buf = addrs;
 	for (i = 0; i < addrcnt; i++) {
-		addr = (union sctp_addr *)addr_buf;
+		addr = addr_buf;
 		af = sctp_get_af_specific(addr->v4.sin_family);
 		addr_param_len = af->to_addr_param(addr, &addr_param);
 
@@ -2780,6 +2781,13 @@
 		totallen += addr_param_len;
 
 		addr_buf += af->sockaddr_len;
+		if (asoc->asconf_addr_del_pending && !del_pickup) {
+			/* reuse the parameter length of the same-scope address */
+			totallen += paramlen;
+			totallen += addr_param_len;
+			del_pickup = 1;
+			SCTP_DEBUG_PRINTK("mkasconf_update_ip: picked same-scope del_pending addr, totallen for all addresses is %d\n", totallen);
+		}
 	}
 
 	/* Create an asconf chunk with the required length. */
@@ -2790,7 +2798,7 @@
 	/* Add the address parameters to the asconf chunk. */
 	addr_buf = addrs;
 	for (i = 0; i < addrcnt; i++) {
-		addr = (union sctp_addr *)addr_buf;
+		addr = addr_buf;
 		af = sctp_get_af_specific(addr->v4.sin_family);
 		addr_param_len = af->to_addr_param(addr, &addr_param);
 		param.param_hdr.type = flags;
@@ -2802,6 +2810,17 @@
 
 		addr_buf += af->sockaddr_len;
 	}
+	if (flags == SCTP_PARAM_ADD_IP && del_pickup) {
+		addr = asoc->asconf_addr_del_pending;
+		af = sctp_get_af_specific(addr->v4.sin_family);
+		addr_param_len = af->to_addr_param(addr, &addr_param);
+		param.param_hdr.type = SCTP_PARAM_DEL_IP;
+		param.param_hdr.length = htons(paramlen + addr_param_len);
+		param.crr_id = i;
+
+		sctp_addto_chunk(retval, paramlen, &param);
+		sctp_addto_chunk(retval, addr_param_len, &addr_param);
+	}
 	return retval;
 }
 
@@ -2939,8 +2958,7 @@
 	union sctp_addr	addr;
 	union sctp_addr_param *addr_param;
 
-	addr_param = (union sctp_addr_param *)
-			((void *)asconf_param + sizeof(sctp_addip_param_t));
+	addr_param = (void *)asconf_param + sizeof(sctp_addip_param_t);
 
 	if (asconf_param->param_hdr.type != SCTP_PARAM_ADD_IP &&
 	    asconf_param->param_hdr.type != SCTP_PARAM_DEL_IP &&
@@ -3014,7 +3032,7 @@
 		 * an Error Cause TLV set to the new error code 'Request to
 		 * Delete Source IP Address'
 		 */
-		if (sctp_cmp_addr_exact(sctp_source(asconf), &addr))
+		if (sctp_cmp_addr_exact(&asconf->source, &addr))
 			return SCTP_ERROR_DEL_SRC_IP;
 
 		/* Section 4.2.2
@@ -3125,7 +3143,7 @@
 	 * asconf parameter.
 	 */
 	length = ntohs(addr_param->p.length);
-	asconf_param = (sctp_addip_param_t *)((void *)addr_param + length);
+	asconf_param = (void *)addr_param + length;
 	chunk_len -= length;
 
 	/* create an ASCONF_ACK chunk.
@@ -3166,8 +3184,7 @@
 
 		/* Move to the next ASCONF param. */
 		length = ntohs(asconf_param->param_hdr.length);
-		asconf_param = (sctp_addip_param_t *)((void *)asconf_param +
-						      length);
+		asconf_param = (void *)asconf_param + length;
 		chunk_len -= length;
 	}
 
@@ -3197,8 +3214,7 @@
 	struct sctp_transport *transport;
 	struct sctp_sockaddr_entry *saddr;
 
-	addr_param = (union sctp_addr_param *)
-			((void *)asconf_param + sizeof(sctp_addip_param_t));
+	addr_param = (void *)asconf_param + sizeof(sctp_addip_param_t);
 
 	/* We have checked the packet before, so we do not check again.	*/
 	af = sctp_get_af_specific(param_type2af(addr_param->p.type));
@@ -3224,6 +3240,11 @@
 	case SCTP_PARAM_DEL_IP:
 		local_bh_disable();
 		sctp_del_bind_addr(bp, &addr);
+		if (asoc->asconf_addr_del_pending != NULL &&
+		    sctp_cmp_addr_exact(asoc->asconf_addr_del_pending, &addr)) {
+			kfree(asoc->asconf_addr_del_pending);
+			asoc->asconf_addr_del_pending = NULL;
+		}
 		local_bh_enable();
 		list_for_each_entry(transport, &asoc->peer.transport_addr_list,
 				transports) {
@@ -3278,8 +3299,7 @@
 				return SCTP_ERROR_NO_ERROR;
 			case SCTP_PARAM_ERR_CAUSE:
 				length = sizeof(sctp_addip_param_t);
-				err_param = (sctp_errhdr_t *)
-					   ((void *)asconf_ack_param + length);
+				err_param = (void *)asconf_ack_param + length;
 				asconf_ack_len -= length;
 				if (asconf_ack_len > 0)
 					return err_param->cause;
@@ -3292,8 +3312,7 @@
 		}
 
 		length = ntohs(asconf_ack_param->param_hdr.length);
-		asconf_ack_param = (sctp_addip_param_t *)
-					((void *)asconf_ack_param + length);
+		asconf_ack_param = (void *)asconf_ack_param + length;
 		asconf_ack_len -= length;
 	}
 
@@ -3325,7 +3344,7 @@
 	 * pointer to the first asconf parameter.
 	 */
 	length = ntohs(addr_param->p.length);
-	asconf_param = (sctp_addip_param_t *)((void *)addr_param + length);
+	asconf_param = (void *)addr_param + length;
 	asconf_len -= length;
 
 	/* ADDIP 4.1
@@ -3376,11 +3395,13 @@
 		 * one.
 		 */
 		length = ntohs(asconf_param->param_hdr.length);
-		asconf_param = (sctp_addip_param_t *)((void *)asconf_param +
-						      length);
+		asconf_param = (void *)asconf_param + length;
 		asconf_len -= length;
 	}
 
+	if (no_err && asoc->src_out_of_asoc_ok)
+		asoc->src_out_of_asoc_ok = 0;
+
 	/* Free the cached last sent asconf chunk. */
 	list_del_init(&asconf->transmitted_list);
 	sctp_chunk_free(asconf);
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 534c2e5..1b2bb64 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -1201,7 +1201,7 @@
 	int local_cork = 0;
 
 	if (SCTP_EVENT_T_TIMEOUT != event_type)
-		chunk = (struct sctp_chunk *) event_arg;
+		chunk = event_arg;
 
 	/* Note:  This whole file is a huge candidate for rework.
 	 * For example, each command could either have its own handler, so
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 6766913..fd31b36 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -476,7 +476,7 @@
 		/* The list may contain either IPv4 or IPv6 address;
 		 * determine the address length for walking thru the list.
 		 */
-		sa_addr = (struct sockaddr *)addr_buf;
+		sa_addr = addr_buf;
 		af = sctp_get_af_specific(sa_addr->sa_family);
 		if (!af) {
 			retval = -EINVAL;
@@ -555,7 +555,7 @@
 		 */
 		addr_buf = addrs;
 		for (i = 0; i < addrcnt; i++) {
-			addr = (union sctp_addr *)addr_buf;
+			addr = addr_buf;
 			af = sctp_get_af_specific(addr->v4.sin_family);
 			if (!af) {
 				retval = -EINVAL;
@@ -583,22 +583,35 @@
 			goto out;
 		}
 
-		retval = sctp_send_asconf(asoc, chunk);
-		if (retval)
-			goto out;
-
 		/* Add the new addresses to the bind address list with
 		 * use_as_src set to 0.
 		 */
 		addr_buf = addrs;
 		for (i = 0; i < addrcnt; i++) {
-			addr = (union sctp_addr *)addr_buf;
+			addr = addr_buf;
 			af = sctp_get_af_specific(addr->v4.sin_family);
 			memcpy(&saveaddr, addr, af->sockaddr_len);
 			retval = sctp_add_bind_addr(bp, &saveaddr,
 						    SCTP_ADDR_NEW, GFP_ATOMIC);
 			addr_buf += af->sockaddr_len;
 		}
+		if (asoc->src_out_of_asoc_ok) {
+			struct sctp_transport *trans;
+
+			list_for_each_entry(trans,
+			    &asoc->peer.transport_addr_list, transports) {
+				/* Clear the source and route cache */
+				dst_release(trans->dst);
+				trans->cwnd = min(4*asoc->pathmtu, max_t(__u32,
+				    2*asoc->pathmtu, 4380));
+				trans->ssthresh = asoc->peer.i.a_rwnd;
+				trans->rto = asoc->rto_initial;
+				trans->rtt = trans->srtt = trans->rttvar = 0;
+				sctp_transport_route(trans, NULL,
+				    sctp_sk(asoc->base.sk));
+			}
+		}
+		retval = sctp_send_asconf(asoc, chunk);
 	}
 
 out:
@@ -646,7 +659,7 @@
 			goto err_bindx_rem;
 		}
 
-		sa_addr = (union sctp_addr *)addr_buf;
+		sa_addr = addr_buf;
 		af = sctp_get_af_specific(sa_addr->sa.sa_family);
 		if (!af) {
 			retval = -EINVAL;
@@ -715,7 +728,9 @@
 	struct sctp_sockaddr_entry *saddr;
 	int 			i;
 	int 			retval = 0;
+	int			stored = 0;
 
+	chunk = NULL;
 	if (!sctp_addip_enable)
 		return retval;
 
@@ -743,7 +758,7 @@
 		 */
 		addr_buf = addrs;
 		for (i = 0; i < addrcnt; i++) {
-			laddr = (union sctp_addr *)addr_buf;
+			laddr = addr_buf;
 			af = sctp_get_af_specific(laddr->v4.sin_family);
 			if (!af) {
 				retval = -EINVAL;
@@ -766,8 +781,37 @@
 		bp = &asoc->base.bind_addr;
 		laddr = sctp_find_unmatch_addr(bp, (union sctp_addr *)addrs,
 					       addrcnt, sp);
-		if (!laddr)
-			continue;
+		if ((laddr == NULL) && (addrcnt == 1)) {
+			if (asoc->asconf_addr_del_pending)
+				continue;
+			asoc->asconf_addr_del_pending =
+			    kzalloc(sizeof(union sctp_addr), GFP_ATOMIC);
+			if (asoc->asconf_addr_del_pending == NULL) {
+				retval = -ENOMEM;
+				goto out;
+			}
+			asoc->asconf_addr_del_pending->sa.sa_family =
+				    addrs->sa_family;
+			asoc->asconf_addr_del_pending->v4.sin_port =
+				    htons(bp->port);
+			if (addrs->sa_family == AF_INET) {
+				struct sockaddr_in *sin;
+
+				sin = (struct sockaddr_in *)addrs;
+				asoc->asconf_addr_del_pending->v4.sin_addr.s_addr = sin->sin_addr.s_addr;
+			} else if (addrs->sa_family == AF_INET6) {
+				struct sockaddr_in6 *sin6;
+
+				sin6 = (struct sockaddr_in6 *)addrs;
+				ipv6_addr_copy(&asoc->asconf_addr_del_pending->v6.sin6_addr, &sin6->sin6_addr);
+			}
+			SCTP_DEBUG_PRINTK_IPADDR("send_asconf_del_ip: keep the last address asoc: %p ",
+			    " at %p\n", asoc, asoc->asconf_addr_del_pending,
+			    asoc->asconf_addr_del_pending);
+			asoc->src_out_of_asoc_ok = 1;
+			stored = 1;
+			goto skip_mkasconf;
+		}
 
 		/* We do not need RCU protection throughout this loop
 		 * because this is done under a socket lock from the
@@ -780,12 +824,13 @@
 			goto out;
 		}
 
+skip_mkasconf:
 		/* Reset use_as_src flag for the addresses in the bind address
 		 * list that are to be deleted.
 		 */
 		addr_buf = addrs;
 		for (i = 0; i < addrcnt; i++) {
-			laddr = (union sctp_addr *)addr_buf;
+			laddr = addr_buf;
 			af = sctp_get_af_specific(laddr->v4.sin_family);
 			list_for_each_entry(saddr, &bp->address_list, list) {
 				if (sctp_cmp_addr_exact(&saddr->a, laddr))
@@ -805,12 +850,37 @@
 					     sctp_sk(asoc->base.sk));
 		}
 
+		if (stored)
+			/* We don't need to transmit ASCONF */
+			continue;
 		retval = sctp_send_asconf(asoc, chunk);
 	}
 out:
 	return retval;
 }
 
+/* Apply addr events to this endpoint's asocs. ep and addr_wq must be locked. */
+int sctp_asconf_mgmt(struct sctp_sock *sp, struct sctp_sockaddr_entry *addrw)
+{
+	struct sock *sk = sctp_opt2sk(sp);
+	union sctp_addr *addr;
+	struct sctp_af *af;
+
+	/* It is safe to write port space in caller. */
+	addr = &addrw->a;
+	addr->v4.sin_port = htons(sp->ep->base.bind_addr.port);
+	af = sctp_get_af_specific(addr->sa.sa_family);
+	if (!af)
+		return -EINVAL;
+	if (sctp_verify_addr(sk, addr, af->sockaddr_len))
+		return -EINVAL;
+
+	if (addrw->state == SCTP_ADDR_NEW)
+		return sctp_send_asconf_add_ip(sk, (struct sockaddr *)addr, 1);
+	else
+		return sctp_send_asconf_del_ip(sk, (struct sockaddr *)addr, 1);
+}
+
 /* Helper for tunneling sctp_bindx() requests through sctp_setsockopt()
  *
  * API 8.1
@@ -927,7 +997,7 @@
 			return -EINVAL;
 		}
 
-		sa_addr = (struct sockaddr *)addr_buf;
+		sa_addr = addr_buf;
 		af = sctp_get_af_specific(sa_addr->sa_family);
 
 		/* If the address family is not supported or if this address
@@ -1018,7 +1088,7 @@
 			goto out_free;
 		}
 
-		sa_addr = (union sctp_addr *)addr_buf;
+		sa_addr = addr_buf;
 		af = sctp_get_af_specific(sa_addr->sa.sa_family);
 
 		/* If the address family is not supported or if this address
@@ -3334,6 +3404,46 @@
 
 }
 
+/*
+ * 8.1.23 SCTP_AUTO_ASCONF
+ *
+ * This option will enable or disable the use of the automatic generation of
+ * ASCONF chunks to add and delete addresses to an existing association.  Note
+ * that this option has two caveats, namely: a) it only affects sockets that
+ * are bound to all addresses available to the SCTP stack, and b) the system
+ * administrator may have an overriding control that turns the ASCONF feature
+ * off no matter what setting the socket option may have.
+ * This option expects an integer boolean flag, where a non-zero value turns on
+ * the option, and a zero value turns off the option.
+ * Note: in this implementation the socket option overrides the default
+ * set via sysctl, as in the FreeBSD implementation.
+ */
+static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval,
+					unsigned int optlen)
+{
+	int val;
+	struct sctp_sock *sp = sctp_sk(sk);
+
+	if (optlen < sizeof(int))
+		return -EINVAL;
+	if (get_user(val, (int __user *)optval))
+		return -EFAULT;
+	if (!sctp_is_ep_boundall(sk) && val)
+		return -EINVAL;
+	if ((val && sp->do_auto_asconf) || (!val && !sp->do_auto_asconf))
+		return 0;
+
+	if (val == 0 && sp->do_auto_asconf) {
+		list_del(&sp->auto_asconf_list);
+		sp->do_auto_asconf = 0;
+	} else if (val && !sp->do_auto_asconf) {
+		list_add_tail(&sp->auto_asconf_list,
+		    &sctp_auto_asconf_splist);
+		sp->do_auto_asconf = 1;
+	}
+	return 0;
+}
+
 
 /* API 6.2 setsockopt(), getsockopt()
  *
@@ -3481,6 +3591,9 @@
 	case SCTP_AUTH_DELETE_KEY:
 		retval = sctp_setsockopt_del_key(sk, optval, optlen);
 		break;
+	case SCTP_AUTO_ASCONF:
+		retval = sctp_setsockopt_auto_asconf(sk, optval, optlen);
+		break;
 	default:
 		retval = -ENOPROTOOPT;
 		break;
@@ -3763,6 +3876,12 @@
 	local_bh_disable();
 	percpu_counter_inc(&sctp_sockets_allocated);
 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
+	if (sctp_default_auto_asconf) {
+		list_add_tail(&sp->auto_asconf_list,
+		    &sctp_auto_asconf_splist);
+		sp->do_auto_asconf = 1;
+	} else
+		sp->do_auto_asconf = 0;
 	local_bh_enable();
 
 	return 0;
@@ -3771,13 +3890,17 @@
 /* Cleanup any SCTP per socket resources.  */
 SCTP_STATIC void sctp_destroy_sock(struct sock *sk)
 {
-	struct sctp_endpoint *ep;
+	struct sctp_sock *sp;
 
 	SCTP_DEBUG_PRINTK("sctp_destroy_sock(sk: %p)\n", sk);
 
 	/* Release our hold on the endpoint. */
-	ep = sctp_sk(sk)->ep;
-	sctp_endpoint_free(ep);
+	sp = sctp_sk(sk);
+	if (sp->do_auto_asconf) {
+		sp->do_auto_asconf = 0;
+		list_del(&sp->auto_asconf_list);
+	}
+	sctp_endpoint_free(sp->ep);
 	local_bh_disable();
 	percpu_counter_dec(&sctp_sockets_allocated);
 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
@@ -5277,6 +5400,28 @@
 }
 
 /*
+ * 8.1.23 SCTP_AUTO_ASCONF
+ * See the corresponding setsockopt entry for the description.
+ */
+static int sctp_getsockopt_auto_asconf(struct sock *sk, int len,
+				   char __user *optval, int __user *optlen)
+{
+	int val = 0;
+
+	if (len < sizeof(int))
+		return -EINVAL;
+
+	len = sizeof(int);
+	if (sctp_sk(sk)->do_auto_asconf && sctp_is_ep_boundall(sk))
+		val = 1;
+	if (put_user(len, optlen))
+		return -EFAULT;
+	if (copy_to_user(optval, &val, len))
+		return -EFAULT;
+	return 0;
+}
+
+/*
  * 8.2.6. Get the Current Identifiers of Associations
  *        (SCTP_GET_ASSOC_ID_LIST)
  *
@@ -5460,6 +5605,9 @@
 	case SCTP_GET_ASSOC_ID_LIST:
 		retval = sctp_getsockopt_assoc_ids(sk, len, optval, optlen);
 		break;
+	case SCTP_AUTO_ASCONF:
+		retval = sctp_getsockopt_auto_asconf(sk, len, optval, optlen);
+		break;
 	default:
 		retval = -ENOPROTOOPT;
 		break;
@@ -6512,6 +6660,7 @@
 	struct sk_buff *skb, *tmp;
 	struct sctp_ulpevent *event;
 	struct sctp_bind_hashbucket *head;
+	struct list_head tmplist;
 
 	/* Migrate socket buffer sizes and all the socket level options to the
 	 * new socket.
@@ -6519,7 +6668,12 @@
 	newsk->sk_sndbuf = oldsk->sk_sndbuf;
 	newsk->sk_rcvbuf = oldsk->sk_rcvbuf;
 	/* Brute force copy old sctp opt. */
-	inet_sk_copy_descendant(newsk, oldsk);
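+	/* The descendant copy duplicates oldsk's sctp options wholesale,
+	 * which would clobber newsk's own auto_asconf_list linkage; save
+	 * and restore that list_head around the copy. */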
+	if (oldsp->do_auto_asconf) {
+		memcpy(&tmplist, &newsp->auto_asconf_list, sizeof(tmplist));
+		inet_sk_copy_descendant(newsk, oldsk);
+		memcpy(&newsp->auto_asconf_list, &tmplist, sizeof(tmplist));
+	} else
+		inet_sk_copy_descendant(newsk, oldsk);
 
 	/* Restore the ep value that was overwritten with the above structure
 	 * copy.
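
Taken together, the socket.c hunks wire SCTP_AUTO_ASCONF through
setsockopt/getsockopt, socket init/destroy and socket migration. A minimal
userspace sketch of driving the new option (hypothetical usage: assumes an
SCTP-capable kernel carrying this patch and headers that export
SCTP_AUTO_ASCONF, e.g. a current <netinet/sctp.h>):

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <arpa/inet.h>
	#include <netinet/sctp.h>	/* assumed to define SCTP_AUTO_ASCONF */

	int main(void)
	{
		int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
		struct sockaddr_in sin = { .sin_family = AF_INET };
		int on = 1, val = 0;
		socklen_t len = sizeof(val);

		if (fd < 0) {
			perror("socket");
			return 1;
		}
		/* The option is only accepted on sockets bound to all
		 * addresses (sctp_is_ep_boundall), so bind to INADDR_ANY. */
		sin.sin_addr.s_addr = htonl(INADDR_ANY);
		if (bind(fd, (struct sockaddr *)&sin, sizeof(sin)) < 0)
			perror("bind");
		if (setsockopt(fd, IPPROTO_SCTP, SCTP_AUTO_ASCONF,
			       &on, sizeof(on)) < 0)
			perror("setsockopt(SCTP_AUTO_ASCONF)");
		if (getsockopt(fd, IPPROTO_SCTP, SCTP_AUTO_ASCONF,
			       &val, &len) == 0)
			printf("auto_asconf is %s\n", val ? "on" : "off");
		close(fd);
		return 0;
	}
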
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index 50cb57f..6b39529 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -183,6 +183,13 @@
 		.proc_handler	= proc_dointvec,
 	},
 	{
+		.procname	= "default_auto_asconf",
+		.data		= &sctp_default_auto_asconf,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{
 		.procname	= "prsctp_enable",
 		.data		= &sctp_prsctp_enable,
 		.maxlen		= sizeof(int),
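
With this table entry the new default surfaces as
net.sctp.default_auto_asconf (mode 0644, a plain integer handled by
proc_dointvec). A sketch of flipping it programmatically, equivalent to
"sysctl -w net.sctp.default_auto_asconf=1" (assumes the conventional
/proc/sys mapping and sufficient privileges):

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/proc/sys/net/sctp/default_auto_asconf", "w");

		if (!f) {
			perror("fopen");
			return 1;
		}
		/* New sockets will now start with do_auto_asconf = 1. */
		fputs("1\n", f);
		fclose(f);
		return 0;
	}
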
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index cd6e4aa..727e506 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -626,7 +626,7 @@
 		if (err < 0)
 			goto out;
 		cred = task->tk_rqstp->rq_cred;
-	};
+	}
 	dprintk("RPC: %5u refreshing %s cred %p\n",
 		task->tk_pid, cred->cr_auth->au_ops->au_name, cred);
 
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index c3c232a..a385430 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -42,6 +42,7 @@
 #include <linux/sunrpc/svc_xprt.h>
 #include <linux/sunrpc/debug.h>
 #include <linux/sunrpc/rpc_rdma.h>
+#include <linux/interrupt.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 80f8da3..28236ba 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -47,6 +47,7 @@
  *  o buffer memory
  */
 
+#include <linux/interrupt.h>
 #include <linux/pci.h>	/* for Tavor hack below */
 #include <linux/slab.h>
 
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 98fa8eb..10823e2 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -3753,10 +3753,6 @@
 	void *hdr;
 	struct nlattr *infoattr;
 
-	/* Survey without a channel doesn't make sense */
-	if (!survey->channel)
-		return -EINVAL;
-
 	hdr = nl80211hdr_put(msg, pid, seq, flags,
 			     NL80211_CMD_NEW_SURVEY_RESULTS);
 	if (!hdr)
@@ -3819,6 +3815,8 @@
 	}
 
 	while (1) {
+		struct ieee80211_channel *chan;
+
 		res = dev->ops->dump_survey(&dev->wiphy, netdev, survey_idx,
 					    &survey);
 		if (res == -ENOENT)
@@ -3826,6 +3824,19 @@
 		if (res)
 			goto out_err;
 
+		/* Survey without a channel doesn't make sense */
+		if (!survey.channel) {
+			res = -EINVAL;
+			goto out;
+		}
+
+		chan = ieee80211_get_channel(&dev->wiphy,
+					     survey.channel->center_freq);
+		if (!chan || chan->flags & IEEE80211_CHAN_DISABLED) {
+			survey_idx++;
+			continue;
+		}
+
 		if (nl80211_send_survey(skb,
 				NETLINK_CB(cb->skb).pid,
 				cb->nlh->nlmsg_seq, NLM_F_MULTI,
@@ -4360,6 +4371,93 @@
 	return err;
 }
 
+static int nl80211_testmode_dump(struct sk_buff *skb,
+				 struct netlink_callback *cb)
+{
+	struct cfg80211_registered_device *dev;
+	int err;
+	long phy_idx;
+	void *data = NULL;
+	int data_len = 0;
+
+	if (cb->args[0]) {
+		/*
+		 * 0 is a valid index, but not valid for args[0],
+		 * so we need to offset by 1.
+		 */
+		phy_idx = cb->args[0] - 1;
+	} else {
+		err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize,
+				  nl80211_fam.attrbuf, nl80211_fam.maxattr,
+				  nl80211_policy);
+		if (err)
+			return err;
+		if (!nl80211_fam.attrbuf[NL80211_ATTR_WIPHY])
+			return -EINVAL;
+		phy_idx = nla_get_u32(nl80211_fam.attrbuf[NL80211_ATTR_WIPHY]);
+		if (nl80211_fam.attrbuf[NL80211_ATTR_TESTDATA])
+			cb->args[1] =
+				(long)nl80211_fam.attrbuf[NL80211_ATTR_TESTDATA];
+	}
+
+	if (cb->args[1]) {
+		data = nla_data((void *)cb->args[1]);
+		data_len = nla_len((void *)cb->args[1]);
+	}
+
+	mutex_lock(&cfg80211_mutex);
+	dev = cfg80211_rdev_by_wiphy_idx(phy_idx);
+	if (!dev) {
+		mutex_unlock(&cfg80211_mutex);
+		return -ENOENT;
+	}
+	cfg80211_lock_rdev(dev);
+	mutex_unlock(&cfg80211_mutex);
+
+	if (!dev->ops->testmode_dump) {
+		err = -EOPNOTSUPP;
+		goto out_err;
+	}
+
+	while (1) {
+		void *hdr = nl80211hdr_put(skb, NETLINK_CB(cb->skb).pid,
+					   cb->nlh->nlmsg_seq, NLM_F_MULTI,
+					   NL80211_CMD_TESTMODE);
+		struct nlattr *tmdata;
+
+		if (nla_put_u32(skb, NL80211_ATTR_WIPHY, dev->wiphy_idx) < 0) {
+			genlmsg_cancel(skb, hdr);
+			break;
+		}
+
+		tmdata = nla_nest_start(skb, NL80211_ATTR_TESTDATA);
+		if (!tmdata) {
+			genlmsg_cancel(skb, hdr);
+			break;
+		}
+		err = dev->ops->testmode_dump(&dev->wiphy, skb, cb,
+					      data, data_len);
+		nla_nest_end(skb, tmdata);
+
+		if (err == -ENOBUFS || err == -ENOENT) {
+			genlmsg_cancel(skb, hdr);
+			break;
+		} else if (err) {
+			genlmsg_cancel(skb, hdr);
+			goto out_err;
+		}
+
+		genlmsg_end(skb, hdr);
+	}
+
+	err = skb->len;
+	/* see above */
+	cb->args[0] = phy_idx + 1;
+ out_err:
+	cfg80211_unlock_rdev(dev);
+	return err;
+}
+
 static struct sk_buff *
 __cfg80211_testmode_alloc_skb(struct cfg80211_registered_device *rdev,
 			      int approxlen, u32 pid, u32 seq, gfp_t gfp)
@@ -5657,6 +5755,7 @@
 	{
 		.cmd = NL80211_CMD_TESTMODE,
 		.doit = nl80211_testmode_do,
+		.dumpit = nl80211_testmode_dump,
 		.policy = nl80211_policy,
 		.flags = GENL_ADMIN_PERM,
 		.internal_flags = NL80211_FLAG_NEED_WIPHY |
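
nl80211_testmode_dump above uses a netlink dump idiom worth noting:
cb->args[0] is guaranteed to be zero on the first invocation, so a value
for which zero is legitimate (here the wiphy index) has to be stored
shifted by one. A standalone sketch of the pattern (plain C, kernel types
replaced with stand-ins):

	#include <stdio.h>

	struct cb_like { long args[2]; };

	/* Returns the saved index, or -1 if this is the first pass. */
	static long dump_resume_idx(const struct cb_like *cb)
	{
		return cb->args[0] ? cb->args[0] - 1 : -1;
	}

	static void dump_save_idx(struct cb_like *cb, long idx)
	{
		cb->args[0] = idx + 1;	/* keep 0 meaning "not started" */
	}

	int main(void)
	{
		struct cb_like cb = { { 0, 0 } };

		printf("first pass: %ld\n", dump_resume_idx(&cb));
		dump_save_idx(&cb, 0);	/* index 0 stays distinguishable */
		printf("second pass: %ld\n", dump_resume_idx(&cb));
		return 0;
	}
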
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index c658cb3..0256b8a 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -2299,7 +2299,8 @@
 		if (link->dump == NULL)
 			return -EINVAL;
 
-		return netlink_dump_start(net->xfrm.nlsk, skb, nlh, link->dump, link->done);
+		return netlink_dump_start(net->xfrm.nlsk, skb, nlh,
+					  link->dump, link->done, 0);
 	}
 
 	err = nlmsg_parse(nlh, xfrm_msg_min[type], attrs, XFRMA_MAX,
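
The extra 0 passed to netlink_dump_start is not an xfrm-specific change:
it tracks a concurrent netlink API update in which the function grew a
trailing min_dump_alloc parameter, the minimum skb allocation for each
dump pass. The prototype assumed here (paraphrased from that change; not
part of this hunk):

	/* Passing 0 for min_dump_alloc keeps the previous allocation
	 * behaviour; larger values let one dump pass carry bigger
	 * objects without repeated -EMSGSIZE retries. */
	extern int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
				      const struct nlmsghdr *nlh,
				      int (*dump)(struct sk_buff *skb,
						  struct netlink_callback *cb),
				      int (*done)(struct netlink_callback *cb),
				      u16 min_dump_alloc);
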