Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/holtmann/bluetooth-next-2.6
diff --git a/MAINTAINERS b/MAINTAINERS
index b1b9298..b04b97f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1365,7 +1365,7 @@
 M:	Eilon Greenstein <eilong@broadcom.com>
 L:	netdev@vger.kernel.org
 S:	Supported
-F:	drivers/net/bnx2x*
+F:	drivers/net/bnx2x/
 
 BROADCOM TG3 GIGABIT ETHERNET DRIVER
 M:	Matt Carlson <mcarlson@broadcom.com>
diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c
index f053726..2ab233b 100644
--- a/arch/um/drivers/net_kern.c
+++ b/arch/um/drivers/net_kern.c
@@ -25,11 +25,6 @@
 #include "net_kern.h"
 #include "net_user.h"
 
-static inline void set_ether_mac(struct net_device *dev, unsigned char *addr)
-{
-	memcpy(dev->dev_addr, addr, ETH_ALEN);
-}
-
 #define DRIVER_NAME "uml-netdev"
 
 static DEFINE_SPINLOCK(opened_lock);
@@ -266,7 +261,7 @@
 	struct sockaddr *hwaddr = addr;
 
 	spin_lock_irq(&lp->lock);
-	set_ether_mac(dev, hwaddr->sa_data);
+	memcpy(dev->dev_addr, hwaddr->sa_data, ETH_ALEN);
 	spin_unlock_irq(&lp->lock);
 
 	return 0;
@@ -380,7 +375,6 @@
 	.ndo_tx_timeout 	= uml_net_tx_timeout,
 	.ndo_set_mac_address	= uml_net_set_mac,
 	.ndo_change_mtu 	= uml_net_change_mtu,
-	.ndo_set_mac_address 	= eth_mac_addr,
 	.ndo_validate_addr	= eth_validate_addr,
 };
 
@@ -478,7 +472,7 @@
 	    ((*transport->user->init)(&lp->user, dev) != 0))
 		goto out_unregister;
 
-	set_ether_mac(dev, device->mac);
+	memcpy(dev->dev_addr, device->mac, ETH_ALEN);
 	dev->mtu = transport->user->mtu;
 	dev->netdev_ops = &uml_netdev_ops;
 	dev->ethtool_ops = &uml_net_ethtool_ops;
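
For context on the conversion above: eth_mac_addr() takes a struct sockaddr *, not a raw six-byte MAC, and in this era it also refuses to change the address while the interface is running. That is why the set_ether_mac() removal is best open-coded as a memcpy rather than routed through the helper. A sketch of the helper's contract, modelled on the contemporaneous net/ethernet/eth.c (approximate):

int eth_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;

	if (netif_running(dev))
		return -EBUSY;
	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
	return 0;
}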
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index 729a149..2f3516b 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -154,7 +154,6 @@
 #endif
 static void ns_poll(unsigned long arg);
 static int ns_parse_mac(char *mac, unsigned char *esi);
-static short ns_h2i(char c);
 static void ns_phy_put(struct atm_dev *dev, unsigned char value,
 		       unsigned long addr);
 static unsigned char ns_phy_get(struct atm_dev *dev, unsigned long addr);
@@ -2824,9 +2823,9 @@
 		return -1;
 	j = 0;
 	for (i = 0; i < 6; i++) {
-		if ((byte1 = ns_h2i(mac[j++])) < 0)
+		if ((byte1 = hex_to_bin(mac[j++])) < 0)
 			return -1;
-		if ((byte0 = ns_h2i(mac[j++])) < 0)
+		if ((byte0 = hex_to_bin(mac[j++])) < 0)
 			return -1;
 		esi[i] = (unsigned char)(byte1 * 16 + byte0);
 		if (i < 5) {
@@ -2837,16 +2836,6 @@
 	return 0;
 }
 
-static short ns_h2i(char c)
-{
-	if (c >= '0' && c <= '9')
-		return (short)(c - '0');
-	if (c >= 'A' && c <= 'F')
-		return (short)(c - 'A' + 10);
-	if (c >= 'a' && c <= 'f')
-		return (short)(c - 'a' + 10);
-	return -1;
-}
 
 static void ns_phy_put(struct atm_dev *dev, unsigned char value,
 		       unsigned long addr)
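
The hex_to_bin() helper that replaces ns_h2i() lives in lib/hexdump.c and has the same contract: it returns the value of one hex digit, or -1 for a non-hex character. Roughly:

int hex_to_bin(char ch)
{
	if ((ch >= '0') && (ch <= '9'))
		return ch - '0';
	ch = tolower(ch);
	if ((ch >= 'a') && (ch <= 'f'))
		return ch - 'a' + 10;
	return -1;
}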
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 069a03f..c754d88 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -1020,10 +1020,16 @@
 	ioaddr = pci_iomap(pdev, pci_bar, 0);
 	if (!ioaddr) /* If mapping fails, fall-back to BAR 0... */
 		ioaddr = pci_iomap(pdev, 0, 0);
+	if (!ioaddr) {
+		pci_disable_device(pdev);
+		rc = -ENOMEM;
+		goto out;
+	}
 
 	rc = vortex_probe1(&pdev->dev, ioaddr, pdev->irq,
 			   ent->driver_data, unit);
 	if (rc < 0) {
+		pci_iounmap(pdev, ioaddr);
 		pci_disable_device(pdev);
 		goto out;
 	}
@@ -1387,7 +1393,7 @@
 		mii_preamble_required++;
 		if (vp->drv_flags & EXTRA_PREAMBLE)
 			mii_preamble_required++;
-		mdio_sync(ioaddr, 32);
+		mdio_sync(vp, 32);
 		mdio_read(dev, 24, MII_BMSR);
 		for (phy = 0; phy < 32 && phy_idx < 1; phy++) {
 			int mii_status, phyx;
@@ -2912,6 +2918,36 @@
 	}
 }
 
+static void vortex_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+	struct vortex_private *vp = netdev_priv(dev);
+
+	spin_lock_irq(&vp->lock);
+	wol->supported = WAKE_MAGIC;
+
+	wol->wolopts = 0;
+	if (vp->enable_wol)
+		wol->wolopts |= WAKE_MAGIC;
+	spin_unlock_irq(&vp->lock);
+}
+
+static int vortex_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+	struct vortex_private *vp = netdev_priv(dev);
+	if (wol->wolopts & ~WAKE_MAGIC)
+		return -EINVAL;
+
+	spin_lock_irq(&vp->lock);
+	if (wol->wolopts & WAKE_MAGIC)
+		vp->enable_wol = 1;
+	else
+		vp->enable_wol = 0;
+	acpi_set_WOL(dev);
+	spin_unlock_irq(&vp->lock);
+
+	return 0;
+}
+
 static const struct ethtool_ops vortex_ethtool_ops = {
 	.get_drvinfo		= vortex_get_drvinfo,
 	.get_strings            = vortex_get_strings,
@@ -2923,6 +2959,8 @@
 	.set_settings           = vortex_set_settings,
 	.get_link               = ethtool_op_get_link,
 	.nway_reset             = vortex_nway_reset,
+	.get_wol                = vortex_get_wol,
+	.set_wol                = vortex_set_wol,
 };
 
 #ifdef CONFIG_PCI
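
The new get_wol/set_wol hooks are what back "ethtool eth0" and "ethtool -s eth0 wol g" for this driver. A minimal userspace query through the SIOCETHTOOL ioctl, for illustration (interface name assumed, error handling trimmed):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed NIC name */
	ifr.ifr_data = (char *)&wol;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("SIOCETHTOOL");
		close(fd);
		return 1;
	}
	printf("supported=0x%x wolopts=0x%x\n", wol.supported, wol.wolopts);
	close(fd);
	return 0;
}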
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 8c13df7..ebe6839 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1751,7 +1751,7 @@
 
 config KS8842
 	tristate "Micrel KSZ8841/42 with generic bus interface"
-	depends on HAS_IOMEM
+	depends on HAS_IOMEM && DMA_ENGINE
 	help
 	 This platform driver is for KSZ8841(1-port) / KS8842(2-port)
 	 ethernet switch chip (managed, VLAN, QoS) from Micrel or
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index ce55581..56e8c27 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -84,8 +84,7 @@
 obj-$(CONFIG_TIGON3) += tg3.o
 obj-$(CONFIG_BNX2) += bnx2.o
 obj-$(CONFIG_CNIC) += cnic.o
-obj-$(CONFIG_BNX2X) += bnx2x.o
-bnx2x-objs := bnx2x_main.o bnx2x_link.o
+obj-$(CONFIG_BNX2X) += bnx2x/
 spidernet-y += spider_net.o spider_net_ethtool.o
 obj-$(CONFIG_SPIDER_NET) += spidernet.o sungem_phy.o
 obj-$(CONFIG_GELIC_NET) += ps3_gelic.o
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index f17428c..99197bd 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -33,7 +33,7 @@
 
 #include "be_hw.h"
 
-#define DRV_VER			"2.102.147u"
+#define DRV_VER			"2.103.175u"
 #define DRV_NAME		"be2net"
 #define BE_NAME			"ServerEngines BladeEngine2 10Gbps NIC"
 #define BE3_NAME		"ServerEngines BladeEngine3 10Gbps NIC"
@@ -220,7 +220,16 @@
 	struct be_rx_page_info page_info_tbl[RX_Q_LEN];
 };
 
+struct be_vf_cfg {
+	unsigned char vf_mac_addr[ETH_ALEN];
+	u32 vf_if_handle;
+	u32 vf_pmac_id;
+	u16 vf_vlan_tag;
+	u32 vf_tx_rate;
+};
+
 #define BE_NUM_MSIX_VECTORS		2	/* 1 each for Tx and Rx */
+#define BE_INVALID_PMAC_ID		0xffffffff
 struct be_adapter {
 	struct pci_dev *pdev;
 	struct net_device *netdev;
@@ -276,9 +285,11 @@
 	u32 port_num;
 	bool promiscuous;
 	bool wol;
-	u32 cap;
+	u32 function_mode;
 	u32 rx_fc;		/* Rx flow control */
 	u32 tx_fc;		/* Tx flow control */
+	bool ue_detected;
+	bool stats_ioctl_sent;
 	int link_speed;
 	u8 port_type;
 	u8 transceiver;
@@ -288,8 +299,7 @@
 	struct completion flash_compl;
 
 	bool sriov_enabled;
-	u32 vf_if_handle[BE_MAX_VF];
-	u32 vf_pmac_id[BE_MAX_VF];
+	struct be_vf_cfg vf_cfg[BE_MAX_VF];
 	u8 base_eq_id;
 	u8 is_virtfn;
 };
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 344e062..3d30549 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -75,8 +75,10 @@
 			be_dws_le_to_cpu(&resp->hw_stats,
 						sizeof(resp->hw_stats));
 			netdev_stats_update(adapter);
+			adapter->stats_ioctl_sent = false;
 		}
-	} else if (compl_status != MCC_STATUS_NOT_SUPPORTED) {
+	} else if ((compl_status != MCC_STATUS_NOT_SUPPORTED) &&
+		   (compl->tag0 != OPCODE_COMMON_NTWK_MAC_QUERY)) {
 		extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
 				CQE_STATUS_EXTD_MASK;
 		dev_warn(&adapter->pdev->dev,
@@ -205,6 +207,7 @@
 
 		if (msecs > 4000) {
 			dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
+			be_dump_ue(adapter);
 			return -1;
 		}
 
@@ -949,6 +952,7 @@
 	sge->len = cpu_to_le32(nonemb_cmd->size);
 
 	be_mcc_notify(adapter);
+	adapter->stats_ioctl_sent = true;
 
 err:
 	spin_unlock_bh(&adapter->mcc_lock);
@@ -1257,7 +1261,7 @@
 }
 
 /* Uses mbox */
-int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, u32 *cap)
+int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, u32 *mode)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_query_fw_cfg *req;
@@ -1278,7 +1282,7 @@
 	if (!status) {
 		struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
 		*port_num = le32_to_cpu(resp->phys_port);
-		*cap = le32_to_cpu(resp->function_cap);
+		*mode = le32_to_cpu(resp->function_mode);
 	}
 
 	spin_unlock(&adapter->mbox_lock);
@@ -1730,3 +1734,36 @@
 	spin_unlock_bh(&adapter->mcc_lock);
 	return status;
 }
+
+int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
+{
+	struct be_mcc_wrb *wrb;
+	struct be_cmd_req_set_qos *req;
+	int status;
+
+	spin_lock_bh(&adapter->mcc_lock);
+
+	wrb = wrb_from_mccq(adapter);
+	if (!wrb) {
+		status = -EBUSY;
+		goto err;
+	}
+
+	req = embedded_payload(wrb);
+
+	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+				OPCODE_COMMON_SET_QOS);
+
+	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+			OPCODE_COMMON_SET_QOS, sizeof(*req));
+
+	req->hdr.domain = domain;
+	req->valid_bits = BE_QOS_BITS_NIC;
+	req->max_bps_nic = bps;
+
+	status = be_mcc_notify_wait(adapter);
+
+err:
+	spin_unlock_bh(&adapter->mcc_lock);
+	return status;
+}
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index 912a058..bdc10a2 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -124,6 +124,7 @@
 #define OPCODE_COMMON_CQ_CREATE				12
 #define OPCODE_COMMON_EQ_CREATE				13
 #define OPCODE_COMMON_MCC_CREATE        		21
+#define OPCODE_COMMON_SET_QOS				28
 #define OPCODE_COMMON_SEEPROM_READ			30
 #define OPCODE_COMMON_NTWK_RX_FILTER    		34
 #define OPCODE_COMMON_GET_FW_VERSION			35
@@ -748,7 +749,7 @@
 	u32 be_config_number;
 	u32 asic_revision;
 	u32 phys_port;
-	u32 function_cap;
+	u32 function_mode;
 	u32 rsvd[26];
 };
 
@@ -894,6 +895,22 @@
 	u32 future_use[4];
 };
 
+/*********************** Set QOS ***********************/
+
+#define BE_QOS_BITS_NIC				1
+
+struct be_cmd_req_set_qos {
+	struct be_cmd_req_hdr hdr;
+	u32 valid_bits;
+	u32 max_bps_nic;
+	u32 rsvd[7];
+};
+
+struct be_cmd_resp_set_qos {
+	struct be_cmd_resp_hdr hdr;
+	u32 rsvd;
+};
+
 extern int be_pci_fnum_get(struct be_adapter *adapter);
 extern int be_cmd_POST(struct be_adapter *adapter);
 extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
@@ -974,4 +991,6 @@
 				u8 loopback_type, u8 enable);
 extern int be_cmd_get_phy_info(struct be_adapter *adapter,
 		struct be_dma_mem *cmd);
+extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
+extern void be_dump_ue(struct be_adapter *adapter);
 
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index c0ade24..cd16243 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -322,10 +322,11 @@
 	int status;
 	u16 intf_type;
 
-	if (adapter->link_speed < 0) {
+	if ((adapter->link_speed < 0) || (!(netdev->flags & IFF_UP))) {
 		status = be_cmd_link_status_query(adapter, &link_up,
 						&mac_speed, &link_speed);
 
+		be_link_status_update(adapter, link_up);
 		/* link_speed is in units of 10 Mbps */
 		if (link_speed) {
 			ecmd->speed = link_speed*10;
diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h
index 0683967..6c8f9bb 100644
--- a/drivers/net/benet/be_hw.h
+++ b/drivers/net/benet/be_hw.h
@@ -56,6 +56,16 @@
 #define PCICFG_PM_CONTROL_OFFSET		0x44
 #define PCICFG_PM_CONTROL_MASK			0x108	/* bits 3 & 8 */
 
+/********* Online Control Registers *******/
+#define PCICFG_ONLINE0				0xB0
+#define PCICFG_ONLINE1				0xB4
+
+/********* UE Status and Mask Registers ***/
+#define PCICFG_UE_STATUS_LOW			0xA0
+#define PCICFG_UE_STATUS_HIGH			0xA4
+#define PCICFG_UE_STATUS_LOW_MASK		0xA8
+#define PCICFG_UE_STATUS_HI_MASK		0xAC
+
 /********* ISR0 Register offset **********/
 #define CEV_ISR0_OFFSET 			0xC18
 #define CEV_ISR_SIZE				4
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index e6ca923..74e146f 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -40,6 +40,76 @@
 	{ 0 }
 };
 MODULE_DEVICE_TABLE(pci, be_dev_ids);
+/* UE Status Low CSR */
+static char *ue_status_low_desc[] = {
+	"CEV",
+	"CTX",
+	"DBUF",
+	"ERX",
+	"Host",
+	"MPU",
+	"NDMA",
+	"PTC ",
+	"RDMA ",
+	"RXF ",
+	"RXIPS ",
+	"RXULP0 ",
+	"RXULP1 ",
+	"RXULP2 ",
+	"TIM ",
+	"TPOST ",
+	"TPRE ",
+	"TXIPS ",
+	"TXULP0 ",
+	"TXULP1 ",
+	"UC ",
+	"WDMA ",
+	"TXULP2 ",
+	"HOST1 ",
+	"P0_OB_LINK ",
+	"P1_OB_LINK ",
+	"HOST_GPIO ",
+	"MBOX ",
+	"AXGMAC0",
+	"AXGMAC1",
+	"JTAG",
+	"MPU_INTPEND"
+};
+/* UE Status High CSR */
+static char *ue_status_hi_desc[] = {
+	"LPCMEMHOST",
+	"MGMT_MAC",
+	"PCS0ONLINE",
+	"MPU_IRAM",
+	"PCS1ONLINE",
+	"PCTL0",
+	"PCTL1",
+	"PMEM",
+	"RR",
+	"TXPB",
+	"RXPP",
+	"XAUI",
+	"TXP",
+	"ARM",
+	"IPC",
+	"HOST2",
+	"HOST3",
+	"HOST4",
+	"HOST5",
+	"HOST6",
+	"HOST7",
+	"HOST8",
+	"HOST9",
+	"NETC"
+	"Unknown",
+	"Unknown",
+	"Unknown",
+	"Unknown",
+	"Unknown",
+	"Unknown",
+	"Unknown",
+	"Unknown"
+};
 
 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
 {
@@ -552,11 +622,18 @@
  * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
  * If the user configures more, place BE in vlan promiscuous mode.
  */
-static int be_vid_config(struct be_adapter *adapter)
+static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
 {
 	u16 vtag[BE_NUM_VLANS_SUPPORTED];
 	u16 ntags = 0, i;
 	int status = 0;
+	u32 if_handle;
+
+	if (vf) {
+		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
+		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
+		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
+	}
 
 	if (adapter->vlans_added <= adapter->max_vlans)  {
 		/* Construct VLAN Table to give to HW */
@@ -572,6 +649,7 @@
 		status = be_cmd_vlan_config(adapter, adapter->if_handle,
 					NULL, 0, 1, 1);
 	}
+
 	return status;
 }
 
@@ -592,27 +670,28 @@
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
 
+	adapter->vlans_added++;
 	if (!be_physfn(adapter))
 		return;
 
 	adapter->vlan_tag[vid] = 1;
-	adapter->vlans_added++;
 	if (adapter->vlans_added <= (adapter->max_vlans + 1))
-		be_vid_config(adapter);
+		be_vid_config(adapter, false, 0);
 }
 
 static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
 
+	adapter->vlans_added--;
+	vlan_group_set_device(adapter->vlan_grp, vid, NULL);
+
 	if (!be_physfn(adapter))
 		return;
 
 	adapter->vlan_tag[vid] = 0;
-	vlan_group_set_device(adapter->vlan_grp, vid, NULL);
-	adapter->vlans_added--;
 	if (adapter->vlans_added <= adapter->max_vlans)
-		be_vid_config(adapter);
+		be_vid_config(adapter, false, 0);
 }
 
 static void be_set_multicast_list(struct net_device *netdev)
@@ -656,14 +735,93 @@
 	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
 		return -EINVAL;
 
-	status = be_cmd_pmac_del(adapter, adapter->vf_if_handle[vf],
-				adapter->vf_pmac_id[vf]);
+	if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
+		status = be_cmd_pmac_del(adapter,
+					adapter->vf_cfg[vf].vf_if_handle,
+					adapter->vf_cfg[vf].vf_pmac_id);
 
-	status = be_cmd_pmac_add(adapter, mac, adapter->vf_if_handle[vf],
-				&adapter->vf_pmac_id[vf]);
-	if (!status)
+	status = be_cmd_pmac_add(adapter, mac,
+				adapter->vf_cfg[vf].vf_if_handle,
+				&adapter->vf_cfg[vf].vf_pmac_id);
+
+	if (status)
 		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
 				mac, vf);
+	else
+		memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
+
+	return status;
+}
+
+static int be_get_vf_config(struct net_device *netdev, int vf,
+			struct ifla_vf_info *vi)
+{
+	struct be_adapter *adapter = netdev_priv(netdev);
+
+	if (!adapter->sriov_enabled)
+		return -EPERM;
+
+	if (vf >= num_vfs)
+		return -EINVAL;
+
+	vi->vf = vf;
+	vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
+	vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
+	vi->qos = 0;
+	memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
+
+	return 0;
+}
+
+static int be_set_vf_vlan(struct net_device *netdev,
+			int vf, u16 vlan, u8 qos)
+{
+	struct be_adapter *adapter = netdev_priv(netdev);
+	int status = 0;
+
+	if (!adapter->sriov_enabled)
+		return -EPERM;
+
+	if ((vf >= num_vfs) || (vlan > 4095))
+		return -EINVAL;
+
+	if (vlan) {
+		adapter->vf_cfg[vf].vf_vlan_tag = vlan;
+		adapter->vlans_added++;
+	} else {
+		adapter->vf_cfg[vf].vf_vlan_tag = 0;
+		adapter->vlans_added--;
+	}
+
+	status = be_vid_config(adapter, true, vf);
+
+	if (status)
+		dev_info(&adapter->pdev->dev,
+				"VLAN %d config on VF %d failed\n", vlan, vf);
+	return status;
+}
+
+static int be_set_vf_tx_rate(struct net_device *netdev,
+			int vf, int rate)
+{
+	struct be_adapter *adapter = netdev_priv(netdev);
+	int status = 0;
+
+	if (!adapter->sriov_enabled)
+		return -EPERM;
+
+	if ((vf >= num_vfs) || (rate < 0))
+		return -EINVAL;
+
+	if (rate > 10000)
+		rate = 10000;
+
+	adapter->vf_cfg[vf].vf_tx_rate = rate;
+	status = be_cmd_set_qos(adapter, rate / 10, vf);
+
+	if (status)
+		dev_info(&adapter->pdev->dev,
+				"tx rate %d on VF %d failed\n", rate, vf);
 	return status;
 }
 
@@ -875,7 +1033,7 @@
 
 	/* vlanf could be wrongly set in some cards.
 	 * ignore if vtm is not set */
-	if ((adapter->cap & 0x400) && !vtm)
+	if ((adapter->function_mode & 0x400) && !vtm)
 		vlanf = 0;
 
 	if (unlikely(vlanf)) {
@@ -915,7 +1073,7 @@
 
 	/* vlanf could be wrongly set in some cards.
 	 * ignore if vtm is not set */
-	if ((adapter->cap & 0x400) && !vtm)
+	if ((adapter->function_mode & 0x400) && !vtm)
 		vlanf = 0;
 
 	skb = napi_get_frags(&eq_obj->napi);
@@ -1585,12 +1743,66 @@
 	return 1;
 }
 
+static inline bool be_detect_ue(struct be_adapter *adapter)
+{
+	u32 online0 = 0, online1 = 0;
+
+	pci_read_config_dword(adapter->pdev, PCICFG_ONLINE0, &online0);
+
+	pci_read_config_dword(adapter->pdev, PCICFG_ONLINE1, &online1);
+
+	if (!online0 || !online1) {
+		adapter->ue_detected = true;
+		dev_err(&adapter->pdev->dev,
+			"UE Detected!! online0=%d online1=%d\n",
+			online0, online1);
+		return true;
+	}
+
+	return false;
+}
+
+void be_dump_ue(struct be_adapter *adapter)
+{
+	u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
+	u32 i;
+
+	pci_read_config_dword(adapter->pdev,
+				PCICFG_UE_STATUS_LOW, &ue_status_lo);
+	pci_read_config_dword(adapter->pdev,
+				PCICFG_UE_STATUS_HIGH, &ue_status_hi);
+	pci_read_config_dword(adapter->pdev,
+				PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
+	pci_read_config_dword(adapter->pdev,
+				PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
+
+	ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
+	ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
+
+	if (ue_status_lo) {
+		for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
+			if (ue_status_lo & 1)
+				dev_err(&adapter->pdev->dev,
+				"UE: %s bit set\n", ue_status_low_desc[i]);
+		}
+	}
+	if (ue_status_hi) {
+		for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
+			if (ue_status_hi & 1)
+				dev_err(&adapter->pdev->dev,
+				"UE: %s bit set\n", ue_status_hi_desc[i]);
+		}
+	}
+
+}
+
 static void be_worker(struct work_struct *work)
 {
 	struct be_adapter *adapter =
 		container_of(work, struct be_adapter, work.work);
 
-	be_cmd_get_stats(adapter, &adapter->stats.cmd);
+	if (!adapter->stats_ioctl_sent)
+		be_cmd_get_stats(adapter, &adapter->stats.cmd);
 
 	/* Set EQ delay */
 	be_rx_eqd_update(adapter);
@@ -1602,6 +1814,10 @@
 		adapter->rx_post_starved = false;
 		be_post_rx_frags(adapter);
 	}
+	if (!adapter->ue_detected) {
+		if (be_detect_ue(adapter))
+			be_dump_ue(adapter);
+	}
 
 	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
 }
@@ -1629,10 +1845,11 @@
 
 static void be_sriov_enable(struct be_adapter *adapter)
 {
-#ifdef CONFIG_PCI_IOV
-	int status;
 	be_check_sriov_fn_type(adapter);
+#ifdef CONFIG_PCI_IOV
 	if (be_physfn(adapter) && num_vfs) {
+		int status;
+
 		status = pci_enable_sriov(adapter->pdev, num_vfs);
 		adapter->sriov_enabled = status ? false : true;
 	}
@@ -1822,7 +2039,7 @@
 	be_link_status_update(adapter, link_up);
 
 	if (be_physfn(adapter)) {
-		status = be_vid_config(adapter);
+		status = be_vid_config(adapter, false, 0);
 		if (status)
 			goto err;
 
@@ -1903,13 +2120,15 @@
 			cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED
 					| BE_IF_FLAGS_BROADCAST;
 			status = be_cmd_if_create(adapter, cap_flags, en_flags,
-					mac, true, &adapter->vf_if_handle[vf],
+					mac, true,
+					&adapter->vf_cfg[vf].vf_if_handle,
 					NULL, vf+1);
 			if (status) {
 				dev_err(&adapter->pdev->dev,
 				"Interface Create failed for VF %d\n", vf);
 				goto if_destroy;
 			}
+			adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
 			vf++;
 		}
 	} else if (!be_physfn(adapter)) {
@@ -1943,8 +2162,9 @@
 	be_tx_queues_destroy(adapter);
 if_destroy:
 	for (vf = 0; vf < num_vfs; vf++)
-		if (adapter->vf_if_handle[vf])
-			be_cmd_if_destroy(adapter, adapter->vf_if_handle[vf]);
+		if (adapter->vf_cfg[vf].vf_if_handle)
+			be_cmd_if_destroy(adapter,
+					adapter->vf_cfg[vf].vf_if_handle);
 	be_cmd_if_destroy(adapter, adapter->if_handle);
 do_none:
 	return status;
@@ -2187,7 +2407,10 @@
 	.ndo_vlan_rx_register	= be_vlan_register,
 	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
 	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
-	.ndo_set_vf_mac		= be_set_vf_mac
+	.ndo_set_vf_mac		= be_set_vf_mac,
+	.ndo_set_vf_vlan	= be_set_vf_vlan,
+	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
+	.ndo_get_vf_config	= be_get_vf_config
 };
 
 static void be_netdev_init(struct net_device *netdev)
@@ -2406,7 +2629,7 @@
 		return status;
 
 	status = be_cmd_query_fw_cfg(adapter,
-				&adapter->port_num, &adapter->cap);
+				&adapter->port_num, &adapter->function_mode);
 	if (status)
 		return status;
 
@@ -2426,7 +2649,7 @@
 		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
 	}
 
-	if (adapter->cap & 0x400)
+	if (adapter->function_mode & 0x400)
 		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
 	else
 		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
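
The ndo_set_vf_mac/vlan/tx_rate and ndo_get_vf_config hooks wired up above are driven from rtnetlink, i.e. "ip link set <dev> vf <n> mac|vlan|rate ..." and "ip link show <dev>". A simplified sketch of the core-side dispatch for the MAC case, modelled loosely on net/core/rtnetlink.c's do_setlink() (names and error handling approximate):

static int do_set_vf_mac(struct net_device *dev, struct nlattr *attr)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	struct ifla_vf_mac *ivm = nla_data(attr);	/* from linux/if_link.h */

	if (!ops->ndo_set_vf_mac)
		return -EOPNOTSUPP;
	return ops->ndo_set_vf_mac(dev, ivm->vf, ivm->mac);
}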
diff --git a/drivers/net/bnx2x/Makefile b/drivers/net/bnx2x/Makefile
new file mode 100644
index 0000000..084afce
--- /dev/null
+++ b/drivers/net/bnx2x/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for Broadcom 10-Gigabit Ethernet driver
+#
+
+obj-$(CONFIG_BNX2X) += bnx2x.o
+
+bnx2x-objs := bnx2x_main.o bnx2x_link.o bnx2x_cmn.o bnx2x_ethtool.o bnx2x_stats.o
diff --git a/drivers/net/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
similarity index 84%
rename from drivers/net/bnx2x.h
rename to drivers/net/bnx2x/bnx2x.h
index 8bd2368..53af9c9 100644
--- a/drivers/net/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -20,6 +20,10 @@
  * (you will need to reboot afterwards) */
 /* #define BNX2X_STOP_ON_ERROR */
 
+#define DRV_MODULE_VERSION      "1.52.53-3"
+#define DRV_MODULE_RELDATE      "2010/18/04"
+#define BNX2X_BC_VER            0x040200
+
 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
 #define BCM_VLAN			1
 #endif
@@ -32,7 +36,7 @@
 
 #if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
 #define BCM_CNIC 1
-#include "cnic_if.h"
+#include "../cnic_if.h"
 #endif
 
 
@@ -45,10 +49,12 @@
 #endif
 
 #include <linux/mdio.h>
+#include <linux/pci.h>
 #include "bnx2x_reg.h"
 #include "bnx2x_fw_defs.h"
 #include "bnx2x_hsi.h"
 #include "bnx2x_link.h"
+#include "bnx2x_stats.h"
 
 /* error/debug prints */
 
@@ -106,6 +112,7 @@
 		dev_info(&bp->pdev->dev, __fmt, ##__args);	 \
 } while (0)
 
+void bnx2x_panic_dump(struct bnx2x *bp);
 
 #ifdef BNX2X_STOP_ON_ERROR
 #define bnx2x_panic() do { \
@@ -248,43 +255,6 @@
 #define NEXT_SGE_MASK_ELEM(el)		(((el) + 1) & RX_SGE_MASK_LEN_MASK)
 
 
-struct bnx2x_eth_q_stats {
-	u32 total_bytes_received_hi;
-	u32 total_bytes_received_lo;
-	u32 total_bytes_transmitted_hi;
-	u32 total_bytes_transmitted_lo;
-	u32 total_unicast_packets_received_hi;
-	u32 total_unicast_packets_received_lo;
-	u32 total_multicast_packets_received_hi;
-	u32 total_multicast_packets_received_lo;
-	u32 total_broadcast_packets_received_hi;
-	u32 total_broadcast_packets_received_lo;
-	u32 total_unicast_packets_transmitted_hi;
-	u32 total_unicast_packets_transmitted_lo;
-	u32 total_multicast_packets_transmitted_hi;
-	u32 total_multicast_packets_transmitted_lo;
-	u32 total_broadcast_packets_transmitted_hi;
-	u32 total_broadcast_packets_transmitted_lo;
-	u32 valid_bytes_received_hi;
-	u32 valid_bytes_received_lo;
-
-	u32 error_bytes_received_hi;
-	u32 error_bytes_received_lo;
-	u32 etherstatsoverrsizepkts_hi;
-	u32 etherstatsoverrsizepkts_lo;
-	u32 no_buff_discard_hi;
-	u32 no_buff_discard_lo;
-
-	u32 driver_xoff;
-	u32 rx_err_discard_pkt;
-	u32 rx_skb_alloc_failed;
-	u32 hw_csum_err;
-};
-
-#define BNX2X_NUM_Q_STATS		13
-#define Q_STATS_OFFSET32(stat_name) \
-			(offsetof(struct bnx2x_eth_q_stats, stat_name) / 4)
-
 struct bnx2x_fastpath {
 
 	struct napi_struct	napi;
@@ -593,27 +563,6 @@
 
 /* port */
 
-struct nig_stats {
-	u32 brb_discard;
-	u32 brb_packet;
-	u32 brb_truncate;
-	u32 flow_ctrl_discard;
-	u32 flow_ctrl_octets;
-	u32 flow_ctrl_packet;
-	u32 mng_discard;
-	u32 mng_octet_inp;
-	u32 mng_octet_out;
-	u32 mng_packet_inp;
-	u32 mng_packet_out;
-	u32 pbf_octets;
-	u32 pbf_packet;
-	u32 safc_inp;
-	u32 egress_mac_pkt0_lo;
-	u32 egress_mac_pkt0_hi;
-	u32 egress_mac_pkt1_lo;
-	u32 egress_mac_pkt1_hi;
-};
-
 struct bnx2x_port {
 	u32			pmf;
 
@@ -641,156 +590,6 @@
 /* end of port */
 
 
-enum bnx2x_stats_event {
-	STATS_EVENT_PMF = 0,
-	STATS_EVENT_LINK_UP,
-	STATS_EVENT_UPDATE,
-	STATS_EVENT_STOP,
-	STATS_EVENT_MAX
-};
-
-enum bnx2x_stats_state {
-	STATS_STATE_DISABLED = 0,
-	STATS_STATE_ENABLED,
-	STATS_STATE_MAX
-};
-
-struct bnx2x_eth_stats {
-	u32 total_bytes_received_hi;
-	u32 total_bytes_received_lo;
-	u32 total_bytes_transmitted_hi;
-	u32 total_bytes_transmitted_lo;
-	u32 total_unicast_packets_received_hi;
-	u32 total_unicast_packets_received_lo;
-	u32 total_multicast_packets_received_hi;
-	u32 total_multicast_packets_received_lo;
-	u32 total_broadcast_packets_received_hi;
-	u32 total_broadcast_packets_received_lo;
-	u32 total_unicast_packets_transmitted_hi;
-	u32 total_unicast_packets_transmitted_lo;
-	u32 total_multicast_packets_transmitted_hi;
-	u32 total_multicast_packets_transmitted_lo;
-	u32 total_broadcast_packets_transmitted_hi;
-	u32 total_broadcast_packets_transmitted_lo;
-	u32 valid_bytes_received_hi;
-	u32 valid_bytes_received_lo;
-
-	u32 error_bytes_received_hi;
-	u32 error_bytes_received_lo;
-	u32 etherstatsoverrsizepkts_hi;
-	u32 etherstatsoverrsizepkts_lo;
-	u32 no_buff_discard_hi;
-	u32 no_buff_discard_lo;
-
-	u32 rx_stat_ifhcinbadoctets_hi;
-	u32 rx_stat_ifhcinbadoctets_lo;
-	u32 tx_stat_ifhcoutbadoctets_hi;
-	u32 tx_stat_ifhcoutbadoctets_lo;
-	u32 rx_stat_dot3statsfcserrors_hi;
-	u32 rx_stat_dot3statsfcserrors_lo;
-	u32 rx_stat_dot3statsalignmenterrors_hi;
-	u32 rx_stat_dot3statsalignmenterrors_lo;
-	u32 rx_stat_dot3statscarriersenseerrors_hi;
-	u32 rx_stat_dot3statscarriersenseerrors_lo;
-	u32 rx_stat_falsecarriererrors_hi;
-	u32 rx_stat_falsecarriererrors_lo;
-	u32 rx_stat_etherstatsundersizepkts_hi;
-	u32 rx_stat_etherstatsundersizepkts_lo;
-	u32 rx_stat_dot3statsframestoolong_hi;
-	u32 rx_stat_dot3statsframestoolong_lo;
-	u32 rx_stat_etherstatsfragments_hi;
-	u32 rx_stat_etherstatsfragments_lo;
-	u32 rx_stat_etherstatsjabbers_hi;
-	u32 rx_stat_etherstatsjabbers_lo;
-	u32 rx_stat_maccontrolframesreceived_hi;
-	u32 rx_stat_maccontrolframesreceived_lo;
-	u32 rx_stat_bmac_xpf_hi;
-	u32 rx_stat_bmac_xpf_lo;
-	u32 rx_stat_bmac_xcf_hi;
-	u32 rx_stat_bmac_xcf_lo;
-	u32 rx_stat_xoffstateentered_hi;
-	u32 rx_stat_xoffstateentered_lo;
-	u32 rx_stat_xonpauseframesreceived_hi;
-	u32 rx_stat_xonpauseframesreceived_lo;
-	u32 rx_stat_xoffpauseframesreceived_hi;
-	u32 rx_stat_xoffpauseframesreceived_lo;
-	u32 tx_stat_outxonsent_hi;
-	u32 tx_stat_outxonsent_lo;
-	u32 tx_stat_outxoffsent_hi;
-	u32 tx_stat_outxoffsent_lo;
-	u32 tx_stat_flowcontroldone_hi;
-	u32 tx_stat_flowcontroldone_lo;
-	u32 tx_stat_etherstatscollisions_hi;
-	u32 tx_stat_etherstatscollisions_lo;
-	u32 tx_stat_dot3statssinglecollisionframes_hi;
-	u32 tx_stat_dot3statssinglecollisionframes_lo;
-	u32 tx_stat_dot3statsmultiplecollisionframes_hi;
-	u32 tx_stat_dot3statsmultiplecollisionframes_lo;
-	u32 tx_stat_dot3statsdeferredtransmissions_hi;
-	u32 tx_stat_dot3statsdeferredtransmissions_lo;
-	u32 tx_stat_dot3statsexcessivecollisions_hi;
-	u32 tx_stat_dot3statsexcessivecollisions_lo;
-	u32 tx_stat_dot3statslatecollisions_hi;
-	u32 tx_stat_dot3statslatecollisions_lo;
-	u32 tx_stat_etherstatspkts64octets_hi;
-	u32 tx_stat_etherstatspkts64octets_lo;
-	u32 tx_stat_etherstatspkts65octetsto127octets_hi;
-	u32 tx_stat_etherstatspkts65octetsto127octets_lo;
-	u32 tx_stat_etherstatspkts128octetsto255octets_hi;
-	u32 tx_stat_etherstatspkts128octetsto255octets_lo;
-	u32 tx_stat_etherstatspkts256octetsto511octets_hi;
-	u32 tx_stat_etherstatspkts256octetsto511octets_lo;
-	u32 tx_stat_etherstatspkts512octetsto1023octets_hi;
-	u32 tx_stat_etherstatspkts512octetsto1023octets_lo;
-	u32 tx_stat_etherstatspkts1024octetsto1522octets_hi;
-	u32 tx_stat_etherstatspkts1024octetsto1522octets_lo;
-	u32 tx_stat_etherstatspktsover1522octets_hi;
-	u32 tx_stat_etherstatspktsover1522octets_lo;
-	u32 tx_stat_bmac_2047_hi;
-	u32 tx_stat_bmac_2047_lo;
-	u32 tx_stat_bmac_4095_hi;
-	u32 tx_stat_bmac_4095_lo;
-	u32 tx_stat_bmac_9216_hi;
-	u32 tx_stat_bmac_9216_lo;
-	u32 tx_stat_bmac_16383_hi;
-	u32 tx_stat_bmac_16383_lo;
-	u32 tx_stat_dot3statsinternalmactransmiterrors_hi;
-	u32 tx_stat_dot3statsinternalmactransmiterrors_lo;
-	u32 tx_stat_bmac_ufl_hi;
-	u32 tx_stat_bmac_ufl_lo;
-
-	u32 pause_frames_received_hi;
-	u32 pause_frames_received_lo;
-	u32 pause_frames_sent_hi;
-	u32 pause_frames_sent_lo;
-
-	u32 etherstatspkts1024octetsto1522octets_hi;
-	u32 etherstatspkts1024octetsto1522octets_lo;
-	u32 etherstatspktsover1522octets_hi;
-	u32 etherstatspktsover1522octets_lo;
-
-	u32 brb_drop_hi;
-	u32 brb_drop_lo;
-	u32 brb_truncate_hi;
-	u32 brb_truncate_lo;
-
-	u32 mac_filter_discard;
-	u32 xxoverflow_discard;
-	u32 brb_truncate_discard;
-	u32 mac_discard;
-
-	u32 driver_xoff;
-	u32 rx_err_discard_pkt;
-	u32 rx_skb_alloc_failed;
-	u32 hw_csum_err;
-
-	u32 nig_timer_max;
-};
-
-#define BNX2X_NUM_STATS			43
-#define STATS_OFFSET32(stat_name) \
-			(offsetof(struct bnx2x_eth_stats, stat_name) / 4)
-
 
 #ifdef BCM_CNIC
 #define MAX_CONTEXT			15
@@ -1006,6 +805,8 @@
 
 	int			multi_mode;
 	int			num_queues;
+	int			disable_tpa;
+	int			int_mode;
 
 	u32			rx_mode;
 #define BNX2X_RX_MODE_NONE		0
@@ -1062,6 +863,10 @@
 
 	/* used to synchronize stats collecting */
 	int			stats_state;
+
+	/* used for synchronization of concurrent threads statistics handling */
+	spinlock_t		stats_lock;
+
 	/* used by dmae command loader */
 	struct dmae_command	stats_dmae;
 	int			executer_idx;
@@ -1130,6 +935,10 @@
 void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val);
 void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
 			       u32 addr, u32 len);
+void bnx2x_calc_fc_adv(struct bnx2x *bp);
+int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
+		  u32 data_hi, u32 data_lo, int common);
+void bnx2x_update_coalesce(struct bnx2x *bp);
 
 static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 			   int wait)
@@ -1371,6 +1180,18 @@
 #define BNX2X_VPD_LEN			128
 #define VENDOR_ID_LEN			4
 
+#ifdef BNX2X_MAIN
+#define BNX2X_EXTERN
+#else
+#define BNX2X_EXTERN extern
+#endif
+
+BNX2X_EXTERN int load_count[3]; /* 0-common, 1-port0, 2-port1 */
+
 /* MISC_REG_RESET_REG - this is here for the hsi to work don't touch */
 
+extern void bnx2x_set_ethtool_ops(struct net_device *netdev);
+
+void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx);
+
 #endif /* bnx2x.h */
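
The BNX2X_EXTERN/BNX2X_MAIN pair above is the usual single-definition header idiom: every file that includes the header sees an extern declaration, except the one translation unit that defines BNX2X_MAIN before the include and therefore emits the actual definition. A standalone illustration with hypothetical names:

/* foo.h */
#ifdef FOO_MAIN
#define FOO_EXTERN
#else
#define FOO_EXTERN extern
#endif
FOO_EXTERN int foo_count[3];	/* definition only where FOO_MAIN is set */

/* foo_main.c */
#define FOO_MAIN
#include "foo.h"		/* emits: int foo_count[3]; */

/* any other .c */
#include "foo.h"		/* sees: extern int foo_count[3]; */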
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
new file mode 100644
index 0000000..02bf710
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -0,0 +1,2252 @@
+/* bnx2x_cmn.c: Broadcom Everest network driver.
+ *
+ * Copyright (c) 2007-2010 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Eliezer Tamir
+ * Based on code from Michael Chan's bnx2 driver
+ * UDP CSUM errata workaround by Arik Gendelman
+ * Slowpath and fastpath rework by Vladislav Zolotarov
+ * Statistics and Link management by Yitchak Gertner
+ *
+ */
+
+
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <net/ip6_checksum.h>
+#include "bnx2x_cmn.h"
+
+#ifdef BCM_VLAN
+#include <linux/if_vlan.h>
+#endif
+
+static int bnx2x_poll(struct napi_struct *napi, int budget);
+
+/* free skb in the packet ring at pos idx
+ * return idx of last bd freed
+ */
+static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+			     u16 idx)
+{
+	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
+	struct eth_tx_start_bd *tx_start_bd;
+	struct eth_tx_bd *tx_data_bd;
+	struct sk_buff *skb = tx_buf->skb;
+	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
+	int nbd;
+
+	/* prefetch skb end pointer to speedup dev_kfree_skb() */
+	prefetch(&skb->end);
+
+	DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
+	   idx, tx_buf, skb);
+
+	/* unmap first bd */
+	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
+	tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
+	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
+			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
+
+	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
+#ifdef BNX2X_STOP_ON_ERROR
+	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
+		BNX2X_ERR("BAD nbd!\n");
+		bnx2x_panic();
+	}
+#endif
+	new_cons = nbd + tx_buf->first_bd;
+
+	/* Get the next bd */
+	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
+
+	/* Skip a parse bd... */
+	--nbd;
+	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
+
+	/* ...and the TSO split header bd since they have no mapping */
+	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
+		--nbd;
+		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
+	}
+
+	/* now free frags */
+	while (nbd > 0) {
+
+		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
+		tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
+		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
+			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
+		if (--nbd)
+			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
+	}
+
+	/* release skb */
+	WARN_ON(!skb);
+	dev_kfree_skb(skb);
+	tx_buf->first_bd = 0;
+	tx_buf->skb = NULL;
+
+	return new_cons;
+}
+
+int bnx2x_tx_int(struct bnx2x_fastpath *fp)
+{
+	struct bnx2x *bp = fp->bp;
+	struct netdev_queue *txq;
+	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
+
+#ifdef BNX2X_STOP_ON_ERROR
+	if (unlikely(bp->panic))
+		return -1;
+#endif
+
+	txq = netdev_get_tx_queue(bp->dev, fp->index);
+	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
+	sw_cons = fp->tx_pkt_cons;
+
+	while (sw_cons != hw_cons) {
+		u16 pkt_cons;
+
+		pkt_cons = TX_BD(sw_cons);
+
+		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
+
+		DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
+		   hw_cons, sw_cons, pkt_cons);
+
+/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
+			rmb();
+			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
+		}
+*/
+		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
+		sw_cons++;
+	}
+
+	fp->tx_pkt_cons = sw_cons;
+	fp->tx_bd_cons = bd_cons;
+
+	/* Need to make the tx_bd_cons update visible to start_xmit()
+	 * before checking for netif_tx_queue_stopped().  Without the
+	 * memory barrier, there is a small possibility that
+	 * start_xmit() will miss it and cause the queue to be stopped
+	 * forever.
+	 */
+	smp_mb();
+
+	/* TBD need a thresh? */
+	if (unlikely(netif_tx_queue_stopped(txq))) {
+		/* Taking tx_lock() is needed to prevent reenabling the queue
+		 * while it's empty. This could have happen if rx_action() gets
+		 * suspended in bnx2x_tx_int() after the condition before
+		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
+		 *
+		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
+		 * sends some packets consuming the whole queue again->
+		 * stops the queue
+		 */
+
+		__netif_tx_lock(txq, smp_processor_id());
+
+		if ((netif_tx_queue_stopped(txq)) &&
+		    (bp->state == BNX2X_STATE_OPEN) &&
+		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
+			netif_tx_wake_queue(txq);
+
+		__netif_tx_unlock(txq);
+	}
+	return 0;
+}
+
+static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
+					     u16 idx)
+{
+	u16 last_max = fp->last_max_sge;
+
+	if (SUB_S16(idx, last_max) > 0)
+		fp->last_max_sge = idx;
+}
+
+static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
+				  struct eth_fast_path_rx_cqe *fp_cqe)
+{
+	struct bnx2x *bp = fp->bp;
+	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
+				     le16_to_cpu(fp_cqe->len_on_bd)) >>
+		      SGE_PAGE_SHIFT;
+	u16 last_max, last_elem, first_elem;
+	u16 delta = 0;
+	u16 i;
+
+	if (!sge_len)
+		return;
+
+	/* First mark all used pages */
+	for (i = 0; i < sge_len; i++)
+		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
+
+	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
+	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
+
+	/* Here we assume that the last SGE index is the biggest */
+	prefetch((void *)(fp->sge_mask));
+	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
+
+	last_max = RX_SGE(fp->last_max_sge);
+	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
+	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
+
+	/* If ring is not full */
+	if (last_elem + 1 != first_elem)
+		last_elem++;
+
+	/* Now update the prod */
+	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
+		if (likely(fp->sge_mask[i]))
+			break;
+
+		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
+		delta += RX_SGE_MASK_ELEM_SZ;
+	}
+
+	if (delta > 0) {
+		fp->rx_sge_prod += delta;
+		/* clear page-end entries */
+		bnx2x_clear_sge_mask_next_elems(fp);
+	}
+
+	DP(NETIF_MSG_RX_STATUS,
+	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
+	   fp->last_max_sge, fp->rx_sge_prod);
+}
+
+static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
+			    struct sk_buff *skb, u16 cons, u16 prod)
+{
+	struct bnx2x *bp = fp->bp;
+	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
+	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
+	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
+	dma_addr_t mapping;
+
+	/* move empty skb from pool to prod and map it */
+	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
+	mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
+				 bp->rx_buf_size, DMA_FROM_DEVICE);
+	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
+
+	/* move partial skb from cons to pool (don't unmap yet) */
+	fp->tpa_pool[queue] = *cons_rx_buf;
+
+	/* mark bin state as start - print error if current state != stop */
+	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
+		BNX2X_ERR("start of bin not in stop [%d]\n", queue);
+
+	fp->tpa_state[queue] = BNX2X_TPA_START;
+
+	/* point prod_bd to new skb */
+	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
+	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
+
+#ifdef BNX2X_STOP_ON_ERROR
+	fp->tpa_queue_used |= (1 << queue);
+#ifdef _ASM_GENERIC_INT_L64_H
+	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
+#else
+	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
+#endif
+	   fp->tpa_queue_used);
+#endif
+}
+
+static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+			       struct sk_buff *skb,
+			       struct eth_fast_path_rx_cqe *fp_cqe,
+			       u16 cqe_idx)
+{
+	struct sw_rx_page *rx_pg, old_rx_pg;
+	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
+	u32 i, frag_len, frag_size, pages;
+	int err;
+	int j;
+
+	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
+	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
+
+	/* This is needed in order to enable forwarding support */
+	if (frag_size)
+		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
+					       max(frag_size, (u32)len_on_bd));
+
+#ifdef BNX2X_STOP_ON_ERROR
+	if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
+		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
+			  pages, cqe_idx);
+		BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
+			  fp_cqe->pkt_len, len_on_bd);
+		bnx2x_panic();
+		return -EINVAL;
+	}
+#endif
+
+	/* Run through the SGL and compose the fragmented skb */
+	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
+		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
+
+		/* FW gives the indices of the SGE as if the ring is an array
+		   (meaning that "next" element will consume 2 indices) */
+		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
+		rx_pg = &fp->rx_page_ring[sge_idx];
+		old_rx_pg = *rx_pg;
+
+		/* If we fail to allocate a substitute page, we simply stop
+		   where we are and drop the whole packet */
+		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
+		if (unlikely(err)) {
+			fp->eth_q_stats.rx_skb_alloc_failed++;
+			return err;
+		}
+
+		/* Unmap the page as we're going to pass it to the stack */
+		dma_unmap_page(&bp->pdev->dev,
+			       dma_unmap_addr(&old_rx_pg, mapping),
+			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
+
+		/* Add one frag and update the appropriate fields in the skb */
+		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
+
+		skb->data_len += frag_len;
+		skb->truesize += frag_len;
+		skb->len += frag_len;
+
+		frag_size -= frag_len;
+	}
+
+	return 0;
+}
+
+static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
+			   u16 cqe_idx)
+{
+	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
+	struct sk_buff *skb = rx_buf->skb;
+	/* alloc new skb */
+	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
+
+	/* Unmap skb in the pool anyway, as we are going to change
+	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
+	   fails. */
+	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
+			 bp->rx_buf_size, DMA_FROM_DEVICE);
+
+	if (likely(new_skb)) {
+		/* fix ip xsum and give it to the stack */
+		/* (no need to map the new skb) */
+#ifdef BCM_VLAN
+		int is_vlan_cqe =
+			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
+			 PARSING_FLAGS_VLAN);
+		int is_not_hwaccel_vlan_cqe =
+			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
+#endif
+
+		prefetch(skb);
+		prefetch(((char *)(skb)) + 128);
+
+#ifdef BNX2X_STOP_ON_ERROR
+		if (pad + len > bp->rx_buf_size) {
+			BNX2X_ERR("skb_put is about to fail...  "
+				  "pad %d  len %d  rx_buf_size %d\n",
+				  pad, len, bp->rx_buf_size);
+			bnx2x_panic();
+			return;
+		}
+#endif
+
+		skb_reserve(skb, pad);
+		skb_put(skb, len);
+
+		skb->protocol = eth_type_trans(skb, bp->dev);
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+		{
+			struct iphdr *iph;
+
+			iph = (struct iphdr *)skb->data;
+#ifdef BCM_VLAN
+			/* If there is no Rx VLAN offloading -
+			   take VLAN tag into an account */
+			if (unlikely(is_not_hwaccel_vlan_cqe))
+				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
+#endif
+			iph->check = 0;
+			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
+		}
+
+		if (!bnx2x_fill_frag_skb(bp, fp, skb,
+					 &cqe->fast_path_cqe, cqe_idx)) {
+#ifdef BCM_VLAN
+			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
+			    (!is_not_hwaccel_vlan_cqe))
+				vlan_gro_receive(&fp->napi, bp->vlgrp,
+						 le16_to_cpu(cqe->fast_path_cqe.
+							     vlan_tag), skb);
+			else
+#endif
+				napi_gro_receive(&fp->napi, skb);
+		} else {
+			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
+			   " - dropping packet!\n");
+			dev_kfree_skb(skb);
+		}
+
+
+		/* put new skb in bin */
+		fp->tpa_pool[queue].skb = new_skb;
+
+	} else {
+		/* else drop the packet and keep the buffer in the bin */
+		DP(NETIF_MSG_RX_STATUS,
+		   "Failed to allocate new skb - dropping packet!\n");
+		fp->eth_q_stats.rx_skb_alloc_failed++;
+	}
+
+	fp->tpa_state[queue] = BNX2X_TPA_STOP;
+}
+
+/* Set Toeplitz hash value in the skb using the value from the
+ * CQE (calculated by HW).
+ */
+static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
+					struct sk_buff *skb)
+{
+	/* Set Toeplitz hash from CQE */
+	if ((bp->dev->features & NETIF_F_RXHASH) &&
+	    (cqe->fast_path_cqe.status_flags &
+	     ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
+		skb->rxhash =
+		le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
+}
+
+int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
+{
+	struct bnx2x *bp = fp->bp;
+	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
+	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
+	int rx_pkt = 0;
+
+#ifdef BNX2X_STOP_ON_ERROR
+	if (unlikely(bp->panic))
+		return 0;
+#endif
+
+	/* CQ "next element" is of the size of the regular element,
+	   that's why it's ok here */
+	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
+	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
+		hw_comp_cons++;
+
+	bd_cons = fp->rx_bd_cons;
+	bd_prod = fp->rx_bd_prod;
+	bd_prod_fw = bd_prod;
+	sw_comp_cons = fp->rx_comp_cons;
+	sw_comp_prod = fp->rx_comp_prod;
+
+	/* Memory barrier necessary as speculative reads of the rx
+	 * buffer can be ahead of the index in the status block
+	 */
+	rmb();
+
+	DP(NETIF_MSG_RX_STATUS,
+	   "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
+	   fp->index, hw_comp_cons, sw_comp_cons);
+
+	while (sw_comp_cons != hw_comp_cons) {
+		struct sw_rx_bd *rx_buf = NULL;
+		struct sk_buff *skb;
+		union eth_rx_cqe *cqe;
+		u8 cqe_fp_flags;
+		u16 len, pad;
+
+		comp_ring_cons = RCQ_BD(sw_comp_cons);
+		bd_prod = RX_BD(bd_prod);
+		bd_cons = RX_BD(bd_cons);
+
+		/* Prefetch the page containing the BD descriptor
+		   at producer's index. It will be needed when new skb is
+		   allocated */
+		prefetch((void *)(PAGE_ALIGN((unsigned long)
+					     (&fp->rx_desc_ring[bd_prod])) -
+				  PAGE_SIZE + 1));
+
+		cqe = &fp->rx_comp_ring[comp_ring_cons];
+		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
+
+		DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
+		   "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
+		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
+		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
+		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
+		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));
+
+		/* is this a slowpath msg? */
+		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
+			bnx2x_sp_event(fp, cqe);
+			goto next_cqe;
+
+		/* this is an rx packet */
+		} else {
+			rx_buf = &fp->rx_buf_ring[bd_cons];
+			skb = rx_buf->skb;
+			prefetch(skb);
+			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
+			pad = cqe->fast_path_cqe.placement_offset;
+
+			/* If CQE is marked both TPA_START and TPA_END
+			   it is a non-TPA CQE */
+			if ((!fp->disable_tpa) &&
+			    (TPA_TYPE(cqe_fp_flags) !=
+					(TPA_TYPE_START | TPA_TYPE_END))) {
+				u16 queue = cqe->fast_path_cqe.queue_index;
+
+				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
+					DP(NETIF_MSG_RX_STATUS,
+					   "calling tpa_start on queue %d\n",
+					   queue);
+
+					bnx2x_tpa_start(fp, queue, skb,
+							bd_cons, bd_prod);
+
+					/* Set Toeplitz hash for an LRO skb */
+					bnx2x_set_skb_rxhash(bp, cqe, skb);
+
+					goto next_rx;
+				}
+
+				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
+					DP(NETIF_MSG_RX_STATUS,
+					   "calling tpa_stop on queue %d\n",
+					   queue);
+
+					if (!BNX2X_RX_SUM_FIX(cqe))
+						BNX2X_ERR("STOP on none TCP "
+							  "data\n");
+
+					/* This is a size of the linear data
+					   on this skb */
+					len = le16_to_cpu(cqe->fast_path_cqe.
+								len_on_bd);
+					bnx2x_tpa_stop(bp, fp, queue, pad,
+						    len, cqe, comp_ring_cons);
+#ifdef BNX2X_STOP_ON_ERROR
+					if (bp->panic)
+						return 0;
+#endif
+
+					bnx2x_update_sge_prod(fp,
+							&cqe->fast_path_cqe);
+					goto next_cqe;
+				}
+			}
+
+			dma_sync_single_for_device(&bp->pdev->dev,
+					dma_unmap_addr(rx_buf, mapping),
+						   pad + RX_COPY_THRESH,
+						   DMA_FROM_DEVICE);
+			prefetch(((char *)(skb)) + 128);
+
+			/* is this an error packet? */
+			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
+				DP(NETIF_MSG_RX_ERR,
+				   "ERROR  flags %x  rx packet %u\n",
+				   cqe_fp_flags, sw_comp_cons);
+				fp->eth_q_stats.rx_err_discard_pkt++;
+				goto reuse_rx;
+			}
+
+			/* Since we don't have a jumbo ring
+			 * copy small packets if mtu > 1500
+			 */
+			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
+			    (len <= RX_COPY_THRESH)) {
+				struct sk_buff *new_skb;
+
+				new_skb = netdev_alloc_skb(bp->dev,
+							   len + pad);
+				if (new_skb == NULL) {
+					DP(NETIF_MSG_RX_ERR,
+					   "ERROR  packet dropped "
+					   "because of alloc failure\n");
+					fp->eth_q_stats.rx_skb_alloc_failed++;
+					goto reuse_rx;
+				}
+
+				/* aligned copy */
+				skb_copy_from_linear_data_offset(skb, pad,
+						    new_skb->data + pad, len);
+				skb_reserve(new_skb, pad);
+				skb_put(new_skb, len);
+
+				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
+
+				skb = new_skb;
+
+			} else
+			if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
+				dma_unmap_single(&bp->pdev->dev,
+					dma_unmap_addr(rx_buf, mapping),
+						 bp->rx_buf_size,
+						 DMA_FROM_DEVICE);
+				skb_reserve(skb, pad);
+				skb_put(skb, len);
+
+			} else {
+				DP(NETIF_MSG_RX_ERR,
+				   "ERROR  packet dropped because "
+				   "of alloc failure\n");
+				fp->eth_q_stats.rx_skb_alloc_failed++;
+reuse_rx:
+				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
+				goto next_rx;
+			}
+
+			skb->protocol = eth_type_trans(skb, bp->dev);
+
+			/* Set Toeplitz hash for a non-LRO skb */
+			bnx2x_set_skb_rxhash(bp, cqe, skb);
+
+			skb->ip_summed = CHECKSUM_NONE;
+			if (bp->rx_csum) {
+				if (likely(BNX2X_RX_CSUM_OK(cqe)))
+					skb->ip_summed = CHECKSUM_UNNECESSARY;
+				else
+					fp->eth_q_stats.hw_csum_err++;
+			}
+		}
+
+		skb_record_rx_queue(skb, fp->index);
+
+#ifdef BCM_VLAN
+		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
+		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
+		     PARSING_FLAGS_VLAN))
+			vlan_gro_receive(&fp->napi, bp->vlgrp,
+				le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
+		else
+#endif
+			napi_gro_receive(&fp->napi, skb);
+
+
+next_rx:
+		rx_buf->skb = NULL;
+
+		bd_cons = NEXT_RX_IDX(bd_cons);
+		bd_prod = NEXT_RX_IDX(bd_prod);
+		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
+		rx_pkt++;
+next_cqe:
+		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
+		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
+
+		if (rx_pkt == budget)
+			break;
+	} /* while */
+
+	fp->rx_bd_cons = bd_cons;
+	fp->rx_bd_prod = bd_prod_fw;
+	fp->rx_comp_cons = sw_comp_cons;
+	fp->rx_comp_prod = sw_comp_prod;
+
+	/* Update producers */
+	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
+			     fp->rx_sge_prod);
+
+	fp->rx_pkt += rx_pkt;
+	fp->rx_calls++;
+
+	return rx_pkt;
+}
+
+static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
+{
+	struct bnx2x_fastpath *fp = fp_cookie;
+	struct bnx2x *bp = fp->bp;
+
+	/* Return here if interrupt is disabled */
+	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
+		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
+		return IRQ_HANDLED;
+	}
+
+	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
+	   fp->index, fp->sb_id);
+	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
+
+#ifdef BNX2X_STOP_ON_ERROR
+	if (unlikely(bp->panic))
+		return IRQ_HANDLED;
+#endif
+
+	/* Handle Rx and Tx according to MSI-X vector */
+	prefetch(fp->rx_cons_sb);
+	prefetch(fp->tx_cons_sb);
+	prefetch(&fp->status_blk->u_status_block.status_block_index);
+	prefetch(&fp->status_blk->c_status_block.status_block_index);
+	napi_schedule(&bnx2x_fp(bp, fp->index, napi));
+
+	return IRQ_HANDLED;
+}
+
+
+/* HW Lock for shared dual port PHYs */
+void bnx2x_acquire_phy_lock(struct bnx2x *bp)
+{
+	mutex_lock(&bp->port.phy_mutex);
+
+	if (bp->port.need_hw_lock)
+		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
+}
+
+void bnx2x_release_phy_lock(struct bnx2x *bp)
+{
+	if (bp->port.need_hw_lock)
+		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
+
+	mutex_unlock(&bp->port.phy_mutex);
+}
+
+void bnx2x_link_report(struct bnx2x *bp)
+{
+	if (bp->flags & MF_FUNC_DIS) {
+		netif_carrier_off(bp->dev);
+		netdev_err(bp->dev, "NIC Link is Down\n");
+		return;
+	}
+
+	if (bp->link_vars.link_up) {
+		u16 line_speed;
+
+		if (bp->state == BNX2X_STATE_OPEN)
+			netif_carrier_on(bp->dev);
+		netdev_info(bp->dev, "NIC Link is Up, ");
+
+		line_speed = bp->link_vars.line_speed;
+		if (IS_E1HMF(bp)) {
+			u16 vn_max_rate;
+
+			vn_max_rate =
+				((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
+				 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
+			if (vn_max_rate < line_speed)
+				line_speed = vn_max_rate;
+		}
+		pr_cont("%d Mbps ", line_speed);
+
+		if (bp->link_vars.duplex == DUPLEX_FULL)
+			pr_cont("full duplex");
+		else
+			pr_cont("half duplex");
+
+		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
+			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
+				pr_cont(", receive ");
+				if (bp->link_vars.flow_ctrl &
+				    BNX2X_FLOW_CTRL_TX)
+					pr_cont("& transmit ");
+			} else {
+				pr_cont(", transmit ");
+			}
+			pr_cont("flow control ON");
+		}
+		pr_cont("\n");
+
+	} else { /* link_down */
+		netif_carrier_off(bp->dev);
+		netdev_err(bp->dev, "NIC Link is Down\n");
+	}
+}
+
+void bnx2x_init_rx_rings(struct bnx2x *bp)
+{
+	int func = BP_FUNC(bp);
+	int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
+					      ETH_MAX_AGGREGATION_QUEUES_E1H;
+	u16 ring_prod, cqe_ring_prod;
+	int i, j;
+
+	bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
+	DP(NETIF_MSG_IFUP,
+	   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
+
+	if (bp->flags & TPA_ENABLE_FLAG) {
+
+		for_each_queue(bp, j) {
+			struct bnx2x_fastpath *fp = &bp->fp[j];
+
+			for (i = 0; i < max_agg_queues; i++) {
+				fp->tpa_pool[i].skb =
+				   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
+				if (!fp->tpa_pool[i].skb) {
+					BNX2X_ERR("Failed to allocate TPA "
+						  "skb pool for queue[%d] - "
+						  "disabling TPA on this "
+						  "queue!\n", j);
+					bnx2x_free_tpa_pool(bp, fp, i);
+					fp->disable_tpa = 1;
+					break;
+				}
+				dma_unmap_addr_set((struct sw_rx_bd *)
+							&fp->tpa_pool[i],
+						   mapping, 0);
+				fp->tpa_state[i] = BNX2X_TPA_STOP;
+			}
+		}
+	}
+
+	for_each_queue(bp, j) {
+		struct bnx2x_fastpath *fp = &bp->fp[j];
+
+		fp->rx_bd_cons = 0;
+		fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
+		fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
+
+		/* "next page" elements initialization */
+		/* SGE ring */
+		for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
+			struct eth_rx_sge *sge;
+
+			sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
+			sge->addr_hi =
+				cpu_to_le32(U64_HI(fp->rx_sge_mapping +
+					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
+			sge->addr_lo =
+				cpu_to_le32(U64_LO(fp->rx_sge_mapping +
+					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
+		}
+
+		bnx2x_init_sge_ring_bit_mask(fp);
+
+		/* RX BD ring */
+		for (i = 1; i <= NUM_RX_RINGS; i++) {
+			struct eth_rx_bd *rx_bd;
+
+			rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
+			rx_bd->addr_hi =
+				cpu_to_le32(U64_HI(fp->rx_desc_mapping +
+					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
+			rx_bd->addr_lo =
+				cpu_to_le32(U64_LO(fp->rx_desc_mapping +
+					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
+		}
+
+		/* CQ ring */
+		for (i = 1; i <= NUM_RCQ_RINGS; i++) {
+			struct eth_rx_cqe_next_page *nextpg;
+
+			nextpg = (struct eth_rx_cqe_next_page *)
+				&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
+			nextpg->addr_hi =
+				cpu_to_le32(U64_HI(fp->rx_comp_mapping +
+					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
+			nextpg->addr_lo =
+				cpu_to_le32(U64_LO(fp->rx_comp_mapping +
+					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
+		}
+
+		/* Allocate SGEs and initialize the ring elements */
+		for (i = 0, ring_prod = 0;
+		     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
+
+			if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
+				BNX2X_ERR("was only able to allocate "
+					  "%d rx sges\n", i);
+				BNX2X_ERR("disabling TPA for queue[%d]\n", j);
+				/* Cleanup already allocated elements */
+				bnx2x_free_rx_sge_range(bp, fp, ring_prod);
+				bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
+				fp->disable_tpa = 1;
+				ring_prod = 0;
+				break;
+			}
+			ring_prod = NEXT_SGE_IDX(ring_prod);
+		}
+		fp->rx_sge_prod = ring_prod;
+
+		/* Allocate BDs and initialize BD ring */
+		fp->rx_comp_cons = 0;
+		cqe_ring_prod = ring_prod = 0;
+		for (i = 0; i < bp->rx_ring_size; i++) {
+			if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
+				BNX2X_ERR("was only able to allocate "
+					  "%d rx skbs on queue[%d]\n", i, j);
+				fp->eth_q_stats.rx_skb_alloc_failed++;
+				break;
+			}
+			ring_prod = NEXT_RX_IDX(ring_prod);
+			cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
+			WARN_ON(ring_prod <= i);
+		}
+
+		fp->rx_bd_prod = ring_prod;
+		/* must not have more available CQEs than BDs */
+		fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
+					 cqe_ring_prod);
+		fp->rx_pkt = fp->rx_calls = 0;
+
+		/* Warning!
+		 * This will generate an interrupt (to the TSTORM);
+		 * it must only be done after the chip is initialized.
+		 */
+		bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
+				     fp->rx_sge_prod);
+		if (j != 0)
+			continue;
+
+		REG_WR(bp, BAR_USTRORM_INTMEM +
+		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
+		       U64_LO(fp->rx_comp_mapping));
+		REG_WR(bp, BAR_USTRORM_INTMEM +
+		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
+		       U64_HI(fp->rx_comp_mapping));
+	}
+}
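
The three "next page" loops above share one pattern: the final slot(s) of each ring page are rewritten as pointers to the DMA address of the following page, and the `i % NUM_*_PAGES` arithmetic wraps the last page back to the first, so the hardware walks the pages as one circular ring. A minimal sketch of that wiring, with generic names (demo_next_ptr, demo_chain_pages) standing in for the driver's structures:

/* Chain num_pages descriptor pages into a circular ring by pointing
 * the last slot of page i at the bus address of page (i % num_pages),
 * i.e. the next page for all but the last one, which wraps to page 0.
 * Illustrative only - not the driver's types. */
struct demo_next_ptr {
	__le32 addr_hi;
	__le32 addr_lo;
};

static void demo_chain_pages(struct demo_next_ptr *ring, dma_addr_t base,
			     int per_page, int num_pages)
{
	int i;

	for (i = 1; i <= num_pages; i++) {
		struct demo_next_ptr *np = &ring[per_page * i - 1];
		dma_addr_t next = base + PAGE_SIZE * (i % num_pages);

		np->addr_hi = cpu_to_le32(upper_32_bits(next));
		np->addr_lo = cpu_to_le32(lower_32_bits(next));
	}
}
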
+
+static void bnx2x_free_tx_skbs(struct bnx2x *bp)
+{
+	int i;
+
+	for_each_queue(bp, i) {
+		struct bnx2x_fastpath *fp = &bp->fp[i];
+
+		u16 bd_cons = fp->tx_bd_cons;
+		u16 sw_prod = fp->tx_pkt_prod;
+		u16 sw_cons = fp->tx_pkt_cons;
+
+		while (sw_cons != sw_prod) {
+			bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
+			sw_cons++;
+		}
+	}
+}
+
+static void bnx2x_free_rx_skbs(struct bnx2x *bp)
+{
+	int i, j;
+
+	for_each_queue(bp, j) {
+		struct bnx2x_fastpath *fp = &bp->fp[j];
+
+		for (i = 0; i < NUM_RX_BD; i++) {
+			struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
+			struct sk_buff *skb = rx_buf->skb;
+
+			if (skb == NULL)
+				continue;
+
+			dma_unmap_single(&bp->pdev->dev,
+					 dma_unmap_addr(rx_buf, mapping),
+					 bp->rx_buf_size, DMA_FROM_DEVICE);
+
+			rx_buf->skb = NULL;
+			dev_kfree_skb(skb);
+		}
+		if (!fp->disable_tpa)
+			bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
+					    ETH_MAX_AGGREGATION_QUEUES_E1 :
+					    ETH_MAX_AGGREGATION_QUEUES_E1H);
+	}
+}
+
+void bnx2x_free_skbs(struct bnx2x *bp)
+{
+	bnx2x_free_tx_skbs(bp);
+	bnx2x_free_rx_skbs(bp);
+}
+
+static void bnx2x_free_msix_irqs(struct bnx2x *bp)
+{
+	int i, offset = 1;
+
+	free_irq(bp->msix_table[0].vector, bp->dev);
+	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
+	   bp->msix_table[0].vector);
+
+#ifdef BCM_CNIC
+	offset++;
+#endif
+	for_each_queue(bp, i) {
+		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
+		   "state %x\n", i, bp->msix_table[i + offset].vector,
+		   bnx2x_fp(bp, i, state));
+
+		free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
+	}
+}
+
+void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
+{
+	if (bp->flags & USING_MSIX_FLAG) {
+		if (!disable_only)
+			bnx2x_free_msix_irqs(bp);
+		pci_disable_msix(bp->pdev);
+		bp->flags &= ~USING_MSIX_FLAG;
+
+	} else if (bp->flags & USING_MSI_FLAG) {
+		if (!disable_only)
+			free_irq(bp->pdev->irq, bp->dev);
+		pci_disable_msi(bp->pdev);
+		bp->flags &= ~USING_MSI_FLAG;
+
+	} else if (!disable_only)
+		free_irq(bp->pdev->irq, bp->dev);
+}
+
+static int bnx2x_enable_msix(struct bnx2x *bp)
+{
+	int i, rc, offset = 1;
+	int igu_vec = 0;
+
+	bp->msix_table[0].entry = igu_vec;
+	DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
+
+#ifdef BCM_CNIC
+	igu_vec = BP_L_ID(bp) + offset;
+	bp->msix_table[1].entry = igu_vec;
+	DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
+	offset++;
+#endif
+	for_each_queue(bp, i) {
+		igu_vec = BP_L_ID(bp) + offset + i;
+		bp->msix_table[i + offset].entry = igu_vec;
+		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
+		   "(fastpath #%u)\n", i + offset, igu_vec, i);
+	}
+
+	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
+			     BNX2X_NUM_QUEUES(bp) + offset);
+
+	/*
+	 * reconfigure number of tx/rx queues according to available
+	 * MSI-X vectors
+	 */
+	if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
+		/* vectors available for FP */
+		int fp_vec = rc - BNX2X_MSIX_VEC_FP_START;
+
+		DP(NETIF_MSG_IFUP,
+		   "Trying to use less MSI-X vectors: %d\n", rc);
+
+		rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
+
+		if (rc) {
+			DP(NETIF_MSG_IFUP,
+			   "MSI-X is not attainable  rc %d\n", rc);
+			return rc;
+		}
+
+		bp->num_queues = min(bp->num_queues, fp_vec);
+
+		DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
+				  bp->num_queues);
+	} else if (rc) {
+		DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
+		return rc;
+	}
+
+	bp->flags |= USING_MSIX_FLAG;
+
+	return 0;
+}
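
bnx2x_enable_msix() leans on the contract pci_enable_msix() had in this kernel generation: 0 on success, a negative errno on hard failure, or a positive count meaning "only this many vectors are available". The function retries with the advertised count and then shrinks bp->num_queues to fit. A stripped-down sketch of that negotiation (demo_enable_msix and its parameters are generic names, not driver API):

/* Ask for 'want' MSI-X vectors; if the PCI core reports that only
 * fewer are available (positive return value in this kernel era),
 * retry with that count, provided it still meets our minimum.
 * Returns 0 on success; any other value means fall back to MSI/INTx. */
static int demo_enable_msix(struct pci_dev *pdev, struct msix_entry *tbl,
			    int want, int min)
{
	int rc = pci_enable_msix(pdev, tbl, want);

	if (rc > 0 && rc >= min) {
		/* only 'rc' vectors available - the caller must also
		 * shrink its queue count accordingly */
		rc = pci_enable_msix(pdev, tbl, rc);
	}
	return rc;
}
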
+
+static int bnx2x_req_msix_irqs(struct bnx2x *bp)
+{
+	int i, rc, offset = 1;
+
+	rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
+			 bp->dev->name, bp->dev);
+	if (rc) {
+		BNX2X_ERR("request sp irq failed\n");
+		return -EBUSY;
+	}
+
+#ifdef BCM_CNIC
+	offset++;
+#endif
+	for_each_queue(bp, i) {
+		struct bnx2x_fastpath *fp = &bp->fp[i];
+		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
+			 bp->dev->name, i);
+
+		rc = request_irq(bp->msix_table[i + offset].vector,
+				 bnx2x_msix_fp_int, 0, fp->name, fp);
+		if (rc) {
+			BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
+			bnx2x_free_msix_irqs(bp);
+			return -EBUSY;
+		}
+
+		fp->state = BNX2X_FP_STATE_IRQ;
+	}
+
+	i = BNX2X_NUM_QUEUES(bp);
+	netdev_info(bp->dev, "using MSI-X  IRQs: sp %d  fp[%d] %d"
+	       " ... fp[%d] %d\n",
+	       bp->msix_table[0].vector,
+	       0, bp->msix_table[offset].vector,
+	       i - 1, bp->msix_table[offset + i - 1].vector);
+
+	return 0;
+}
+
+static int bnx2x_enable_msi(struct bnx2x *bp)
+{
+	int rc;
+
+	rc = pci_enable_msi(bp->pdev);
+	if (rc) {
+		DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
+		return -1;
+	}
+	bp->flags |= USING_MSI_FLAG;
+
+	return 0;
+}
+
+static int bnx2x_req_irq(struct bnx2x *bp)
+{
+	unsigned long flags;
+	int rc;
+
+	if (bp->flags & USING_MSI_FLAG)
+		flags = 0;
+	else
+		flags = IRQF_SHARED;
+
+	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
+			 bp->dev->name, bp->dev);
+	if (!rc)
+		bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
+
+	return rc;
+}
+
+static void bnx2x_napi_enable(struct bnx2x *bp)
+{
+	int i;
+
+	for_each_queue(bp, i)
+		napi_enable(&bnx2x_fp(bp, i, napi));
+}
+
+static void bnx2x_napi_disable(struct bnx2x *bp)
+{
+	int i;
+
+	for_each_queue(bp, i)
+		napi_disable(&bnx2x_fp(bp, i, napi));
+}
+
+void bnx2x_netif_start(struct bnx2x *bp)
+{
+	int intr_sem;
+
+	intr_sem = atomic_dec_and_test(&bp->intr_sem);
+	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
+
+	if (intr_sem) {
+		if (netif_running(bp->dev)) {
+			bnx2x_napi_enable(bp);
+			bnx2x_int_enable(bp);
+			if (bp->state == BNX2X_STATE_OPEN)
+				netif_tx_wake_all_queues(bp->dev);
+		}
+	}
+}
+
+void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
+{
+	bnx2x_int_disable_sync(bp, disable_hw);
+	bnx2x_napi_disable(bp);
+	netif_tx_disable(bp->dev);
+}
+
+static int bnx2x_set_num_queues(struct bnx2x *bp)
+{
+	int rc = 0;
+
+	switch (bp->int_mode) {
+	case INT_MODE_INTx:
+	case INT_MODE_MSI:
+		bp->num_queues = 1;
+		DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
+		break;
+	default:
+		/* Set number of queues according to bp->multi_mode value */
+		bnx2x_set_num_queues_msix(bp);
+
+		DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
+		   bp->num_queues);
+
+		/* If we can't use MSI-X we only need one fp,
+		 * so try to enable MSI-X with the requested number of fp's
+		 * and fall back to MSI or legacy INTx with one fp
+		 */
+		rc = bnx2x_enable_msix(bp);
+		if (rc)
+			/* failed to enable MSI-X */
+			bp->num_queues = 1;
+		break;
+	}
+	bp->dev->real_num_tx_queues = bp->num_queues;
+	return rc;
+}
+
+/* must be called with rtnl_lock */
+int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
+{
+	u32 load_code;
+	int i, rc;
+
+#ifdef BNX2X_STOP_ON_ERROR
+	if (unlikely(bp->panic))
+		return -EPERM;
+#endif
+
+	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
+
+	rc = bnx2x_set_num_queues(bp);
+
+	if (bnx2x_alloc_mem(bp)) {
+		bnx2x_free_irq(bp, true);
+		return -ENOMEM;
+	}
+
+	for_each_queue(bp, i)
+		bnx2x_fp(bp, i, disable_tpa) =
+					((bp->flags & TPA_ENABLE_FLAG) == 0);
+
+	for_each_queue(bp, i)
+		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
+			       bnx2x_poll, 128);
+
+	bnx2x_napi_enable(bp);
+
+	if (bp->flags & USING_MSIX_FLAG) {
+		rc = bnx2x_req_msix_irqs(bp);
+		if (rc) {
+			bnx2x_free_irq(bp, true);
+			goto load_error1;
+		}
+	} else {
+		/* Fall back to INTx if we failed to enable MSI-X due to lack
+		   of memory (in bnx2x_set_num_queues()) */
+		if ((rc != -ENOMEM) && (bp->int_mode != INT_MODE_INTx))
+			bnx2x_enable_msi(bp);
+		bnx2x_ack_int(bp);
+		rc = bnx2x_req_irq(bp);
+		if (rc) {
+			BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
+			bnx2x_free_irq(bp, true);
+			goto load_error1;
+		}
+		if (bp->flags & USING_MSI_FLAG) {
+			bp->dev->irq = bp->pdev->irq;
+			netdev_info(bp->dev, "using MSI  IRQ %d\n",
+				    bp->pdev->irq);
+		}
+	}
+
+	/* Send a LOAD_REQUEST command to the MCP.
+	   The response gives the type of LOAD command to perform:
+	   if this is the first port to be initialized,
+	   the common blocks should be initialized as well - otherwise not.
+	*/
+	if (!BP_NOMCP(bp)) {
+		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
+		if (!load_code) {
+			BNX2X_ERR("MCP response failure, aborting\n");
+			rc = -EBUSY;
+			goto load_error2;
+		}
+		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
+			rc = -EBUSY; /* other port in diagnostic mode */
+			goto load_error2;
+		}
+
+	} else {
+		int port = BP_PORT(bp);
+
+		DP(NETIF_MSG_IFUP, "NO MCP - load counts      %d, %d, %d\n",
+		   load_count[0], load_count[1], load_count[2]);
+		load_count[0]++;
+		load_count[1 + port]++;
+		DP(NETIF_MSG_IFUP, "NO MCP - new load counts  %d, %d, %d\n",
+		   load_count[0], load_count[1], load_count[2]);
+		if (load_count[0] == 1)
+			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
+		else if (load_count[1 + port] == 1)
+			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
+		else
+			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
+	}
+
+	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
+	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
+		bp->port.pmf = 1;
+	else
+		bp->port.pmf = 0;
+	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
+
+	/* Initialize HW */
+	rc = bnx2x_init_hw(bp, load_code);
+	if (rc) {
+		BNX2X_ERR("HW init failed, aborting\n");
+		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
+		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
+		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
+		goto load_error2;
+	}
+
+	/* Setup NIC internals and enable interrupts */
+	bnx2x_nic_init(bp, load_code);
+
+	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
+	    (bp->common.shmem2_base))
+		SHMEM2_WR(bp, dcc_support,
+			  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
+			   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
+
+	/* Send LOAD_DONE command to MCP */
+	if (!BP_NOMCP(bp)) {
+		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
+		if (!load_code) {
+			BNX2X_ERR("MCP response failure, aborting\n");
+			rc = -EBUSY;
+			goto load_error3;
+		}
+	}
+
+	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
+
+	rc = bnx2x_setup_leading(bp);
+	if (rc) {
+		BNX2X_ERR("Setup leading failed!\n");
+#ifndef BNX2X_STOP_ON_ERROR
+		goto load_error3;
+#else
+		bp->panic = 1;
+		return -EBUSY;
+#endif
+	}
+
+	if (CHIP_IS_E1H(bp))
+		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
+			DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
+			bp->flags |= MF_FUNC_DIS;
+		}
+
+	if (bp->state == BNX2X_STATE_OPEN) {
+#ifdef BCM_CNIC
+		/* Enable Timer scan */
+		REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
+#endif
+		for_each_nondefault_queue(bp, i) {
+			rc = bnx2x_setup_multi(bp, i);
+			if (rc)
+#ifdef BCM_CNIC
+				goto load_error4;
+#else
+				goto load_error3;
+#endif
+		}
+
+		if (CHIP_IS_E1(bp))
+			bnx2x_set_eth_mac_addr_e1(bp, 1);
+		else
+			bnx2x_set_eth_mac_addr_e1h(bp, 1);
+#ifdef BCM_CNIC
+		/* Set iSCSI L2 MAC */
+		mutex_lock(&bp->cnic_mutex);
+		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
+			bnx2x_set_iscsi_eth_mac_addr(bp, 1);
+			bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
+			bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
+				      CNIC_SB_ID(bp));
+		}
+		mutex_unlock(&bp->cnic_mutex);
+#endif
+	}
+
+	if (bp->port.pmf)
+		bnx2x_initial_phy_init(bp, load_mode);
+
+	/* Start fast path */
+	switch (load_mode) {
+	case LOAD_NORMAL:
+		if (bp->state == BNX2X_STATE_OPEN) {
+			/* Tx queues should only be re-enabled */
+			netif_tx_wake_all_queues(bp->dev);
+		}
+		/* Initialize the receive filter. */
+		bnx2x_set_rx_mode(bp->dev);
+		break;
+
+	case LOAD_OPEN:
+		netif_tx_start_all_queues(bp->dev);
+		if (bp->state != BNX2X_STATE_OPEN)
+			netif_tx_disable(bp->dev);
+		/* Initialize the receive filter. */
+		bnx2x_set_rx_mode(bp->dev);
+		break;
+
+	case LOAD_DIAG:
+		/* Initialize the receive filter. */
+		bnx2x_set_rx_mode(bp->dev);
+		bp->state = BNX2X_STATE_DIAG;
+		break;
+
+	default:
+		break;
+	}
+
+	if (!bp->port.pmf)
+		bnx2x__link_status_update(bp);
+
+	/* start the timer */
+	mod_timer(&bp->timer, jiffies + bp->current_interval);
+
+#ifdef BCM_CNIC
+	bnx2x_setup_cnic_irq_info(bp);
+	if (bp->state == BNX2X_STATE_OPEN)
+		bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
+#endif
+	bnx2x_inc_load_cnt(bp);
+
+	return 0;
+
+#ifdef BCM_CNIC
+load_error4:
+	/* Disable Timer scan */
+	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
+#endif
+load_error3:
+	bnx2x_int_disable_sync(bp, 1);
+	if (!BP_NOMCP(bp)) {
+		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
+		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
+	}
+	bp->port.pmf = 0;
+	/* Free SKBs, SGEs, TPA pool and driver internals */
+	bnx2x_free_skbs(bp);
+	for_each_queue(bp, i)
+		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
+load_error2:
+	/* Release IRQs */
+	bnx2x_free_irq(bp, false);
+load_error1:
+	bnx2x_napi_disable(bp);
+	for_each_queue(bp, i)
+		netif_napi_del(&bnx2x_fp(bp, i, napi));
+	bnx2x_free_mem(bp);
+
+	return rc;
+}
+
+/* must be called with rtnl_lock */
+int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
+{
+	int i;
+
+	if (bp->state == BNX2X_STATE_CLOSED) {
+		/* Interface has been removed - nothing to recover */
+		bp->recovery_state = BNX2X_RECOVERY_DONE;
+		bp->is_leader = 0;
+		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
+		smp_wmb();
+
+		return -EINVAL;
+	}
+
+#ifdef BCM_CNIC
+	bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
+#endif
+	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
+
+	/* Set "drop all" */
+	bp->rx_mode = BNX2X_RX_MODE_NONE;
+	bnx2x_set_storm_rx_mode(bp);
+
+	/* Disable HW interrupts, NAPI and Tx */
+	bnx2x_netif_stop(bp, 1);
+	netif_carrier_off(bp->dev);
+
+	del_timer_sync(&bp->timer);
+	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
+		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
+	bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+
+	/* Release IRQs */
+	bnx2x_free_irq(bp, false);
+
+	/* Cleanup the chip if needed */
+	if (unload_mode != UNLOAD_RECOVERY)
+		bnx2x_chip_cleanup(bp, unload_mode);
+
+	bp->port.pmf = 0;
+
+	/* Free SKBs, SGEs, TPA pool and driver internals */
+	bnx2x_free_skbs(bp);
+	for_each_queue(bp, i)
+		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
+	for_each_queue(bp, i)
+		netif_napi_del(&bnx2x_fp(bp, i, napi));
+	bnx2x_free_mem(bp);
+
+	bp->state = BNX2X_STATE_CLOSED;
+
+	/* The last driver must disable the "close the gate" functionality if
+	 * there is no parity attention or "process kill" pending.
+	 */
+	if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
+	    bnx2x_reset_is_done(bp))
+		bnx2x_disable_close_the_gate(bp);
+
+	/* Reset the MCP mailbox sequence if there is an ongoing recovery */
+	if (unload_mode == UNLOAD_RECOVERY)
+		bp->fw_seq = 0;
+
+	return 0;
+}
+
+int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
+{
+	u16 pmcsr;
+
+	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
+
+	switch (state) {
+	case PCI_D0:
+		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
+				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
+				       PCI_PM_CTRL_PME_STATUS));
+
+		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
+			/* delay required during transition out of D3hot */
+			msleep(20);
+		break;
+
+	case PCI_D3hot:
+		/* If there are other clients above, don't
+		   shut down the power */
+		if (atomic_read(&bp->pdev->enable_cnt) != 1)
+			return 0;
+		/* Don't shut down the power for emulation and FPGA */
+		if (CHIP_REV_IS_SLOW(bp))
+			return 0;
+
+		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
+		pmcsr |= 3;
+
+		if (bp->wol)
+			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
+
+		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
+				      pmcsr);
+
+		/* No more memory access after this point until
+		 * device is brought back to D0.
+		 */
+		break;
+
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/*
+ * net_device service functions
+ */
+
+static int bnx2x_poll(struct napi_struct *napi, int budget)
+{
+	int work_done = 0;
+	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
+						 napi);
+	struct bnx2x *bp = fp->bp;
+
+	while (1) {
+#ifdef BNX2X_STOP_ON_ERROR
+		if (unlikely(bp->panic)) {
+			napi_complete(napi);
+			return 0;
+		}
+#endif
+
+		if (bnx2x_has_tx_work(fp))
+			bnx2x_tx_int(fp);
+
+		if (bnx2x_has_rx_work(fp)) {
+			work_done += bnx2x_rx_int(fp, budget - work_done);
+
+			/* must not complete if we consumed full budget */
+			if (work_done >= budget)
+				break;
+		}
+
+		/* Fall out from the NAPI loop if needed */
+		if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
+			bnx2x_update_fpsb_idx(fp);
+			/* bnx2x_has_rx_work() reads the status block, thus we
+			 * need to ensure that the status block indices have
+			 * actually been read (bnx2x_update_fpsb_idx) prior to
+			 * this check (bnx2x_has_rx_work), so that we won't
+			 * write a "newer" value of the status block to the
+			 * IGU (if there was a DMA right after
+			 * bnx2x_has_rx_work and there is no rmb, the memory
+			 * read (bnx2x_update_fpsb_idx) may be postponed to
+			 * right before bnx2x_ack_sb). In that case there
+			 * would never be another interrupt until there is
+			 * another update of the status block, while there
+			 * is still unhandled work.
+			 */
+			rmb();
+
+			if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
+				napi_complete(napi);
+				/* Re-enable interrupts */
+				bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
+					     le16_to_cpu(fp->fp_c_idx),
+					     IGU_INT_NOP, 1);
+				bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
+					     le16_to_cpu(fp->fp_u_idx),
+					     IGU_INT_ENABLE, 1);
+				break;
+			}
+		}
+	}
+
+	return work_done;
+}
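
The loop above is the canonical NAPI completion dance: never complete while the budget is exhausted, and only re-enable interrupts after a barrier-separated re-check, so that work arriving between the first check and napi_complete() cannot be stranded. The same skeleton in isolation, with demo_has_work()/demo_do_work()/demo_enable_irq() as hypothetical placeholders:

static int demo_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;

	for (;;) {
		work_done += demo_do_work(napi, budget - work_done);
		if (work_done >= budget)
			break;	/* must not complete: budget consumed */

		if (!demo_has_work(napi)) {
			rmb();	/* re-read HW indices before re-checking */
			if (!demo_has_work(napi)) {
				napi_complete(napi);
				demo_enable_irq(napi);	/* ack + unmask */
				break;
			}
		}
	}
	return work_done;
}
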
+
+
+/* We split the first BD into a header BD and a data BD
+ * to ease the pain of our fellow microcode engineers;
+ * we use one mapping for both BDs.
+ * So far this has only been observed to happen
+ * in Other Operating Systems(TM).
+ */
+static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
+				   struct bnx2x_fastpath *fp,
+				   struct sw_tx_bd *tx_buf,
+				   struct eth_tx_start_bd **tx_bd, u16 hlen,
+				   u16 bd_prod, int nbd)
+{
+	struct eth_tx_start_bd *h_tx_bd = *tx_bd;
+	struct eth_tx_bd *d_tx_bd;
+	dma_addr_t mapping;
+	int old_len = le16_to_cpu(h_tx_bd->nbytes);
+
+	/* first fix first BD */
+	h_tx_bd->nbd = cpu_to_le16(nbd);
+	h_tx_bd->nbytes = cpu_to_le16(hlen);
+
+	DP(NETIF_MSG_TX_QUEUED,	"TSO split header size is %d "
+	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
+	   h_tx_bd->addr_lo, h_tx_bd->nbd);
+
+	/* now get a new data BD
+	 * (after the pbd) and fill it */
+	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
+	d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
+
+	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
+			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
+
+	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
+	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
+	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
+
+	/* this marks the BD as one that has no individual mapping */
+	tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
+
+	DP(NETIF_MSG_TX_QUEUED,
+	   "TSO split data size is %d (%x:%x)\n",
+	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
+
+	/* update tx_bd */
+	*tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
+
+	return bd_prod;
+}
+
+static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
+{
+	if (fix > 0)
+		csum = (u16) ~csum_fold(csum_sub(csum,
+				csum_partial(t_header - fix, fix, 0)));
+
+	else if (fix < 0)
+		csum = (u16) ~csum_fold(csum_add(csum,
+				csum_partial(t_header, -fix, 0)));
+
+	return swab16(csum);
+}
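
bnx2x_csum_fix() relies on the Internet checksum being a one's-complement sum: a checksum computed over the wrong byte range can be corrected by folding in (or out) the 16-bit partial sum of the offending bytes, rather than recomputing from scratch. A plain-C illustration of that arithmetic using no kernel helpers (the demo_* names are hypothetical):

/* 16-bit one's-complement sum of a buffer (big-endian pairing) */
static unsigned short demo_partial_sum(const unsigned char *p, int len)
{
	unsigned long sum = 0;

	for (; len > 1; p += 2, len -= 2)
		sum += (p[0] << 8) | p[1];
	if (len)
		sum += p[0] << 8;
	while (sum >> 16)			/* fold carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return (unsigned short)sum;
}

/* Remove 'fix' stray leading bytes from an existing checksum by
 * adding the one's complement of their partial sum */
static unsigned short demo_csum_pull(unsigned short csum,
				     const unsigned char *extra, int fix)
{
	unsigned long sum = csum +
		(0xffff & ~demo_partial_sum(extra, fix));

	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (unsigned short)sum;
}
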
+
+static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
+{
+	u32 rc;
+
+	if (skb->ip_summed != CHECKSUM_PARTIAL)
+		rc = XMIT_PLAIN;
+
+	else {
+		if (skb->protocol == htons(ETH_P_IPV6)) {
+			rc = XMIT_CSUM_V6;
+			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+				rc |= XMIT_CSUM_TCP;
+
+		} else {
+			rc = XMIT_CSUM_V4;
+			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+				rc |= XMIT_CSUM_TCP;
+		}
+	}
+
+	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
+		rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
+
+	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+		rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
+
+	return rc;
+}
+
+#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
+/* Check if a packet requires linearization (i.e. it is too fragmented).
+   There is no need to check fragmentation if the page size > 8K (there
+   will be no violation of FW restrictions) */
+static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
+			     u32 xmit_type)
+{
+	int to_copy = 0;
+	int hlen = 0;
+	int first_bd_sz = 0;
+
+	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
+	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
+
+		if (xmit_type & XMIT_GSO) {
+			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
+			/* Check if LSO packet needs to be copied:
+			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
+			int wnd_size = MAX_FETCH_BD - 3;
+			/* Number of windows to check */
+			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
+			int wnd_idx = 0;
+			int frag_idx = 0;
+			u32 wnd_sum = 0;
+
+			/* Headers length */
+			hlen = (int)(skb_transport_header(skb) - skb->data) +
+				tcp_hdrlen(skb);
+
+			/* Amount of data (w/o headers) on linear part of SKB */
+			first_bd_sz = skb_headlen(skb) - hlen;
+
+			wnd_sum  = first_bd_sz;
+
+			/* Calculate the first sum - it's special */
+			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
+				wnd_sum +=
+					skb_shinfo(skb)->frags[frag_idx].size;
+
+			/* If there was data on linear skb data - check it */
+			if (first_bd_sz > 0) {
+				if (unlikely(wnd_sum < lso_mss)) {
+					to_copy = 1;
+					goto exit_lbl;
+				}
+
+				wnd_sum -= first_bd_sz;
+			}
+
+			/* Others are easier: run through the frag list and
+			   check all windows */
+			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
+				wnd_sum +=
+			  skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
+
+				if (unlikely(wnd_sum < lso_mss)) {
+					to_copy = 1;
+					break;
+				}
+				wnd_sum -=
+					skb_shinfo(skb)->frags[wnd_idx].size;
+			}
+		} else {
+			/* a too fragmented non-LSO packet should
+			   always be linearized */
+			to_copy = 1;
+		}
+	}
+
+exit_lbl:
+	if (unlikely(to_copy))
+		DP(NETIF_MSG_TX_QUEUED,
+		   "Linearization IS REQUIRED for %s packet. "
+		   "num_frags %d  hlen %d  first_bd_sz %d\n",
+		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
+		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
+
+	return to_copy;
+}
+#endif
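
In other words, the FW restriction being enforced is that every window of (MAX_FETCH_BD - 3) consecutive fragments must carry at least one MSS worth of payload; if any window falls short, the only remedy is skb_linearize(). The sliding-window test reduces to the following standalone sketch (plain arrays instead of skb_shinfo(); illustrative only):

/* Return 1 if any window of 'wnd_size' consecutive fragment sizes
 * sums to less than 'mss' - i.e. the packet must be linearized. */
static int demo_needs_linearize(const unsigned int *frag_sz, int nr_frags,
				int wnd_size, unsigned int mss)
{
	unsigned int wnd_sum = 0;
	int i;

	if (nr_frags < wnd_size)
		return 0;		/* few enough BDs, nothing to check */

	for (i = 0; i < wnd_size; i++)	/* sum the first window */
		wnd_sum += frag_sz[i];

	for (i = 0; ; i++) {
		if (wnd_sum < mss)
			return 1;	/* window under one MSS */
		if (i + wnd_size >= nr_frags)
			return 0;	/* checked the last window */
		wnd_sum += frag_sz[i + wnd_size] - frag_sz[i]; /* slide */
	}
}
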
+
+/* called with netif_tx_lock
+ * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
+ * netif_wake_queue()
+ */
+netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	struct bnx2x_fastpath *fp;
+	struct netdev_queue *txq;
+	struct sw_tx_bd *tx_buf;
+	struct eth_tx_start_bd *tx_start_bd;
+	struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
+	struct eth_tx_parse_bd *pbd = NULL;
+	u16 pkt_prod, bd_prod;
+	int nbd, fp_index;
+	dma_addr_t mapping;
+	u32 xmit_type = bnx2x_xmit_type(bp, skb);
+	int i;
+	u8 hlen = 0;
+	__le16 pkt_size = 0;
+	struct ethhdr *eth;
+	u8 mac_type = UNICAST_ADDRESS;
+
+#ifdef BNX2X_STOP_ON_ERROR
+	if (unlikely(bp->panic))
+		return NETDEV_TX_BUSY;
+#endif
+
+	fp_index = skb_get_queue_mapping(skb);
+	txq = netdev_get_tx_queue(dev, fp_index);
+
+	fp = &bp->fp[fp_index];
+
+	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
+		fp->eth_q_stats.driver_xoff++;
+		netif_tx_stop_queue(txq);
+		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
+		return NETDEV_TX_BUSY;
+	}
+
+	DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
+	   "  gso type %x  xmit_type %x\n",
+	   skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
+	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
+
+	eth = (struct ethhdr *)skb->data;
+
+	/* set flag according to packet type (UNICAST_ADDRESS is default) */
+	if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
+		if (is_broadcast_ether_addr(eth->h_dest))
+			mac_type = BROADCAST_ADDRESS;
+		else
+			mac_type = MULTICAST_ADDRESS;
+	}
+
+#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
+	/* First, check if we need to linearize the skb (due to FW
+	   restrictions). No need to check fragmentation if page size > 8K
+	   (there will be no violation of FW restrictions) */
+	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
+		/* Statistics of linearization */
+		bp->lin_cnt++;
+		if (skb_linearize(skb) != 0) {
+			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
+			   "silently dropping this SKB\n");
+			dev_kfree_skb_any(skb);
+			return NETDEV_TX_OK;
+		}
+	}
+#endif
+
+	/*
+	 * Please read carefully. First we use one BD which we mark as start,
+	 * then we have a parsing info BD (used for TSO or xsum),
+	 * and only then we have the rest of the TSO BDs.
+	 * (Don't forget to mark the last one as last,
+	 * and to unmap only AFTER you write to the BD ...)
+	 * And above all, all pbd sizes are in words - NOT DWORDS!
+	 */
+
+	pkt_prod = fp->tx_pkt_prod++;
+	bd_prod = TX_BD(fp->tx_bd_prod);
+
+	/* get a tx_buf and first BD */
+	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
+	tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
+
+	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
+	tx_start_bd->general_data =  (mac_type <<
+					ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
+	/* header nbd */
+	tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
+
+	/* remember the first BD of the packet */
+	tx_buf->first_bd = fp->tx_bd_prod;
+	tx_buf->skb = skb;
+	tx_buf->flags = 0;
+
+	DP(NETIF_MSG_TX_QUEUED,
+	   "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
+	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
+
+#ifdef BCM_VLAN
+	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
+	    (bp->flags & HW_VLAN_TX_FLAG)) {
+		tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
+		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
+	} else
+#endif
+		tx_start_bd->vlan = cpu_to_le16(pkt_prod);
+
+	/* turn on parsing and get a BD */
+	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
+	pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
+
+	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
+
+	if (xmit_type & XMIT_CSUM) {
+		hlen = (skb_network_header(skb) - skb->data) / 2;
+
+		/* for now NS flag is not used in Linux */
+		pbd->global_data =
+			(hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
+				 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
+
+		pbd->ip_hlen = (skb_transport_header(skb) -
+				skb_network_header(skb)) / 2;
+
+		hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
+
+		pbd->total_hlen = cpu_to_le16(hlen);
+		hlen = hlen*2;
+
+		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
+
+		if (xmit_type & XMIT_CSUM_V4)
+			tx_start_bd->bd_flags.as_bitfield |=
+						ETH_TX_BD_FLAGS_IP_CSUM;
+		else
+			tx_start_bd->bd_flags.as_bitfield |=
+						ETH_TX_BD_FLAGS_IPV6;
+
+		if (xmit_type & XMIT_CSUM_TCP) {
+			pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
+
+		} else {
+			s8 fix = SKB_CS_OFF(skb); /* signed! */
+
+			pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
+
+			DP(NETIF_MSG_TX_QUEUED,
+			   "hlen %d  fix %d  csum before fix %x\n",
+			   le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
+
+			/* HW bug: fixup the CSUM */
+			pbd->tcp_pseudo_csum =
+				bnx2x_csum_fix(skb_transport_header(skb),
+					       SKB_CS(skb), fix);
+
+			DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
+			   pbd->tcp_pseudo_csum);
+		}
+	}
+
+	mapping = dma_map_single(&bp->pdev->dev, skb->data,
+				 skb_headlen(skb), DMA_TO_DEVICE);
+
+	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
+	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
+	nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
+	tx_start_bd->nbd = cpu_to_le16(nbd);
+	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
+	pkt_size = tx_start_bd->nbytes;
+
+	DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
+	   "  nbytes %d  flags %x  vlan %x\n",
+	   tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
+	   le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
+	   tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
+
+	if (xmit_type & XMIT_GSO) {
+
+		DP(NETIF_MSG_TX_QUEUED,
+		   "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
+		   skb->len, hlen, skb_headlen(skb),
+		   skb_shinfo(skb)->gso_size);
+
+		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
+
+		if (unlikely(skb_headlen(skb) > hlen))
+			bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
+						 hlen, bd_prod, ++nbd);
+
+		pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
+		pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
+		pbd->tcp_flags = pbd_tcp_flags(skb);
+
+		if (xmit_type & XMIT_GSO_V4) {
+			pbd->ip_id = swab16(ip_hdr(skb)->id);
+			pbd->tcp_pseudo_csum =
+				swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
+							  ip_hdr(skb)->daddr,
+							  0, IPPROTO_TCP, 0));
+
+		} else
+			pbd->tcp_pseudo_csum =
+				swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+							&ipv6_hdr(skb)->daddr,
+							0, IPPROTO_TCP, 0));
+
+		pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
+	}
+	tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
+
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
+		tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
+		if (total_pkt_bd == NULL)
+			total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
+
+		mapping = dma_map_page(&bp->pdev->dev, frag->page,
+				       frag->page_offset,
+				       frag->size, DMA_TO_DEVICE);
+
+		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
+		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
+		tx_data_bd->nbytes = cpu_to_le16(frag->size);
+		le16_add_cpu(&pkt_size, frag->size);
+
+		DP(NETIF_MSG_TX_QUEUED,
+		   "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
+		   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
+		   le16_to_cpu(tx_data_bd->nbytes));
+	}
+
+	DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
+
+	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
+
+	/* now send a tx doorbell, counting the next BD
+	 * if the packet contains or ends with it
+	 */
+	if (TX_BD_POFF(bd_prod) < nbd)
+		nbd++;
+
+	if (total_pkt_bd != NULL)
+		total_pkt_bd->total_pkt_bytes = pkt_size;
+
+	if (pbd)
+		DP(NETIF_MSG_TX_QUEUED,
+		   "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
+		   "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
+		   pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
+		   pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
+		   pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
+
+	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
+
+	/*
+	 * Make sure that the BD data is updated before updating the producer
+	 * since FW might read the BD right after the producer is updated.
+	 * This is only applicable for weak-ordered memory model archs such
+	 * as IA-64. The following barrier is also mandatory since the FW
+	 * assumes packets must have BDs.
+	 */
+	wmb();
+
+	fp->tx_db.data.prod += nbd;
+	barrier();
+	DOORBELL(bp, fp->index, fp->tx_db.raw);
+
+	mmiowb();
+
+	fp->tx_bd_prod += nbd;
+
+	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
+		netif_tx_stop_queue(txq);
+
+		/* The paired memory barrier is in bnx2x_tx_int(); we have to
+		 * keep the ordering of the set_bit() in netif_tx_stop_queue()
+		 * and the read of fp->tx_bd_cons */
+		smp_mb();
+
+		fp->eth_q_stats.driver_xoff++;
+		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
+			netif_tx_wake_queue(txq);
+	}
+	fp->tx_pkt++;
+
+	return NETDEV_TX_OK;
+}
+
+/* called with rtnl_lock */
+int bnx2x_change_mac_addr(struct net_device *dev, void *p)
+{
+	struct sockaddr *addr = p;
+	struct bnx2x *bp = netdev_priv(dev);
+
+	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
+		return -EINVAL;
+
+	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+	if (netif_running(dev)) {
+		if (CHIP_IS_E1(bp))
+			bnx2x_set_eth_mac_addr_e1(bp, 1);
+		else
+			bnx2x_set_eth_mac_addr_e1h(bp, 1);
+	}
+
+	return 0;
+}
+
+/* called with rtnl_lock */
+int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	int rc = 0;
+
+	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
+		return -EAGAIN;
+	}
+
+	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
+	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
+		return -EINVAL;
+
+	/* This does not race with packet allocation
+	 * because the actual alloc size is
+	 * only updated as part of load
+	 */
+	dev->mtu = new_mtu;
+
+	if (netif_running(dev)) {
+		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
+	}
+
+	return rc;
+}
+
+void bnx2x_tx_timeout(struct net_device *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+#ifdef BNX2X_STOP_ON_ERROR
+	if (!bp->panic)
+		bnx2x_panic();
+#endif
+	/* This allows the netif to be shut down gracefully before resetting */
+	schedule_delayed_work(&bp->reset_task, 0);
+}
+
+#ifdef BCM_VLAN
+/* called with rtnl_lock */
+void bnx2x_vlan_rx_register(struct net_device *dev,
+				   struct vlan_group *vlgrp)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	bp->vlgrp = vlgrp;
+
+	/* Set flags according to the required capabilities */
+	bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
+
+	if (dev->features & NETIF_F_HW_VLAN_TX)
+		bp->flags |= HW_VLAN_TX_FLAG;
+
+	if (dev->features & NETIF_F_HW_VLAN_RX)
+		bp->flags |= HW_VLAN_RX_FLAG;
+
+	if (netif_running(dev))
+		bnx2x_set_client_config(bp);
+}
+
+#endif
+
+int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct bnx2x *bp;
+
+	if (!dev) {
+		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
+		return -ENODEV;
+	}
+	bp = netdev_priv(dev);
+
+	rtnl_lock();
+
+	pci_save_state(pdev);
+
+	if (!netif_running(dev)) {
+		rtnl_unlock();
+		return 0;
+	}
+
+	netif_device_detach(dev);
+
+	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
+
+	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
+
+	rtnl_unlock();
+
+	return 0;
+}
+
+int bnx2x_resume(struct pci_dev *pdev)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct bnx2x *bp;
+	int rc;
+
+	if (!dev) {
+		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
+		return -ENODEV;
+	}
+	bp = netdev_priv(dev);
+
+	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
+		return -EAGAIN;
+	}
+
+	rtnl_lock();
+
+	pci_restore_state(pdev);
+
+	if (!netif_running(dev)) {
+		rtnl_unlock();
+		return 0;
+	}
+
+	bnx2x_set_power_state(bp, PCI_D0);
+	netif_device_attach(dev);
+
+	rc = bnx2x_nic_load(bp, LOAD_OPEN);
+
+	rtnl_unlock();
+
+	return rc;
+}
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h
new file mode 100644
index 0000000..d1979b1
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_cmn.h
@@ -0,0 +1,652 @@
+/* bnx2x_cmn.h: Broadcom Everest network driver.
+ *
+ * Copyright (c) 2007-2010 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Eliezer Tamir
+ * Based on code from Michael Chan's bnx2 driver
+ * UDP CSUM errata workaround by Arik Gendelman
+ * Slowpath and fastpath rework by Vladislav Zolotarov
+ * Statistics and Link management by Yitchak Gertner
+ *
+ */
+#ifndef BNX2X_CMN_H
+#define BNX2X_CMN_H
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+
+
+#include "bnx2x.h"
+
+
+/*********************** Interfaces ****************************
+ *  Functions that need to be implemented by each driver version
+ */
+
+/**
+ * Initialize link parameters structure variables.
+ *
+ * @param bp
+ * @param load_mode
+ *
+ * @return u8
+ */
+u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode);
+
+/**
+ * Configure hw according to link parameters structure.
+ *
+ * @param bp
+ */
+void bnx2x_link_set(struct bnx2x *bp);
+
+/**
+ * Query link status
+ *
+ * @param bp
+ *
+ * @return 0 - link is UP
+ */
+u8 bnx2x_link_test(struct bnx2x *bp);
+
+/**
+ * Handles link status change
+ *
+ * @param bp
+ */
+void bnx2x__link_status_update(struct bnx2x *bp);
+
+/**
+ * MSI-X slowpath interrupt handler
+ *
+ * @param irq
+ * @param dev_instance
+ *
+ * @return irqreturn_t
+ */
+irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance);
+
+/**
+ * non MSI-X interrupt handler
+ *
+ * @param irq
+ * @param dev_instance
+ *
+ * @return irqreturn_t
+ */
+irqreturn_t bnx2x_interrupt(int irq, void *dev_instance);
+#ifdef BCM_CNIC
+
+/**
+ * Send command to cnic driver
+ *
+ * @param bp
+ * @param cmd
+ */
+int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
+
+/**
+ * Provides cnic information for proper interrupt handling
+ *
+ * @param bp
+ */
+void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
+#endif
+
+/**
+ * Enable HW interrupts.
+ *
+ * @param bp
+ */
+void bnx2x_int_enable(struct bnx2x *bp);
+
+/**
+ * Disable interrupts. This function ensures that no ISRs or
+ * SP DPCs (sp_task) are running after it returns.
+ *
+ * @param bp
+ * @param disable_hw if true, disable HW interrupts.
+ */
+void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);
+
+/**
+ * Init HW blocks according to current initialization stage:
+ * COMMON, PORT or FUNCTION.
+ *
+ * @param bp
+ * @param load_code COMMON, PORT or FUNCTION
+ *
+ * @return int
+ */
+int bnx2x_init_hw(struct bnx2x *bp, u32 load_code);
+
+/**
+ * Init driver internals:
+ *  - rings
+ *  - status blocks
+ *  - etc.
+ *
+ * @param bp
+ * @param load_code COMMON, PORT or FUNCTION
+ */
+void bnx2x_nic_init(struct bnx2x *bp, u32 load_code);
+
+/**
+ * Allocate driver's memory.
+ *
+ * @param bp
+ *
+ * @return int
+ */
+int bnx2x_alloc_mem(struct bnx2x *bp);
+
+/**
+ * Release driver's memory.
+ *
+ * @param bp
+ */
+void bnx2x_free_mem(struct bnx2x *bp);
+
+/**
+ * Bring up a leading (the first) eth Client.
+ *
+ * @param bp
+ *
+ * @return int
+ */
+int bnx2x_setup_leading(struct bnx2x *bp);
+
+/**
+ * Setup non-leading eth Client.
+ *
+ * @param bp
+ * @param index client index
+ *
+ * @return int
+ */
+int bnx2x_setup_multi(struct bnx2x *bp, int index);
+
+/**
+ * Set the number of queues according to mode and the number of
+ * available MSI-X vectors.
+ *
+ * @param bp
+ *
+ */
+void bnx2x_set_num_queues_msix(struct bnx2x *bp);
+
+/**
+ * Cleanup chip internals:
+ * - Cleanup MAC configuration.
+ * - Close clients.
+ * - etc.
+ *
+ * @param bp
+ * @param unload_mode
+ */
+void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode);
+
+/**
+ * Acquire HW lock.
+ *
+ * @param bp
+ * @param resource Resource bit which was locked
+ *
+ * @return int
+ */
+int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource);
+
+/**
+ * Release HW lock.
+ *
+ * @param bp driver handle
+ * @param resource Resource bit which was locked
+ *
+ * @return int
+ */
+int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);
+
+/**
+ * Configure eth MAC address in the HW according to the value in
+ * netdev->dev_addr for 57711
+ *
+ * @param bp driver handle
+ * @param set
+ */
+void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
+
+/**
+ * Configure eth MAC address in the HW according to the value in
+ * netdev->dev_addr for 57710
+ *
+ * @param bp driver handle
+ * @param set
+ */
+void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set);
+
+#ifdef BCM_CNIC
+/**
+ * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
+ * MAC(s). The function will wait until the ramrod completes.
+ *
+ * @param bp driver handle
+ * @param set set or clear the CAM entry
+ *
+ * @return 0 on success, -ENODEV if the ramrod doesn't complete.
+ */
+int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set);
+#endif
+
+/**
+ * Initialize status block in FW and HW
+ *
+ * @param bp driver handle
+ * @param sb host_status_block
+ * @param mapping status block dma address (dma_addr_t)
+ * @param sb_id status block id
+ */
+void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
+			  dma_addr_t mapping, int sb_id);
+
+/**
+ * Reconfigure FW/HW according to dev->flags rx mode
+ *
+ * @param dev net_device
+ *
+ */
+void bnx2x_set_rx_mode(struct net_device *dev);
+
+/**
+ * Configure MAC filtering rules in a FW.
+ *
+ * @param bp driver handle
+ */
+void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
+
+/* Parity errors related */
+void bnx2x_inc_load_cnt(struct bnx2x *bp);
+u32 bnx2x_dec_load_cnt(struct bnx2x *bp);
+bool bnx2x_chk_parity_attn(struct bnx2x *bp);
+bool bnx2x_reset_is_done(struct bnx2x *bp);
+void bnx2x_disable_close_the_gate(struct bnx2x *bp);
+
+/**
+ * Perform statistics handling according to event
+ *
+ * @param bp driver handle
+ * @param event bnx2x_stats_event
+ */
+void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
+
+/**
+ * Configures FW with client parameters (like HW VLAN removal)
+ * for each active client.
+ *
+ * @param bp
+ */
+void bnx2x_set_client_config(struct bnx2x *bp);
+
+/**
+ * Handle sp events
+ *
+ * @param fp fastpath handle for the event
+ * @param rr_cqe eth_rx_cqe
+ */
+void bnx2x_sp_event(struct bnx2x_fastpath *fp,  union eth_rx_cqe *rr_cqe);
+
+
+static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
+{
+	struct host_status_block *fpsb = fp->status_blk;
+
+	barrier(); /* status block is written to by the chip */
+	fp->fp_c_idx = fpsb->c_status_block.status_block_index;
+	fp->fp_u_idx = fpsb->u_status_block.status_block_index;
+}
+
+static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
+					struct bnx2x_fastpath *fp,
+					u16 bd_prod, u16 rx_comp_prod,
+					u16 rx_sge_prod)
+{
+	struct ustorm_eth_rx_producers rx_prods = {0};
+	int i;
+
+	/* Update producers */
+	rx_prods.bd_prod = bd_prod;
+	rx_prods.cqe_prod = rx_comp_prod;
+	rx_prods.sge_prod = rx_sge_prod;
+
+	/*
+	 * Make sure that the BD and SGE data is updated before updating the
+	 * producers since FW might read the BD/SGE right after the producer
+	 * is updated.
+	 * This is only applicable for weak-ordered memory model archs such
+	 * as IA-64. The following barrier is also mandatory since the FW
+	 * assumes BDs must have buffers.
+	 */
+	wmb();
+
+	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
+		REG_WR(bp, BAR_USTRORM_INTMEM +
+		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
+		       ((u32 *)&rx_prods)[i]);
+
+	mmiowb(); /* keep prod updates ordered */
+
+	DP(NETIF_MSG_RX_STATUS,
+	   "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
+	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
+}
+
+static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
+				u8 storm, u16 index, u8 op, u8 update)
+{
+	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
+		       COMMAND_REG_INT_ACK);
+	struct igu_ack_register igu_ack;
+
+	igu_ack.status_block_index = index;
+	igu_ack.sb_id_and_flags =
+			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
+			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
+			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
+			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
+
+	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
+	   (*(u32 *)&igu_ack), hc_addr);
+	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
+
+	/* Make sure that ACK is written */
+	mmiowb();
+	barrier();
+}
+static inline u16 bnx2x_ack_int(struct bnx2x *bp)
+{
+	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
+		       COMMAND_REG_SIMD_MASK);
+	u32 result = REG_RD(bp, hc_addr);
+
+	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
+	   result, hc_addr);
+
+	return result;
+}
+
+/*
+ * fast path service functions
+ */
+
+static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
+{
+	/* Tell compiler that consumer and producer can change */
+	barrier();
+	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
+}
+
+static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
+{
+	s16 used;
+	u16 prod;
+	u16 cons;
+
+	prod = fp->tx_bd_prod;
+	cons = fp->tx_bd_cons;
+
+	/* NUM_TX_RINGS = number of "next-page" entries;
+	   it will be used as a threshold */
+	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
+
+#ifdef BNX2X_STOP_ON_ERROR
+	WARN_ON(used < 0);
+	WARN_ON(used > fp->bp->tx_ring_size);
+	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
+#endif
+
+	return (s16)(fp->bp->tx_ring_size) - used;
+}
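
The arithmetic in bnx2x_tx_avail() works because producer and consumer are free-running 16-bit counters: the 16-bit subtraction yields the in-flight count even after the producer wraps past 0xffff, and NUM_TX_RINGS is added because the per-page "next page" entries occupy BD slots without carrying data. A tiny standalone demonstration of the wraparound property (userspace C, purely illustrative):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t prod = 0x0005;			/* wrapped past 0xffff */
	uint16_t cons = 0xfffb;
	int16_t used = (int16_t)(uint16_t)(prod - cons);

	printf("in flight: %d\n", used);	/* prints 10, not -65526 */
	return 0;
}
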
+
+static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
+{
+	u16 hw_cons;
+
+	/* Tell compiler that status block fields can change */
+	barrier();
+	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
+	return hw_cons != fp->tx_pkt_cons;
+}
+
+static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
+				     struct bnx2x_fastpath *fp, u16 index)
+{
+	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
+	struct page *page = sw_buf->page;
+	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
+
+	/* Skip "next page" elements */
+	if (!page)
+		return;
+
+	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
+		       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
+	__free_pages(page, PAGES_PER_SGE_SHIFT);
+
+	sw_buf->page = NULL;
+	sge->addr_hi = 0;
+	sge->addr_lo = 0;
+}
+
+static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
+					   struct bnx2x_fastpath *fp, int last)
+{
+	int i;
+
+	for (i = 0; i < last; i++)
+		bnx2x_free_rx_sge(bp, fp, i);
+}
+
+static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
+				     struct bnx2x_fastpath *fp, u16 index)
+{
+	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
+	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
+	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
+	dma_addr_t mapping;
+
+	if (unlikely(page == NULL))
+		return -ENOMEM;
+
+	mapping = dma_map_page(&bp->pdev->dev, page, 0,
+			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
+	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+		__free_pages(page, PAGES_PER_SGE_SHIFT);
+		return -ENOMEM;
+	}
+
+	sw_buf->page = page;
+	dma_unmap_addr_set(sw_buf, mapping, mapping);
+
+	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
+	sge->addr_lo = cpu_to_le32(U64_LO(mapping));
+
+	return 0;
+}
+
+static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
+				     struct bnx2x_fastpath *fp, u16 index)
+{
+	struct sk_buff *skb;
+	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
+	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
+	dma_addr_t mapping;
+
+	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
+	if (unlikely(skb == NULL))
+		return -ENOMEM;
+
+	mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
+				 DMA_FROM_DEVICE);
+	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+		dev_kfree_skb(skb);
+		return -ENOMEM;
+	}
+
+	rx_buf->skb = skb;
+	dma_unmap_addr_set(rx_buf, mapping, mapping);
+
+	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
+	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
+
+	return 0;
+}
+
+/* Note that we are not allocating a new skb,
+ * we are just moving one from cons to prod.
+ * We are not creating a new mapping,
+ * so there is no need to check for dma_mapping_error().
+ */
+static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
+			       struct sk_buff *skb, u16 cons, u16 prod)
+{
+	struct bnx2x *bp = fp->bp;
+	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
+	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
+	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
+	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
+
+	dma_sync_single_for_device(&bp->pdev->dev,
+				   dma_unmap_addr(cons_rx_buf, mapping),
+				   RX_COPY_THRESH, DMA_FROM_DEVICE);
+
+	prod_rx_buf->skb = cons_rx_buf->skb;
+	dma_unmap_addr_set(prod_rx_buf, mapping,
+			   dma_unmap_addr(cons_rx_buf, mapping));
+	*prod_bd = *cons_bd;
+}
+
+static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
+{
+	int i, j;
+
+	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
+		int idx = RX_SGE_CNT * i - 1;
+
+		for (j = 0; j < 2; j++) {
+			SGE_MASK_CLEAR_BIT(fp, idx);
+			idx--;
+		}
+	}
+}
+
+static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
+{
+	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
+	memset(fp->sge_mask, 0xff,
+	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
+
+	/* Clear the last two indices in each page:
+	   these are the indices that correspond to the "next" element,
+	   hence they will never be indicated and should be removed from
+	   the calculations. */
+	bnx2x_clear_sge_mask_next_elems(fp);
+}
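
The sge_mask is a bitmap with one bit per SGE ring entry, packed into u64 words (RX_SGE_MASK_ELEM_SHIFT being the log2 of 64); the init above sets every bit and then knocks out the per-page "next" positions so they never enter the accounting. The word/bit indexing such a bitmap needs reduces to a sketch like this (demo_* names are illustrative, not the driver's macros):

static inline void demo_mask_clear_bit(u64 *mask, unsigned int idx)
{
	mask[idx >> 6] &= ~(1ULL << (idx & 0x3f));	/* word, then bit */
}

static inline void demo_mask_set_bit(u64 *mask, unsigned int idx)
{
	mask[idx >> 6] |= 1ULL << (idx & 0x3f);
}
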
+
+static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
+				       struct bnx2x_fastpath *fp, int last)
+{
+	int i;
+
+	for (i = 0; i < last; i++) {
+		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
+		struct sk_buff *skb = rx_buf->skb;
+
+		if (skb == NULL) {
+			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
+			continue;
+		}
+
+		if (fp->tpa_state[i] == BNX2X_TPA_START)
+			dma_unmap_single(&bp->pdev->dev,
+					 dma_unmap_addr(rx_buf, mapping),
+					 bp->rx_buf_size, DMA_FROM_DEVICE);
+
+		dev_kfree_skb(skb);
+		rx_buf->skb = NULL;
+	}
+}
+
+
+static inline void bnx2x_init_tx_ring(struct bnx2x *bp)
+{
+	int i, j;
+
+	for_each_queue(bp, j) {
+		struct bnx2x_fastpath *fp = &bp->fp[j];
+
+		for (i = 1; i <= NUM_TX_RINGS; i++) {
+			struct eth_tx_next_bd *tx_next_bd =
+				&fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
+
+			tx_next_bd->addr_hi =
+				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
+					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
+			tx_next_bd->addr_lo =
+				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
+					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
+		}
+
+		fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
+		fp->tx_db.data.zero_fill1 = 0;
+		fp->tx_db.data.prod = 0;
+
+		fp->tx_pkt_prod = 0;
+		fp->tx_pkt_cons = 0;
+		fp->tx_bd_prod = 0;
+		fp->tx_bd_cons = 0;
+		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
+		fp->tx_pkt = 0;
+	}
+}
+
+static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
+{
+	u16 rx_cons_sb;
+
+	/* Tell compiler that status block fields can change */
+	barrier();
+	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
+	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
+		rx_cons_sb++;
+	return (fp->rx_comp_cons != rx_cons_sb);
+}
+
+/* HW Lock for shared dual port PHYs */
+void bnx2x_acquire_phy_lock(struct bnx2x *bp);
+void bnx2x_release_phy_lock(struct bnx2x *bp);
+
+void bnx2x_link_report(struct bnx2x *bp);
+int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget);
+int bnx2x_tx_int(struct bnx2x_fastpath *fp);
+void bnx2x_init_rx_rings(struct bnx2x *bp);
+netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);
+
+int bnx2x_change_mac_addr(struct net_device *dev, void *p);
+void bnx2x_tx_timeout(struct net_device *dev);
+void bnx2x_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp);
+void bnx2x_netif_start(struct bnx2x *bp);
+void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
+void bnx2x_free_irq(struct bnx2x *bp, bool disable_only);
+int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state);
+int bnx2x_resume(struct pci_dev *pdev);
+void bnx2x_free_skbs(struct bnx2x *bp);
+int bnx2x_change_mtu(struct net_device *dev, int new_mtu);
+int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
+int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
+int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
+
+#endif /* BNX2X_CMN_H */
diff --git a/drivers/net/bnx2x_dump.h b/drivers/net/bnx2x/bnx2x_dump.h
similarity index 100%
rename from drivers/net/bnx2x_dump.h
rename to drivers/net/bnx2x/bnx2x_dump.h
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
new file mode 100644
index 0000000..8b75b05
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_ethtool.c
@@ -0,0 +1,1971 @@
+/* bnx2x_ethtool.c: Broadcom Everest network driver.
+ *
+ * Copyright (c) 2007-2010 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Eliezer Tamir
+ * Based on code from Michael Chan's bnx2 driver
+ * UDP CSUM errata workaround by Arik Gendelman
+ * Slowpath and fastpath rework by Vladislav Zolotarov
+ * Statistics and Link management by Yitchak Gertner
+ *
+ */
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/crc32.h>
+
+
+#include "bnx2x.h"
+#include "bnx2x_cmn.h"
+#include "bnx2x_dump.h"
+
+
+static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	cmd->supported = bp->port.supported;
+	cmd->advertising = bp->port.advertising;
+
+	if ((bp->state == BNX2X_STATE_OPEN) &&
+	    !(bp->flags & MF_FUNC_DIS) &&
+	    (bp->link_vars.link_up)) {
+		cmd->speed = bp->link_vars.line_speed;
+		cmd->duplex = bp->link_vars.duplex;
+		if (IS_E1HMF(bp)) {
+			u16 vn_max_rate;
+
+			vn_max_rate =
+				((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
+				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
+			if (vn_max_rate < cmd->speed)
+				cmd->speed = vn_max_rate;
+		}
+	} else {
+		cmd->speed = -1;
+		cmd->duplex = -1;
+	}
+
+	if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
+		u32 ext_phy_type =
+			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
+
+		switch (ext_phy_type) {
+		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
+		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
+		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
+		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
+		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
+		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
+		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
+			cmd->port = PORT_FIBRE;
+			break;
+
+		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
+		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
+			cmd->port = PORT_TP;
+			break;
+
+		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
+			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
+				  bp->link_params.ext_phy_config);
+			break;
+
+		default:
+			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
+			   bp->link_params.ext_phy_config);
+			break;
+		}
+	} else
+		cmd->port = PORT_TP;
+
+	cmd->phy_address = bp->mdio.prtad;
+	cmd->transceiver = XCVR_INTERNAL;
+
+	if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
+		cmd->autoneg = AUTONEG_ENABLE;
+	else
+		cmd->autoneg = AUTONEG_DISABLE;
+
+	cmd->maxtxpkt = 0;
+	cmd->maxrxpkt = 0;
+
+	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
+	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
+	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
+	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
+	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
+	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
+	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
+
+	return 0;
+}
+
+static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	u32 advertising;
+
+	if (IS_E1HMF(bp))
+		return 0;
+
+	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
+	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
+	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
+	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
+	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
+	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
+	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
+
+	if (cmd->autoneg == AUTONEG_ENABLE) {
+		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
+			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
+			return -EINVAL;
+		}
+
+		/* advertise the requested speed and duplex if supported */
+		cmd->advertising &= bp->port.supported;
+
+		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
+		bp->link_params.req_duplex = DUPLEX_FULL;
+		bp->port.advertising |= (ADVERTISED_Autoneg |
+					 cmd->advertising);
+
+	} else { /* forced speed */
+		/* advertise the requested speed and duplex if supported */
+		switch (cmd->speed) {
+		case SPEED_10:
+			if (cmd->duplex == DUPLEX_FULL) {
+				if (!(bp->port.supported &
+				      SUPPORTED_10baseT_Full)) {
+					DP(NETIF_MSG_LINK,
+					   "10M full not supported\n");
+					return -EINVAL;
+				}
+
+				advertising = (ADVERTISED_10baseT_Full |
+					       ADVERTISED_TP);
+			} else {
+				if (!(bp->port.supported &
+				      SUPPORTED_10baseT_Half)) {
+					DP(NETIF_MSG_LINK,
+					   "10M half not supported\n");
+					return -EINVAL;
+				}
+
+				advertising = (ADVERTISED_10baseT_Half |
+					       ADVERTISED_TP);
+			}
+			break;
+
+		case SPEED_100:
+			if (cmd->duplex == DUPLEX_FULL) {
+				if (!(bp->port.supported &
+						SUPPORTED_100baseT_Full)) {
+					DP(NETIF_MSG_LINK,
+					   "100M full not supported\n");
+					return -EINVAL;
+				}
+
+				advertising = (ADVERTISED_100baseT_Full |
+					       ADVERTISED_TP);
+			} else {
+				if (!(bp->port.supported &
+						SUPPORTED_100baseT_Half)) {
+					DP(NETIF_MSG_LINK,
+					   "100M half not supported\n");
+					return -EINVAL;
+				}
+
+				advertising = (ADVERTISED_100baseT_Half |
+					       ADVERTISED_TP);
+			}
+			break;
+
+		case SPEED_1000:
+			if (cmd->duplex != DUPLEX_FULL) {
+				DP(NETIF_MSG_LINK, "1G half not supported\n");
+				return -EINVAL;
+			}
+
+			if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
+				DP(NETIF_MSG_LINK, "1G full not supported\n");
+				return -EINVAL;
+			}
+
+			advertising = (ADVERTISED_1000baseT_Full |
+				       ADVERTISED_TP);
+			break;
+
+		case SPEED_2500:
+			if (cmd->duplex != DUPLEX_FULL) {
+				DP(NETIF_MSG_LINK,
+				   "2.5G half not supported\n");
+				return -EINVAL;
+			}
+
+			if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
+				DP(NETIF_MSG_LINK,
+				   "2.5G full not supported\n");
+				return -EINVAL;
+			}
+
+			advertising = (ADVERTISED_2500baseX_Full |
+				       ADVERTISED_TP);
+			break;
+
+		case SPEED_10000:
+			if (cmd->duplex != DUPLEX_FULL) {
+				DP(NETIF_MSG_LINK, "10G half not supported\n");
+				return -EINVAL;
+			}
+
+			if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
+				DP(NETIF_MSG_LINK, "10G full not supported\n");
+				return -EINVAL;
+			}
+
+			advertising = (ADVERTISED_10000baseT_Full |
+				       ADVERTISED_FIBRE);
+			break;
+
+		default:
+			DP(NETIF_MSG_LINK, "Unsupported speed\n");
+			return -EINVAL;
+		}
+
+		bp->link_params.req_line_speed = cmd->speed;
+		bp->link_params.req_duplex = cmd->duplex;
+		bp->port.advertising = advertising;
+	}
+
+	DP(NETIF_MSG_LINK, "req_line_speed %d\n"
+	   DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
+	   bp->link_params.req_line_speed, bp->link_params.req_duplex,
+	   bp->port.advertising);
+
+	if (netif_running(dev)) {
+		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+		bnx2x_link_set(bp);
+	}
+
+	return 0;
+}
+
+#define IS_E1_ONLINE(info)	(((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
+#define IS_E1H_ONLINE(info)	(((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
+
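+/* Compute the register dump size for the running chip: every online
+ * register contributes its size in dwords, and each wide-bus (wreg)
+ * entry expands to size * (1 + read_regs_count) dwords.  The total is
+ * converted to bytes, with the dump header added on top.
+ */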
+static int bnx2x_get_regs_len(struct net_device *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	int regdump_len = 0;
+	int i;
+
+	if (CHIP_IS_E1(bp)) {
+		for (i = 0; i < REGS_COUNT; i++)
+			if (IS_E1_ONLINE(reg_addrs[i].info))
+				regdump_len += reg_addrs[i].size;
+
+		for (i = 0; i < WREGS_COUNT_E1; i++)
+			if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
+				regdump_len += wreg_addrs_e1[i].size *
+					(1 + wreg_addrs_e1[i].read_regs_count);
+
+	} else { /* E1H */
+		for (i = 0; i < REGS_COUNT; i++)
+			if (IS_E1H_ONLINE(reg_addrs[i].info))
+				regdump_len += reg_addrs[i].size;
+
+		for (i = 0; i < WREGS_COUNT_E1H; i++)
+			if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
+				regdump_len += wreg_addrs_e1h[i].size *
+					(1 + wreg_addrs_e1h[i].read_regs_count);
+	}
+	regdump_len *= 4;
+	regdump_len += sizeof(struct dump_hdr);
+
+	return regdump_len;
+}
+
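+/* Fill the caller's buffer with a dump header (the storm WAITP register
+ * values plus the chip info) followed by the raw values of all registers
+ * that are online for this chip.
+ */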
+static void bnx2x_get_regs(struct net_device *dev,
+			   struct ethtool_regs *regs, void *_p)
+{
+	u32 *p = _p, i, j;
+	struct bnx2x *bp = netdev_priv(dev);
+	struct dump_hdr dump_hdr = {0};
+
+	regs->version = 0;
+	memset(p, 0, regs->len);
+
+	if (!netif_running(bp->dev))
+		return;
+
+	dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
+	dump_hdr.dump_sign = dump_sign_all;
+	dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
+	dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
+	dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
+	dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
+	dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
+
+	memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
+	p += dump_hdr.hdr_size + 1;
+
+	if (CHIP_IS_E1(bp)) {
+		for (i = 0; i < REGS_COUNT; i++)
+			if (IS_E1_ONLINE(reg_addrs[i].info))
+				for (j = 0; j < reg_addrs[i].size; j++)
+					*p++ = REG_RD(bp,
+						      reg_addrs[i].addr + j*4);
+
+	} else { /* E1H */
+		for (i = 0; i < REGS_COUNT; i++)
+			if (IS_E1H_ONLINE(reg_addrs[i].info))
+				for (j = 0; j < reg_addrs[i].size; j++)
+					*p++ = REG_RD(bp,
+						      reg_addrs[i].addr + j*4);
+	}
+}
+
+#define PHY_FW_VER_LEN			10
+
+static void bnx2x_get_drvinfo(struct net_device *dev,
+			      struct ethtool_drvinfo *info)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	u8 phy_fw_ver[PHY_FW_VER_LEN];
+
+	strcpy(info->driver, DRV_MODULE_NAME);
+	strcpy(info->version, DRV_MODULE_VERSION);
+
+	phy_fw_ver[0] = '\0';
+	if (bp->port.pmf) {
+		bnx2x_acquire_phy_lock(bp);
+		bnx2x_get_ext_phy_fw_version(&bp->link_params,
+					     (bp->state != BNX2X_STATE_CLOSED),
+					     phy_fw_ver, PHY_FW_VER_LEN);
+		bnx2x_release_phy_lock(bp);
+	}
+
+	strncpy(info->fw_version, bp->fw_ver, 32);
+	snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
+		 "bc %d.%d.%d%s%s",
+		 (bp->common.bc_ver & 0xff0000) >> 16,
+		 (bp->common.bc_ver & 0xff00) >> 8,
+		 (bp->common.bc_ver & 0xff),
+		 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
+	strcpy(info->bus_info, pci_name(bp->pdev));
+	info->n_stats = BNX2X_NUM_STATS;
+	info->testinfo_len = BNX2X_NUM_TESTS;
+	info->eedump_len = bp->common.flash_size;
+	info->regdump_len = bnx2x_get_regs_len(dev);
+}
+
+static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	if (bp->flags & NO_WOL_FLAG) {
+		wol->supported = 0;
+		wol->wolopts = 0;
+	} else {
+		wol->supported = WAKE_MAGIC;
+		if (bp->wol)
+			wol->wolopts = WAKE_MAGIC;
+		else
+			wol->wolopts = 0;
+	}
+	memset(&wol->sopass, 0, sizeof(wol->sopass));
+}
+
+static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	if (wol->wolopts & ~WAKE_MAGIC)
+		return -EINVAL;
+
+	if (wol->wolopts & WAKE_MAGIC) {
+		if (bp->flags & NO_WOL_FLAG)
+			return -EINVAL;
+
+		bp->wol = 1;
+	} else
+		bp->wol = 0;
+
+	return 0;
+}
+
+static u32 bnx2x_get_msglevel(struct net_device *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	return bp->msg_enable;
+}
+
+static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	if (capable(CAP_NET_ADMIN))
+		bp->msg_enable = level;
+}
+
+static int bnx2x_nway_reset(struct net_device *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	if (!bp->port.pmf)
+		return 0;
+
+	if (netif_running(dev)) {
+		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+		bnx2x_link_set(bp);
+	}
+
+	return 0;
+}
+
+static u32 bnx2x_get_link(struct net_device *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	if (bp->flags & MF_FUNC_DIS)
+		return 0;
+
+	return bp->link_vars.link_up;
+}
+
+static int bnx2x_get_eeprom_len(struct net_device *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	return bp->common.flash_size;
+}
+
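+/* Request the per-port NVRAM software arbitration from the MCP: set the
+ * request bit and poll for the matching grant bit.  The timeout is
+ * stretched on emulation/FPGA platforms, where accesses are much slower.
+ */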
+static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
+{
+	int port = BP_PORT(bp);
+	int count, i;
+	u32 val = 0;
+
+	/* adjust timeout for emulation/FPGA */
+	count = NVRAM_TIMEOUT_COUNT;
+	if (CHIP_REV_IS_SLOW(bp))
+		count *= 100;
+
+	/* request access to nvram interface */
+	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
+	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
+
+	for (i = 0; i < count*10; i++) {
+		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
+		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
+			break;
+
+		udelay(5);
+	}
+
+	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
+		DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static int bnx2x_release_nvram_lock(struct bnx2x *bp)
+{
+	int port = BP_PORT(bp);
+	int count, i;
+	u32 val = 0;
+
+	/* adjust timeout for emulation/FPGA */
+	count = NVRAM_TIMEOUT_COUNT;
+	if (CHIP_REV_IS_SLOW(bp))
+		count *= 100;
+
+	/* relinquish nvram interface */
+	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
+	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
+
+	for (i = 0; i < count*10; i++) {
+		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
+		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
+			break;
+
+		udelay(5);
+	}
+
+	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
+		DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static void bnx2x_enable_nvram_access(struct bnx2x *bp)
+{
+	u32 val;
+
+	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
+
+	/* enable both bits, even on read */
+	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
+	       (val | MCPR_NVM_ACCESS_ENABLE_EN |
+		      MCPR_NVM_ACCESS_ENABLE_WR_EN));
+}
+
+static void bnx2x_disable_nvram_access(struct bnx2x *bp)
+{
+	u32 val;
+
+	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
+
+	/* disable both bits, even after read */
+	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
+	       (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
+			MCPR_NVM_ACCESS_ENABLE_WR_EN)));
+}
+
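+/* Read a single dword from NVRAM: clear the DONE bit, program the byte
+ * address, issue a DOIT read command and poll for completion.  The
+ * result is returned in big-endian order, matching what ethtool expects.
+ */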
+static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
+				  u32 cmd_flags)
+{
+	int count, i, rc;
+	u32 val;
+
+	/* build the command word */
+	cmd_flags |= MCPR_NVM_COMMAND_DOIT;
+
+	/* need to clear DONE bit separately */
+	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
+
+	/* address of the NVRAM to read from */
+	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
+	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
+
+	/* issue a read command */
+	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
+
+	/* adjust timeout for emulation/FPGA */
+	count = NVRAM_TIMEOUT_COUNT;
+	if (CHIP_REV_IS_SLOW(bp))
+		count *= 100;
+
+	/* wait for completion */
+	*ret_val = 0;
+	rc = -EBUSY;
+	for (i = 0; i < count; i++) {
+		udelay(5);
+		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
+
+		if (val & MCPR_NVM_COMMAND_DONE) {
+			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
+			/* NVRAM is read in CPU order, but ethtool sees
+			 * the data as an array of bytes; converting to
+			 * big-endian preserves the on-flash byte order */
+			*ret_val = cpu_to_be32(val);
+			rc = 0;
+			break;
+		}
+	}
+
+	return rc;
+}
+
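+/* Read an arbitrary dword-aligned range from NVRAM under the NVRAM lock.
+ * The FIRST flag is set on the initial dword and LAST on the final one
+ * so the NVRAM controller can frame the access correctly.
+ */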
+static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
+			    int buf_size)
+{
+	int rc;
+	u32 cmd_flags;
+	__be32 val;
+
+	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
+		DP(BNX2X_MSG_NVM,
+		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
+		   offset, buf_size);
+		return -EINVAL;
+	}
+
+	if (offset + buf_size > bp->common.flash_size) {
+		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
+				  " buf_size (0x%x) > flash_size (0x%x)\n",
+		   offset, buf_size, bp->common.flash_size);
+		return -EINVAL;
+	}
+
+	/* request access to nvram interface */
+	rc = bnx2x_acquire_nvram_lock(bp);
+	if (rc)
+		return rc;
+
+	/* enable access to nvram interface */
+	bnx2x_enable_nvram_access(bp);
+
+	/* read the first word(s) */
+	cmd_flags = MCPR_NVM_COMMAND_FIRST;
+	while ((buf_size > sizeof(u32)) && (rc == 0)) {
+		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
+		memcpy(ret_buf, &val, 4);
+
+		/* advance to the next dword */
+		offset += sizeof(u32);
+		ret_buf += sizeof(u32);
+		buf_size -= sizeof(u32);
+		cmd_flags = 0;
+	}
+
+	if (rc == 0) {
+		cmd_flags |= MCPR_NVM_COMMAND_LAST;
+		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
+		memcpy(ret_buf, &val, 4);
+	}
+
+	/* disable access to nvram interface */
+	bnx2x_disable_nvram_access(bp);
+	bnx2x_release_nvram_lock(bp);
+
+	return rc;
+}
+
+static int bnx2x_get_eeprom(struct net_device *dev,
+			    struct ethtool_eeprom *eeprom, u8 *eebuf)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	int rc;
+
+	if (!netif_running(dev))
+		return -EAGAIN;
+
+	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
+	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
+	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
+	   eeprom->len, eeprom->len);
+
+	/* parameters already validated in ethtool_get_eeprom */
+
+	rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
+
+	return rc;
+}
+
+static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
+				   u32 cmd_flags)
+{
+	int count, i, rc;
+
+	/* build the command word */
+	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
+
+	/* need to clear DONE bit separately */
+	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
+
+	/* write the data */
+	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
+
+	/* address of the NVRAM to write to */
+	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
+	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
+
+	/* issue the write command */
+	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
+
+	/* adjust timeout for emulation/FPGA */
+	count = NVRAM_TIMEOUT_COUNT;
+	if (CHIP_REV_IS_SLOW(bp))
+		count *= 100;
+
+	/* wait for completion */
+	rc = -EBUSY;
+	for (i = 0; i < count; i++) {
+		udelay(5);
+		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
+		if (val & MCPR_NVM_COMMAND_DONE) {
+			rc = 0;
+			break;
+		}
+	}
+
+	return rc;
+}
+
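+/* Bit shift of a byte within its naturally aligned dword */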
+#define BYTE_OFFSET(offset)		(8 * (offset & 0x03))
+
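+/* Single-byte NVRAM write, implemented as a read-modify-write of the
+ * enclosing aligned dword (this is the path ethtool takes for 1-byte
+ * accesses).
+ */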
+static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
+			      int buf_size)
+{
+	int rc;
+	u32 cmd_flags;
+	u32 align_offset;
+	__be32 val;
+
+	if (offset + buf_size > bp->common.flash_size) {
+		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
+				  " buf_size (0x%x) > flash_size (0x%x)\n",
+		   offset, buf_size, bp->common.flash_size);
+		return -EINVAL;
+	}
+
+	/* request access to nvram interface */
+	rc = bnx2x_acquire_nvram_lock(bp);
+	if (rc)
+		return rc;
+
+	/* enable access to nvram interface */
+	bnx2x_enable_nvram_access(bp);
+
+	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
+	align_offset = (offset & ~0x03);
+	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
+
+	if (rc == 0) {
+		val &= ~(0xff << BYTE_OFFSET(offset));
+		val |= (*data_buf << BYTE_OFFSET(offset));
+
+		/* NVRAM data is returned as an array of bytes;
+		 * convert it back to CPU order before writing */
+		val = be32_to_cpu(val);
+
+		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
+					     cmd_flags);
+	}
+
+	/* disable access to nvram interface */
+	bnx2x_disable_nvram_access(bp);
+	bnx2x_release_nvram_lock(bp);
+
+	return rc;
+}
+
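+/* Multi-dword NVRAM write.  The LAST flag is raised on the final dword
+ * of the buffer or when crossing a flash page boundary, and FIRST is
+ * raised again at the start of the next page.
+ */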
+static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
+			     int buf_size)
+{
+	int rc;
+	u32 cmd_flags;
+	u32 val;
+	u32 written_so_far;
+
+	if (buf_size == 1)	/* ethtool */
+		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
+
+	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
+		DP(BNX2X_MSG_NVM,
+		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
+		   offset, buf_size);
+		return -EINVAL;
+	}
+
+	if (offset + buf_size > bp->common.flash_size) {
+		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
+				  " buf_size (0x%x) > flash_size (0x%x)\n",
+		   offset, buf_size, bp->common.flash_size);
+		return -EINVAL;
+	}
+
+	/* request access to nvram interface */
+	rc = bnx2x_acquire_nvram_lock(bp);
+	if (rc)
+		return rc;
+
+	/* enable access to nvram interface */
+	bnx2x_enable_nvram_access(bp);
+
+	written_so_far = 0;
+	cmd_flags = MCPR_NVM_COMMAND_FIRST;
+	while ((written_so_far < buf_size) && (rc == 0)) {
+		if (written_so_far == (buf_size - sizeof(u32)))
+			cmd_flags |= MCPR_NVM_COMMAND_LAST;
+		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
+			cmd_flags |= MCPR_NVM_COMMAND_LAST;
+		else if ((offset % NVRAM_PAGE_SIZE) == 0)
+			cmd_flags |= MCPR_NVM_COMMAND_FIRST;
+
+		memcpy(&val, data_buf, 4);
+
+		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
+
+		/* advance to the next dword */
+		offset += sizeof(u32);
+		data_buf += sizeof(u32);
+		written_so_far += sizeof(u32);
+		cmd_flags = 0;
+	}
+
+	/* disable access to nvram interface */
+	bnx2x_disable_nvram_access(bp);
+	bnx2x_release_nvram_lock(bp);
+
+	return rc;
+}
+
+static int bnx2x_set_eeprom(struct net_device *dev,
+			    struct ethtool_eeprom *eeprom, u8 *eebuf)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	int port = BP_PORT(bp);
+	int rc = 0;
+
+	if (!netif_running(dev))
+		return -EAGAIN;
+
+	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
+	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
+	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
+	   eeprom->len, eeprom->len);
+
+	/* parameters already validated in ethtool_set_eeprom */
+
+	/* PHY eeprom can be accessed only by the PMF */
+	if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
+	    !bp->port.pmf)
+		return -EINVAL;
+
+	if (eeprom->magic == 0x50485950) {
+		/* 'PHYP' (0x50485950): prepare phy for FW upgrade */
+		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+
+		bnx2x_acquire_phy_lock(bp);
+		rc |= bnx2x_link_reset(&bp->link_params,
+				       &bp->link_vars, 0);
+		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
+					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
+			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
+				       MISC_REGISTERS_GPIO_HIGH, port);
+		bnx2x_release_phy_lock(bp);
+		bnx2x_link_report(bp);
+
+	} else if (eeprom->magic == 0x50485952) {
+		/* 'PHYR' (0x50485952): re-init link after FW upgrade */
+		if (bp->state == BNX2X_STATE_OPEN) {
+			bnx2x_acquire_phy_lock(bp);
+			rc |= bnx2x_link_reset(&bp->link_params,
+					       &bp->link_vars, 1);
+
+			rc |= bnx2x_phy_init(&bp->link_params,
+					     &bp->link_vars);
+			bnx2x_release_phy_lock(bp);
+			bnx2x_calc_fc_adv(bp);
+		}
+	} else if (eeprom->magic == 0x53985943) {
+		/* 0x53985943: PHY FW upgrade completed (note that this
+		 * magic does not spell 'PHYC', which is 0x50485943) */
+		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
+				       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
+			u8 ext_phy_addr =
+			     XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
+
+			/* DSP Remove Download Mode */
+			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
+				       MISC_REGISTERS_GPIO_LOW, port);
+
+			bnx2x_acquire_phy_lock(bp);
+
+			bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
+
+			/* wait 0.5 sec to allow it to run */
+			msleep(500);
+			bnx2x_ext_phy_hw_reset(bp, port);
+			msleep(500);
+			bnx2x_release_phy_lock(bp);
+		}
+	} else
+		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
+
+	return rc;
+}
+
+static int bnx2x_get_coalesce(struct net_device *dev,
+			      struct ethtool_coalesce *coal)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	memset(coal, 0, sizeof(struct ethtool_coalesce));
+
+	coal->rx_coalesce_usecs = bp->rx_ticks;
+	coal->tx_coalesce_usecs = bp->tx_ticks;
+
+	return 0;
+}
+
+static int bnx2x_set_coalesce(struct net_device *dev,
+			      struct ethtool_coalesce *coal)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	bp->rx_ticks = (u16)coal->rx_coalesce_usecs;
+	if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
+		bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;
+
+	bp->tx_ticks = (u16)coal->tx_coalesce_usecs;
+	if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
+		bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;
+
+	if (netif_running(dev))
+		bnx2x_update_coalesce(bp);
+
+	return 0;
+}
+
+static void bnx2x_get_ringparam(struct net_device *dev,
+				struct ethtool_ringparam *ering)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	ering->rx_max_pending = MAX_RX_AVAIL;
+	ering->rx_mini_max_pending = 0;
+	ering->rx_jumbo_max_pending = 0;
+
+	ering->rx_pending = bp->rx_ring_size;
+	ering->rx_mini_pending = 0;
+	ering->rx_jumbo_pending = 0;
+
+	ering->tx_max_pending = MAX_TX_AVAIL;
+	ering->tx_pending = bp->tx_ring_size;
+}
+
+static int bnx2x_set_ringparam(struct net_device *dev,
+			       struct ethtool_ringparam *ering)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	int rc = 0;
+
+	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
+		return -EAGAIN;
+	}
+
+	if ((ering->rx_pending > MAX_RX_AVAIL) ||
+	    (ering->tx_pending > MAX_TX_AVAIL) ||
+	    (ering->tx_pending <= MAX_SKB_FRAGS + 4))
+		return -EINVAL;
+
+	bp->rx_ring_size = ering->rx_pending;
+	bp->tx_ring_size = ering->tx_pending;
+
+	if (netif_running(dev)) {
+		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
+	}
+
+	return rc;
+}
+
+static void bnx2x_get_pauseparam(struct net_device *dev,
+				 struct ethtool_pauseparam *epause)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	epause->autoneg = (bp->link_params.req_flow_ctrl ==
+			   BNX2X_FLOW_CTRL_AUTO) &&
+			  (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
+
+	epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
+			    BNX2X_FLOW_CTRL_RX);
+	epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
+			    BNX2X_FLOW_CTRL_TX);
+
+	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
+	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
+	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
+}
+
+static int bnx2x_set_pauseparam(struct net_device *dev,
+				struct ethtool_pauseparam *epause)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	if (IS_E1HMF(bp))
+		return 0;
+
+	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
+	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
+	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
+
+	bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
+
+	if (epause->rx_pause)
+		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
+
+	if (epause->tx_pause)
+		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
+
+	if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
+		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+
+	if (epause->autoneg) {
+		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
+			DP(NETIF_MSG_LINK, "autoneg not supported\n");
+			return -EINVAL;
+		}
+
+		if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
+			bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
+	}
+
+	DP(NETIF_MSG_LINK,
+	   "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
+
+	if (netif_running(dev)) {
+		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+		bnx2x_link_set(bp);
+	}
+
+	return 0;
+}
+
+static int bnx2x_set_flags(struct net_device *dev, u32 data)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	int changed = 0;
+	int rc = 0;
+
+	if (data & ~(ETH_FLAG_LRO | ETH_FLAG_RXHASH))
+		return -EINVAL;
+
+	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
+		return -EAGAIN;
+	}
+
+	/* TPA requires Rx CSUM offloading */
+	if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
+		if (!bp->disable_tpa) {
+			if (!(dev->features & NETIF_F_LRO)) {
+				dev->features |= NETIF_F_LRO;
+				bp->flags |= TPA_ENABLE_FLAG;
+				changed = 1;
+			}
+		} else
+			rc = -EINVAL;
+	} else if (dev->features & NETIF_F_LRO) {
+		dev->features &= ~NETIF_F_LRO;
+		bp->flags &= ~TPA_ENABLE_FLAG;
+		changed = 1;
+	}
+
+	if (data & ETH_FLAG_RXHASH)
+		dev->features |= NETIF_F_RXHASH;
+	else
+		dev->features &= ~NETIF_F_RXHASH;
+
+	if (changed && netif_running(dev)) {
+		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
+	}
+
+	return rc;
+}
+
+static u32 bnx2x_get_rx_csum(struct net_device *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	return bp->rx_csum;
+}
+
+static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	int rc = 0;
+
+	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
+		return -EAGAIN;
+	}
+
+	bp->rx_csum = data;
+
+	/* Disable TPA when Rx CSUM is disabled; otherwise all
+	   TPA'ed packets will be discarded due to a wrong TCP CSUM */
+	if (!data) {
+		u32 flags = ethtool_op_get_flags(dev);
+
+		rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
+	}
+
+	return rc;
+}
+
+static int bnx2x_set_tso(struct net_device *dev, u32 data)
+{
+	if (data) {
+		dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
+		dev->features |= NETIF_F_TSO6;
+	} else {
+		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
+		dev->features &= ~NETIF_F_TSO6;
+	}
+
+	return 0;
+}
+
+static const struct {
+	char string[ETH_GSTRING_LEN];
+} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
+	{ "register_test (offline)" },
+	{ "memory_test (offline)" },
+	{ "loopback_test (offline)" },
+	{ "nvram_test (online)" },
+	{ "interrupt_test (online)" },
+	{ "link_test (online)" },
+	{ "idle check (online)" }
+};
+
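+/* Register test: write 0x00000000 and then 0xffffffff (through each
+ * register's mask) to a table of per-port registers, verify the
+ * read-back value and restore the original contents.
+ */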
+static int bnx2x_test_registers(struct bnx2x *bp)
+{
+	int idx, i, rc = -ENODEV;
+	u32 wr_val = 0;
+	int port = BP_PORT(bp);
+	static const struct {
+		u32 offset0;
+		u32 offset1;
+		u32 mask;
+	} reg_tbl[] = {
+/* 0 */		{ BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
+		{ DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
+		{ HC_REG_AGG_INT_0,                    4, 0x000003ff },
+		{ PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
+		{ PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
+		{ PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
+		{ PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
+		{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
+		{ PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
+		{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
+/* 10 */	{ PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
+		{ QM_REG_CONNNUM_0,                    4, 0x000fffff },
+		{ TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
+		{ SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
+		{ SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
+		{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
+		{ XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
+		{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
+		{ NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
+		{ NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
+/* 20 */	{ NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
+		{ NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
+		{ NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
+		{ NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
+		{ NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
+		{ NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
+		{ NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
+		{ NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
+		{ NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
+		{ NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
+/* 30 */	{ NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
+		{ NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
+		{ NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
+		{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
+		{ NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
+		{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
+		{ NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },
+
+		{ 0xffffffff, 0, 0x00000000 }
+	};
+
+	if (!netif_running(bp->dev))
+		return rc;
+
+	/* Run the test twice:
+	   first writing 0x00000000, then writing 0xffffffff */
+	for (idx = 0; idx < 2; idx++) {
+
+		switch (idx) {
+		case 0:
+			wr_val = 0;
+			break;
+		case 1:
+			wr_val = 0xffffffff;
+			break;
+		}
+
+		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
+			u32 offset, mask, save_val, val;
+
+			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
+			mask = reg_tbl[i].mask;
+
+			save_val = REG_RD(bp, offset);
+
+			REG_WR(bp, offset, (wr_val & mask));
+			val = REG_RD(bp, offset);
+
+			/* Restore the original register's value */
+			REG_WR(bp, offset, save_val);
+
+			/* verify value is as expected */
+			if ((val & mask) != (wr_val & mask)) {
+				DP(NETIF_MSG_PROBE,
+				   "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n",
+				   offset, val, wr_val, mask);
+				goto test_reg_exit;
+			}
+		}
+	}
+
+	rc = 0;
+
+test_reg_exit:
+	return rc;
+}
+
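+/* Memory test: read through all the chip memories listed below, then
+ * verify that no parity errors were latched beyond the per-chip masks
+ * of expected bits.
+ */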
+static int bnx2x_test_memory(struct bnx2x *bp)
+{
+	int i, j, rc = -ENODEV;
+	u32 val;
+	static const struct {
+		u32 offset;
+		int size;
+	} mem_tbl[] = {
+		{ CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
+		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
+		{ CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
+		{ DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
+		{ TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
+		{ UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
+		{ XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },
+
+		{ 0xffffffff, 0 }
+	};
+	static const struct {
+		char *name;
+		u32 offset;
+		u32 e1_mask;
+		u32 e1h_mask;
+	} prty_tbl[] = {
+		{ "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
+		{ "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
+		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
+		{ "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
+		{ "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
+		{ "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },
+
+		{ NULL, 0xffffffff, 0, 0 }
+	};
+
+	if (!netif_running(bp->dev))
+		return rc;
+
+	/* Go through all the memories */
+	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
+		for (j = 0; j < mem_tbl[i].size; j++)
+			REG_RD(bp, mem_tbl[i].offset + j*4);
+
+	/* Check the parity status */
+	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
+		val = REG_RD(bp, prty_tbl[i].offset);
+		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
+		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
+			DP(NETIF_MSG_HW,
+			   "%s is 0x%x\n", prty_tbl[i].name, val);
+			goto test_mem_exit;
+		}
+	}
+
+	rc = 0;
+
+test_mem_exit:
+	return rc;
+}
+
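+/* Poll for up to ~10 seconds (1000 * 10 ms) for the link to return */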
+static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
+{
+	int cnt = 1000;
+
+	if (link_up)
+		while (bnx2x_link_test(bp) && cnt--)
+			msleep(10);
+}
+
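+/* Send a single self-addressed packet with a known payload pattern
+ * through the requested loopback path, then verify that exactly one
+ * packet advanced the TX and RX rings and that the payload is intact.
+ */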
+static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
+{
+	unsigned int pkt_size, num_pkts, i;
+	struct sk_buff *skb;
+	unsigned char *packet;
+	struct bnx2x_fastpath *fp_rx = &bp->fp[0];
+	struct bnx2x_fastpath *fp_tx = &bp->fp[0];
+	u16 tx_start_idx, tx_idx;
+	u16 rx_start_idx, rx_idx;
+	u16 pkt_prod, bd_prod;
+	struct sw_tx_bd *tx_buf;
+	struct eth_tx_start_bd *tx_start_bd;
+	struct eth_tx_parse_bd *pbd = NULL;
+	dma_addr_t mapping;
+	union eth_rx_cqe *cqe;
+	u8 cqe_fp_flags;
+	struct sw_rx_bd *rx_buf;
+	u16 len;
+	int rc = -ENODEV;
+
+	/* check the loopback mode */
+	switch (loopback_mode) {
+	case BNX2X_PHY_LOOPBACK:
+		if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
+			return -EINVAL;
+		break;
+	case BNX2X_MAC_LOOPBACK:
+		bp->link_params.loopback_mode = LOOPBACK_BMAC;
+		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* prepare the loopback packet */
+	pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
+		     bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
+	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
+	if (!skb) {
+		rc = -ENOMEM;
+		goto test_loopback_exit;
+	}
+	packet = skb_put(skb, pkt_size);
+	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
+	memset(packet + ETH_ALEN, 0, ETH_ALEN);
+	memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
+	for (i = ETH_HLEN; i < pkt_size; i++)
+		packet[i] = (unsigned char) (i & 0xff);
+
+	/* send the loopback packet */
+	num_pkts = 0;
+	tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
+	rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
+
+	pkt_prod = fp_tx->tx_pkt_prod++;
+	tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
+	tx_buf->first_bd = fp_tx->tx_bd_prod;
+	tx_buf->skb = skb;
+	tx_buf->flags = 0;
+
+	bd_prod = TX_BD(fp_tx->tx_bd_prod);
+	tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
+	mapping = dma_map_single(&bp->pdev->dev, skb->data,
+				 skb_headlen(skb), DMA_TO_DEVICE);
+	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
+	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
+	tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
+	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
+	tx_start_bd->vlan = cpu_to_le16(pkt_prod);
+	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
+	tx_start_bd->general_data = ((UNICAST_ADDRESS <<
+				ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
+
+	/* turn on parsing and get a BD */
+	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
+	pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
+
+	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
+
+	wmb();
+
+	fp_tx->tx_db.data.prod += 2;
+	barrier();
+	DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);
+
+	mmiowb();
+
+	num_pkts++;
+	fp_tx->tx_bd_prod += 2; /* start + pbd */
+
+	udelay(100);
+
+	tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
+	if (tx_idx != tx_start_idx + num_pkts)
+		goto test_loopback_exit;
+
+	rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
+	if (rx_idx != rx_start_idx + num_pkts)
+		goto test_loopback_exit;
+
+	cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
+	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
+	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
+		goto test_loopback_rx_exit;
+
+	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
+	if (len != pkt_size)
+		goto test_loopback_rx_exit;
+
+	rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
+	skb = rx_buf->skb;
+	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
+	for (i = ETH_HLEN; i < pkt_size; i++)
+		if (*(skb->data + i) != (unsigned char) (i & 0xff))
+			goto test_loopback_rx_exit;
+
+	rc = 0;
+
+test_loopback_rx_exit:
+
+	fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
+	fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
+	fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
+	fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
+
+	/* Update producers */
+	bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
+			     fp_rx->rx_sge_prod);
+
+test_loopback_exit:
+	bp->link_params.loopback_mode = LOOPBACK_NONE;
+
+	return rc;
+}
+
+static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
+{
+	int rc = 0, res;
+
+	if (BP_NOMCP(bp))
+		return rc;
+
+	if (!netif_running(bp->dev))
+		return BNX2X_LOOPBACK_FAILED;
+
+	bnx2x_netif_stop(bp, 1);
+	bnx2x_acquire_phy_lock(bp);
+
+	res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
+	if (res) {
+		DP(NETIF_MSG_PROBE, "  PHY loopback failed  (res %d)\n", res);
+		rc |= BNX2X_PHY_LOOPBACK_FAILED;
+	}
+
+	res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
+	if (res) {
+		DP(NETIF_MSG_PROBE, "  MAC loopback failed  (res %d)\n", res);
+		rc |= BNX2X_MAC_LOOPBACK_FAILED;
+	}
+
+	bnx2x_release_phy_lock(bp);
+	bnx2x_netif_start(bp);
+
+	return rc;
+}
+
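+/* Each NVRAM section is stored together with its CRC32; running the CRC
+ * over a section including the stored CRC yields this fixed residual
+ * value when the section is intact.
+ */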
+#define CRC32_RESIDUAL			0xdebb20e3
+
+static int bnx2x_test_nvram(struct bnx2x *bp)
+{
+	static const struct {
+		int offset;
+		int size;
+	} nvram_tbl[] = {
+		{     0,  0x14 }, /* bootstrap */
+		{  0x14,  0xec }, /* dir */
+		{ 0x100, 0x350 }, /* manuf_info */
+		{ 0x450,  0xf0 }, /* feature_info */
+		{ 0x640,  0x64 }, /* upgrade_key_info */
+		{ 0x6a4,  0x64 },
+		{ 0x708,  0x70 }, /* manuf_key_info */
+		{ 0x778,  0x70 },
+		{     0,     0 }
+	};
+	__be32 buf[0x350 / 4];
+	u8 *data = (u8 *)buf;
+	int i, rc;
+	u32 magic, crc;
+
+	if (BP_NOMCP(bp))
+		return 0;
+
+	rc = bnx2x_nvram_read(bp, 0, data, 4);
+	if (rc) {
+		DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
+		goto test_nvram_exit;
+	}
+
+	magic = be32_to_cpu(buf[0]);
+	if (magic != 0x669955aa) {
+		DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
+		rc = -ENODEV;
+		goto test_nvram_exit;
+	}
+
+	for (i = 0; nvram_tbl[i].size; i++) {
+
+		rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
+				      nvram_tbl[i].size);
+		if (rc) {
+			DP(NETIF_MSG_PROBE,
+			   "nvram_tbl[%d] read data (rc %d)\n", i, rc);
+			goto test_nvram_exit;
+		}
+
+		crc = ether_crc_le(nvram_tbl[i].size, data);
+		if (crc != CRC32_RESIDUAL) {
+			DP(NETIF_MSG_PROBE,
+			   "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
+			rc = -ENODEV;
+			goto test_nvram_exit;
+		}
+	}
+
+test_nvram_exit:
+	return rc;
+}
+
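+/* Interrupt test: post a harmless SET_MAC ramrod on the slowpath and
+ * wait up to ~100 ms for its completion to clear the pending counter.
+ */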
+static int bnx2x_test_intr(struct bnx2x *bp)
+{
+	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
+	int i, rc;
+
+	if (!netif_running(bp->dev))
+		return -ENODEV;
+
+	config->hdr.length = 0;
+	if (CHIP_IS_E1(bp))
+		/* use last unicast entries */
+		config->hdr.offset = (BP_PORT(bp) ? 63 : 31);
+	else
+		config->hdr.offset = BP_FUNC(bp);
+	config->hdr.client_id = bp->fp->cl_id;
+	config->hdr.reserved1 = 0;
+
+	bp->set_mac_pending++;
+	smp_wmb();
+	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
+			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
+			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
+	if (rc == 0) {
+		for (i = 0; i < 10; i++) {
+			if (!bp->set_mac_pending)
+				break;
+			smp_rmb();
+			msleep_interruptible(10);
+		}
+		if (i == 10)
+			rc = -ENODEV;
+	}
+
+	return rc;
+}
+
+static void bnx2x_self_test(struct net_device *dev,
+			    struct ethtool_test *etest, u64 *buf)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
+		etest->flags |= ETH_TEST_FL_FAILED;
+		return;
+	}
+
+	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
+
+	if (!netif_running(dev))
+		return;
+
+	/* offline tests are not supported in MF mode */
+	if (IS_E1HMF(bp))
+		etest->flags &= ~ETH_TEST_FL_OFFLINE;
+
+	if (etest->flags & ETH_TEST_FL_OFFLINE) {
+		int port = BP_PORT(bp);
+		u32 val;
+		u8 link_up;
+
+		/* save current value of input enable for TX port IF */
+		val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
+		/* disable input for TX port IF */
+		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
+
+		link_up = (bnx2x_link_test(bp) == 0);
+		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+		bnx2x_nic_load(bp, LOAD_DIAG);
+		/* wait until link state is restored */
+		bnx2x_wait_for_link(bp, link_up);
+
+		if (bnx2x_test_registers(bp) != 0) {
+			buf[0] = 1;
+			etest->flags |= ETH_TEST_FL_FAILED;
+		}
+		if (bnx2x_test_memory(bp) != 0) {
+			buf[1] = 1;
+			etest->flags |= ETH_TEST_FL_FAILED;
+		}
+		buf[2] = bnx2x_test_loopback(bp, link_up);
+		if (buf[2] != 0)
+			etest->flags |= ETH_TEST_FL_FAILED;
+
+		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+
+		/* restore input for TX port IF */
+		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
+
+		bnx2x_nic_load(bp, LOAD_NORMAL);
+		/* wait until link state is restored */
+		bnx2x_wait_for_link(bp, link_up);
+	}
+	if (bnx2x_test_nvram(bp) != 0) {
+		buf[3] = 1;
+		etest->flags |= ETH_TEST_FL_FAILED;
+	}
+	if (bnx2x_test_intr(bp) != 0) {
+		buf[4] = 1;
+		etest->flags |= ETH_TEST_FL_FAILED;
+	}
+	if (bp->port.pmf)
+		if (bnx2x_link_test(bp) != 0) {
+			buf[5] = 1;
+			etest->flags |= ETH_TEST_FL_FAILED;
+		}
+
+#ifdef BNX2X_EXTRA_DEBUG
+	bnx2x_panic_dump(bp);
+#endif
+}
+
+static const struct {
+	long offset;
+	int size;
+	u8 string[ETH_GSTRING_LEN];
+} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
+/* 1 */	{ Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
+	{ Q_STATS_OFFSET32(error_bytes_received_hi),
+						8, "[%d]: rx_error_bytes" },
+	{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
+						8, "[%d]: rx_ucast_packets" },
+	{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
+						8, "[%d]: rx_mcast_packets" },
+	{ Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
+						8, "[%d]: rx_bcast_packets" },
+	{ Q_STATS_OFFSET32(no_buff_discard_hi),	8, "[%d]: rx_discards" },
+	{ Q_STATS_OFFSET32(rx_err_discard_pkt),
+					 4, "[%d]: rx_phy_ip_err_discards"},
+	{ Q_STATS_OFFSET32(rx_skb_alloc_failed),
+					 4, "[%d]: rx_skb_alloc_discard" },
+	{ Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
+
+/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi),	8, "[%d]: tx_bytes" },
+	{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
+						8, "[%d]: tx_ucast_packets" },
+	{ Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
+						8, "[%d]: tx_mcast_packets" },
+	{ Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
+						8, "[%d]: tx_bcast_packets" }
+};
+
+static const struct {
+	long offset;
+	int size;
+	u32 flags;
+#define STATS_FLAGS_PORT		1
+#define STATS_FLAGS_FUNC		2
+#define STATS_FLAGS_BOTH		(STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
+	u8 string[ETH_GSTRING_LEN];
+} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
+/* 1 */	{ STATS_OFFSET32(total_bytes_received_hi),
+				8, STATS_FLAGS_BOTH, "rx_bytes" },
+	{ STATS_OFFSET32(error_bytes_received_hi),
+				8, STATS_FLAGS_BOTH, "rx_error_bytes" },
+	{ STATS_OFFSET32(total_unicast_packets_received_hi),
+				8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
+	{ STATS_OFFSET32(total_multicast_packets_received_hi),
+				8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
+	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
+				8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
+	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
+				8, STATS_FLAGS_PORT, "rx_crc_errors" },
+	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
+				8, STATS_FLAGS_PORT, "rx_align_errors" },
+	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
+				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
+	{ STATS_OFFSET32(etherstatsoverrsizepkts_hi),
+				8, STATS_FLAGS_PORT, "rx_oversize_packets" },
+/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
+				8, STATS_FLAGS_PORT, "rx_fragments" },
+	{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
+				8, STATS_FLAGS_PORT, "rx_jabbers" },
+	{ STATS_OFFSET32(no_buff_discard_hi),
+				8, STATS_FLAGS_BOTH, "rx_discards" },
+	{ STATS_OFFSET32(mac_filter_discard),
+				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
+	{ STATS_OFFSET32(xxoverflow_discard),
+				4, STATS_FLAGS_PORT, "rx_fw_discards" },
+	{ STATS_OFFSET32(brb_drop_hi),
+				8, STATS_FLAGS_PORT, "rx_brb_discard" },
+	{ STATS_OFFSET32(brb_truncate_hi),
+				8, STATS_FLAGS_PORT, "rx_brb_truncate" },
+	{ STATS_OFFSET32(pause_frames_received_hi),
+				8, STATS_FLAGS_PORT, "rx_pause_frames" },
+	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
+				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
+	{ STATS_OFFSET32(nig_timer_max),
+			4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
+/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
+				4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
+	{ STATS_OFFSET32(rx_skb_alloc_failed),
+				4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
+	{ STATS_OFFSET32(hw_csum_err),
+				4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
+
+	{ STATS_OFFSET32(total_bytes_transmitted_hi),
+				8, STATS_FLAGS_BOTH, "tx_bytes" },
+	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
+				8, STATS_FLAGS_PORT, "tx_error_bytes" },
+	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
+				8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
+	{ STATS_OFFSET32(total_multicast_packets_transmitted_hi),
+				8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
+	{ STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
+				8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
+	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
+				8, STATS_FLAGS_PORT, "tx_mac_errors" },
+	{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
+				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
+/* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
+				8, STATS_FLAGS_PORT, "tx_single_collisions" },
+	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
+				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
+	{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
+				8, STATS_FLAGS_PORT, "tx_deferred" },
+	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
+				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
+	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
+				8, STATS_FLAGS_PORT, "tx_late_collisions" },
+	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
+				8, STATS_FLAGS_PORT, "tx_total_collisions" },
+	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
+				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
+	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
+			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
+	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
+			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
+	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
+			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
+/* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
+			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
+	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
+			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
+	{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
+			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
+	{ STATS_OFFSET32(pause_frames_sent_hi),
+				8, STATS_FLAGS_PORT, "tx_pause_frames" }
+};
+
+#define IS_PORT_STAT(i) \
+	((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
+#define IS_FUNC_STAT(i)		(bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
+#define IS_E1HMF_MODE_STAT(bp) \
+			(IS_E1HMF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))
+
+static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	int i, num_stats;
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+		if (is_multi(bp)) {
+			num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
+			if (!IS_E1HMF_MODE_STAT(bp))
+				num_stats += BNX2X_NUM_STATS;
+		} else {
+			if (IS_E1HMF_MODE_STAT(bp)) {
+				num_stats = 0;
+				for (i = 0; i < BNX2X_NUM_STATS; i++)
+					if (IS_FUNC_STAT(i))
+						num_stats++;
+			} else
+				num_stats = BNX2X_NUM_STATS;
+		}
+		return num_stats;
+
+	case ETH_SS_TEST:
+		return BNX2X_NUM_TESTS;
+
+	default:
+		return -EINVAL;
+	}
+}
+
+static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	int i, j, k;
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+		if (is_multi(bp)) {
+			k = 0;
+			for_each_queue(bp, i) {
+				for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
+					sprintf(buf + (k + j)*ETH_GSTRING_LEN,
+						bnx2x_q_stats_arr[j].string, i);
+				k += BNX2X_NUM_Q_STATS;
+			}
+			if (IS_E1HMF_MODE_STAT(bp))
+				break;
+			for (j = 0; j < BNX2X_NUM_STATS; j++)
+				strcpy(buf + (k + j)*ETH_GSTRING_LEN,
+				       bnx2x_stats_arr[j].string);
+		} else {
+			for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
+				if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
+					continue;
+				strcpy(buf + j*ETH_GSTRING_LEN,
+				       bnx2x_stats_arr[i].string);
+				j++;
+			}
+		}
+		break;
+
+	case ETH_SS_TEST:
+		memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
+		break;
+	}
+}
+
+static void bnx2x_get_ethtool_stats(struct net_device *dev,
+				    struct ethtool_stats *stats, u64 *buf)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	u32 *hw_stats, *offset;
+	int i, j, k;
+
+	if (is_multi(bp)) {
+		k = 0;
+		for_each_queue(bp, i) {
+			hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
+			for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
+				if (bnx2x_q_stats_arr[j].size == 0) {
+					/* skip this counter */
+					buf[k + j] = 0;
+					continue;
+				}
+				offset = (hw_stats +
+					  bnx2x_q_stats_arr[j].offset);
+				if (bnx2x_q_stats_arr[j].size == 4) {
+					/* 4-byte counter */
+					buf[k + j] = (u64) *offset;
+					continue;
+				}
+				/* 8-byte counter */
+				buf[k + j] = HILO_U64(*offset, *(offset + 1));
+			}
+			k += BNX2X_NUM_Q_STATS;
+		}
+		if (IS_E1HMF_MODE_STAT(bp))
+			return;
+		hw_stats = (u32 *)&bp->eth_stats;
+		for (j = 0; j < BNX2X_NUM_STATS; j++) {
+			if (bnx2x_stats_arr[j].size == 0) {
+				/* skip this counter */
+				buf[k + j] = 0;
+				continue;
+			}
+			offset = (hw_stats + bnx2x_stats_arr[j].offset);
+			if (bnx2x_stats_arr[j].size == 4) {
+				/* 4-byte counter */
+				buf[k + j] = (u64) *offset;
+				continue;
+			}
+			/* 8-byte counter */
+			buf[k + j] = HILO_U64(*offset, *(offset + 1));
+		}
+	} else {
+		hw_stats = (u32 *)&bp->eth_stats;
+		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
+			if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
+				continue;
+			if (bnx2x_stats_arr[i].size == 0) {
+				/* skip this counter */
+				buf[j] = 0;
+				j++;
+				continue;
+			}
+			offset = (hw_stats + bnx2x_stats_arr[i].offset);
+			if (bnx2x_stats_arr[i].size == 4) {
+				/* 4-byte counter */
+				buf[j] = (u64) *offset;
+				j++;
+				continue;
+			}
+			/* 8-byte counter */
+			buf[j] = HILO_U64(*offset, *(offset + 1));
+			j++;
+		}
+	}
+}
+
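+/* Blink the port LED: alternate operational/off every 500 ms, 'data'
+ * times (twice by default), then restore the LED to match the current
+ * link state.
+ */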
+static int bnx2x_phys_id(struct net_device *dev, u32 data)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	int i;
+
+	if (!netif_running(dev))
+		return 0;
+
+	if (!bp->port.pmf)
+		return 0;
+
+	if (data == 0)
+		data = 2;
+
+	for (i = 0; i < (data * 2); i++) {
+		if ((i % 2) == 0)
+			bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
+				      SPEED_1000);
+		else
+			bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);
+
+		msleep_interruptible(500);
+		if (signal_pending(current))
+			break;
+	}
+
+	if (bp->link_vars.link_up)
+		bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
+			      bp->link_vars.line_speed);
+
+	return 0;
+}
+
+static const struct ethtool_ops bnx2x_ethtool_ops = {
+	.get_settings		= bnx2x_get_settings,
+	.set_settings		= bnx2x_set_settings,
+	.get_drvinfo		= bnx2x_get_drvinfo,
+	.get_regs_len		= bnx2x_get_regs_len,
+	.get_regs		= bnx2x_get_regs,
+	.get_wol		= bnx2x_get_wol,
+	.set_wol		= bnx2x_set_wol,
+	.get_msglevel		= bnx2x_get_msglevel,
+	.set_msglevel		= bnx2x_set_msglevel,
+	.nway_reset		= bnx2x_nway_reset,
+	.get_link		= bnx2x_get_link,
+	.get_eeprom_len		= bnx2x_get_eeprom_len,
+	.get_eeprom		= bnx2x_get_eeprom,
+	.set_eeprom		= bnx2x_set_eeprom,
+	.get_coalesce		= bnx2x_get_coalesce,
+	.set_coalesce		= bnx2x_set_coalesce,
+	.get_ringparam		= bnx2x_get_ringparam,
+	.set_ringparam		= bnx2x_set_ringparam,
+	.get_pauseparam		= bnx2x_get_pauseparam,
+	.set_pauseparam		= bnx2x_set_pauseparam,
+	.get_rx_csum		= bnx2x_get_rx_csum,
+	.set_rx_csum		= bnx2x_set_rx_csum,
+	.get_tx_csum		= ethtool_op_get_tx_csum,
+	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
+	.set_flags		= bnx2x_set_flags,
+	.get_flags		= ethtool_op_get_flags,
+	.get_sg			= ethtool_op_get_sg,
+	.set_sg			= ethtool_op_set_sg,
+	.get_tso		= ethtool_op_get_tso,
+	.set_tso		= bnx2x_set_tso,
+	.self_test		= bnx2x_self_test,
+	.get_sset_count		= bnx2x_get_sset_count,
+	.get_strings		= bnx2x_get_strings,
+	.phys_id		= bnx2x_phys_id,
+	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
+};
+
+void bnx2x_set_ethtool_ops(struct net_device *netdev)
+{
+	SET_ETHTOOL_OPS(netdev, &bnx2x_ethtool_ops);
+}
diff --git a/drivers/net/bnx2x_fw_defs.h b/drivers/net/bnx2x/bnx2x_fw_defs.h
similarity index 100%
rename from drivers/net/bnx2x_fw_defs.h
rename to drivers/net/bnx2x/bnx2x_fw_defs.h
diff --git a/drivers/net/bnx2x_fw_file_hdr.h b/drivers/net/bnx2x/bnx2x_fw_file_hdr.h
similarity index 100%
rename from drivers/net/bnx2x_fw_file_hdr.h
rename to drivers/net/bnx2x/bnx2x_fw_file_hdr.h
diff --git a/drivers/net/bnx2x_hsi.h b/drivers/net/bnx2x/bnx2x_hsi.h
similarity index 100%
rename from drivers/net/bnx2x_hsi.h
rename to drivers/net/bnx2x/bnx2x_hsi.h
diff --git a/drivers/net/bnx2x_init.h b/drivers/net/bnx2x/bnx2x_init.h
similarity index 100%
rename from drivers/net/bnx2x_init.h
rename to drivers/net/bnx2x/bnx2x_init.h
diff --git a/drivers/net/bnx2x_init_ops.h b/drivers/net/bnx2x/bnx2x_init_ops.h
similarity index 100%
rename from drivers/net/bnx2x_init_ops.h
rename to drivers/net/bnx2x/bnx2x_init_ops.h
diff --git a/drivers/net/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c
similarity index 100%
rename from drivers/net/bnx2x_link.c
rename to drivers/net/bnx2x/bnx2x_link.c
diff --git a/drivers/net/bnx2x_link.h b/drivers/net/bnx2x/bnx2x_link.h
similarity index 100%
rename from drivers/net/bnx2x_link.h
rename to drivers/net/bnx2x/bnx2x_link.h
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
new file mode 100644
index 0000000..b4ec2b0
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -0,0 +1,8040 @@
+/* bnx2x_main.c: Broadcom Everest network driver.
+ *
+ * Copyright (c) 2007-2010 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Eliezer Tamir
+ * Based on code from Michael Chan's bnx2 driver
+ * UDP CSUM errata workaround by Arik Gendelman
+ * Slowpath and fastpath rework by Vladislav Zolotarov
+ * Statistics and Link management by Yitchak Gertner
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/device.h>  /* for dev_info() */
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/dma-mapping.h>
+#include <linux/bitops.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <asm/byteorder.h>
+#include <linux/time.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/if_vlan.h>
+#include <net/ip.h>
+#include <net/tcp.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
+#include <linux/workqueue.h>
+#include <linux/crc32.h>
+#include <linux/crc32c.h>
+#include <linux/prefetch.h>
+#include <linux/zlib.h>
+#include <linux/io.h>
+#include <linux/stringify.h>
+
+#define BNX2X_MAIN
+#include "bnx2x.h"
+#include "bnx2x_init.h"
+#include "bnx2x_init_ops.h"
+#include "bnx2x_cmn.h"
+
+
+#include <linux/firmware.h>
+#include "bnx2x_fw_file_hdr.h"
+/* FW files */
+#define FW_FILE_VERSION					\
+	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
+	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
+	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
+	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
+#define FW_FILE_NAME_E1		"bnx2x-e1-" FW_FILE_VERSION ".fw"
+#define FW_FILE_NAME_E1H	"bnx2x-e1h-" FW_FILE_VERSION ".fw"
+
+/* Time in jiffies before concluding the transmitter is hung */
+#define TX_TIMEOUT		(5*HZ)
+
+static char version[] __devinitdata =
+	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
+	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+
+MODULE_AUTHOR("Eliezer Tamir");
+MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+MODULE_FIRMWARE(FW_FILE_NAME_E1);
+MODULE_FIRMWARE(FW_FILE_NAME_E1H);
+
+static int multi_mode = 1;
+module_param(multi_mode, int, 0);
+MODULE_PARM_DESC(multi_mode, " Multi queue mode "
+			     "(0 Disable; 1 Enable (default))");
+
+static int num_queues;
+module_param(num_queues, int, 0);
+MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
+				" (default is the number of CPUs)");
+
+static int disable_tpa;
+module_param(disable_tpa, int, 0);
+MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
+
+static int int_mode;
+module_param(int_mode, int, 0);
+MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
+				"(1 INT#x; 2 MSI)");
+
+static int dropless_fc;
+module_param(dropless_fc, int, 0);
+MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
+
+static int poll;
+module_param(poll, int, 0);
+MODULE_PARM_DESC(poll, " Use polling (for debug)");
+
+static int mrrs = -1;
+module_param(mrrs, int, 0);
+MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
+
+static int debug;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, " Default debug msglevel");
+
+static struct workqueue_struct *bnx2x_wq;
+
+enum bnx2x_board_type {
+	BCM57710 = 0,
+	BCM57711 = 1,
+	BCM57711E = 2,
+};
+
+/* indexed by board_type, above */
+static struct {
+	char *name;
+} board_info[] __devinitdata = {
+	{ "Broadcom NetXtreme II BCM57710 XGb" },
+	{ "Broadcom NetXtreme II BCM57711 XGb" },
+	{ "Broadcom NetXtreme II BCM57711E XGb" }
+};
+
+
+static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
+	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
+	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
+	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
+	{ 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
+
+/****************************************************************************
+* General service functions
+****************************************************************************/
+
+/* used only at init
+ * locking is done by mcp
+ */
+void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
+{
+	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
+	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
+	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
+			       PCICFG_VENDOR_ID_OFFSET);
+}
+
+static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
+{
+	u32 val;
+
+	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
+	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
+	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
+			       PCICFG_VENDOR_ID_OFFSET);
+
+	return val;
+}
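+/* Both helpers above use the GRC window in PCI config space: the target
+ * GRC address is latched into PCICFG_GRC_ADDRESS, and PCICFG_GRC_DATA
+ * then acts as a window onto it.  The address register is restored to
+ * PCICFG_VENDOR_ID_OFFSET afterwards, presumably so that later
+ * config-space accesses cannot alias into device-internal memory.
+ */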
+
+const u32 dmae_reg_go_c[] = {
+	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
+	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
+	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
+	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
+};
+
+/* copy command into DMAE command memory and set DMAE command go */
+void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
+{
+	u32 cmd_offset;
+	int i;
+
+	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
+	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
+		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
+
+		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
+		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
+	}
+	REG_WR(bp, dmae_reg_go_c[idx], 1);
+}
+
+void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
+		      u32 len32)
+{
+	struct dmae_command dmae;
+	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
+	int cnt = 200;
+
+	if (!bp->dmae_ready) {
+		u32 *data = bnx2x_sp(bp, wb_data[0]);
+
+		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
+		   "  using indirect\n", dst_addr, len32);
+		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
+		return;
+	}
+
+	memset(&dmae, 0, sizeof(struct dmae_command));
+
+	dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
+		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
+		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
+#ifdef __BIG_ENDIAN
+		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
+#else
+		       DMAE_CMD_ENDIANITY_DW_SWAP |
+#endif
+		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
+		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
+	dmae.src_addr_lo = U64_LO(dma_addr);
+	dmae.src_addr_hi = U64_HI(dma_addr);
+	dmae.dst_addr_lo = dst_addr >> 2;
+	dmae.dst_addr_hi = 0;
+	dmae.len = len32;
+	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
+	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
+	dmae.comp_val = DMAE_COMP_VAL;
+
+	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
+	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
+		    "dst_addr [%x:%08x (%08x)]\n"
+	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
+	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
+	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
+	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
+	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
+	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
+	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
+
+	mutex_lock(&bp->dmae_mutex);
+
+	*wb_comp = 0;
+
+	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
+
+	udelay(5);
+
+	while (*wb_comp != DMAE_COMP_VAL) {
+		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
+
+		if (!cnt) {
+			BNX2X_ERR("DMAE timeout!\n");
+			break;
+		}
+		cnt--;
+		/* adjust delay for emulation/FPGA */
+		if (CHIP_REV_IS_SLOW(bp))
+			msleep(100);
+		else
+			udelay(5);
+	}
+
+	mutex_unlock(&bp->dmae_mutex);
+}
+
+void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
+{
+	struct dmae_command dmae;
+	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
+	int cnt = 200;
+
+	if (!bp->dmae_ready) {
+		u32 *data = bnx2x_sp(bp, wb_data[0]);
+		int i;
+
+		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
+		   "  using indirect\n", src_addr, len32);
+		for (i = 0; i < len32; i++)
+			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
+		return;
+	}
+
+	memset(&dmae, 0, sizeof(struct dmae_command));
+
+	dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
+		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
+		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
+#ifdef __BIG_ENDIAN
+		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
+#else
+		       DMAE_CMD_ENDIANITY_DW_SWAP |
+#endif
+		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
+		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
+	dmae.src_addr_lo = src_addr >> 2;
+	dmae.src_addr_hi = 0;
+	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
+	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
+	dmae.len = len32;
+	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
+	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
+	dmae.comp_val = DMAE_COMP_VAL;
+
+	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
+	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
+		    "dst_addr [%x:%08x (%08x)]\n"
+	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
+	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
+	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
+	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
+
+	mutex_lock(&bp->dmae_mutex);
+
+	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
+	*wb_comp = 0;
+
+	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
+
+	udelay(5);
+
+	while (*wb_comp != DMAE_COMP_VAL) {
+
+		if (!cnt) {
+			BNX2X_ERR("DMAE timeout!\n");
+			break;
+		}
+		cnt--;
+		/* adjust delay for emulation/FPGA */
+		if (CHIP_REV_IS_SLOW(bp))
+			msleep(100);
+		else
+			udelay(5);
+	}
+	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
+	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
+	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
+
+	mutex_unlock(&bp->dmae_mutex);
+}
+
+void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
+			       u32 addr, u32 len)
+{
+	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
+	int offset = 0;
+
+	while (len > dmae_wr_max) {
+		bnx2x_write_dmae(bp, phys_addr + offset,
+				 addr + offset, dmae_wr_max);
+		offset += dmae_wr_max * 4;
+		len -= dmae_wr_max;
+	}
+
+	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
+}
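+/* Worked example (chunk size illustrative): with DMAE_LEN32_WR_MAX of
+ * 0x400 dwords, a len of 0x900 dwords is issued as three DMAE commands
+ * of 0x400, 0x400 and 0x100 dwords; offset advances in bytes
+ * (dwords * 4) while len is counted in dwords.
+ */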
+
+/* used only for slowpath so not inlined */
+static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
+{
+	u32 wb_write[2];
+
+	wb_write[0] = val_hi;
+	wb_write[1] = val_lo;
+	REG_WR_DMAE(bp, reg, wb_write, 2);
+}
+
+#ifdef USE_WB_RD
+static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
+{
+	u32 wb_data[2];
+
+	REG_RD_DMAE(bp, reg, wb_data, 2);
+
+	return HILO_U64(wb_data[0], wb_data[1]);
+}
+#endif
+
+static int bnx2x_mc_assert(struct bnx2x *bp)
+{
+	char last_idx;
+	int i, rc = 0;
+	u32 row0, row1, row2, row3;
+
+	/* XSTORM */
+	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
+			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
+	if (last_idx)
+		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
+
+	/* print the asserts */
+	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
+
+		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
+			      XSTORM_ASSERT_LIST_OFFSET(i));
+		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
+			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
+		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
+			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
+		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
+			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);
+
+		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
+			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
+				  " 0x%08x 0x%08x 0x%08x\n",
+				  i, row3, row2, row1, row0);
+			rc++;
+		} else {
+			break;
+		}
+	}
+
+	/* TSTORM */
+	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
+			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
+	if (last_idx)
+		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
+
+	/* print the asserts */
+	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
+
+		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
+			      TSTORM_ASSERT_LIST_OFFSET(i));
+		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
+			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
+		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
+			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
+		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
+			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);
+
+		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
+			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
+				  " 0x%08x 0x%08x 0x%08x\n",
+				  i, row3, row2, row1, row0);
+			rc++;
+		} else {
+			break;
+		}
+	}
+
+	/* CSTORM */
+	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
+			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
+	if (last_idx)
+		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
+
+	/* print the asserts */
+	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
+
+		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
+			      CSTORM_ASSERT_LIST_OFFSET(i));
+		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
+			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
+		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
+			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
+		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
+			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);
+
+		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
+			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
+				  " 0x%08x 0x%08x 0x%08x\n",
+				  i, row3, row2, row1, row0);
+			rc++;
+		} else {
+			break;
+		}
+	}
+
+	/* USTORM */
+	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
+			   USTORM_ASSERT_LIST_INDEX_OFFSET);
+	if (last_idx)
+		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
+
+	/* print the asserts */
+	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
+
+		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
+			      USTORM_ASSERT_LIST_OFFSET(i));
+		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
+			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
+		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
+			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
+		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
+			      USTORM_ASSERT_LIST_OFFSET(i) + 12);
+
+		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
+			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
+				  " 0x%08x 0x%08x 0x%08x\n",
+				  i, row3, row2, row1, row0);
+			rc++;
+		} else {
+			break;
+		}
+	}
+
+	return rc;
+}
+
+static void bnx2x_fw_dump(struct bnx2x *bp)
+{
+	u32 addr;
+	u32 mark, offset;
+	__be32 data[9];
+	int word;
+
+	if (BP_NOMCP(bp)) {
+		BNX2X_ERR("NO MCP - can not dump\n");
+		return;
+	}
+
+	addr = bp->common.shmem_base - 0x0800 + 4;
+	mark = REG_RD(bp, addr);
+	mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
+	pr_err("begin fw dump (mark 0x%x)\n", mark);
+
+	pr_err("");
+	for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) {
+		for (word = 0; word < 8; word++)
+			data[word] = htonl(REG_RD(bp, offset + 4*word));
+		data[8] = 0x0;
+		pr_cont("%s", (char *)data);
+	}
+	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
+		for (word = 0; word < 8; word++)
+			data[word] = htonl(REG_RD(bp, offset + 4*word));
+		data[8] = 0x0;
+		pr_cont("%s", (char *)data);
+	}
+	pr_err("end of fw dump\n");
+}
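+/* The MCP scratchpad is a ring buffer: "mark" points at the oldest data,
+ * so the dump above prints from mark to the end of the buffer first and
+ * then wraps around to print from the start of the buffer up to mark.
+ */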
+
+void bnx2x_panic_dump(struct bnx2x *bp)
+{
+	int i;
+	u16 j, start, end;
+
+	bp->stats_state = STATS_STATE_DISABLED;
+	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
+
+	BNX2X_ERR("begin crash dump -----------------\n");
+
+	/* Indices */
+	/* Common */
+	BNX2X_ERR("def_c_idx(0x%x)  def_u_idx(0x%x)  def_x_idx(0x%x)"
+		  "  def_t_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)"
+		  "  spq_prod_idx(0x%x)\n",
+		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
+		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
+
+	/* Rx */
+	for_each_queue(bp, i) {
+		struct bnx2x_fastpath *fp = &bp->fp[i];
+
+		BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)"
+			  "  *rx_bd_cons_sb(0x%x)  rx_comp_prod(0x%x)"
+			  "  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
+			  i, fp->rx_bd_prod, fp->rx_bd_cons,
+			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
+			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
+		BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)"
+			  "  fp_u_idx(0x%x) *sb_u_idx(0x%x)\n",
+			  fp->rx_sge_prod, fp->last_max_sge,
+			  le16_to_cpu(fp->fp_u_idx),
+			  fp->status_blk->u_status_block.status_block_index);
+	}
+
+	/* Tx */
+	for_each_queue(bp, i) {
+		struct bnx2x_fastpath *fp = &bp->fp[i];
+
+		BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)"
+			  "  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)"
+			  "  *tx_cons_sb(0x%x)\n",
+			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
+			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
+		BNX2X_ERR("     fp_c_idx(0x%x)  *sb_c_idx(0x%x)"
+			  "  tx_db_prod(0x%x)\n", le16_to_cpu(fp->fp_c_idx),
+			  fp->status_blk->c_status_block.status_block_index,
+			  fp->tx_db.data.prod);
+	}
+
+	/* Rings */
+	/* Rx */
+	for_each_queue(bp, i) {
+		struct bnx2x_fastpath *fp = &bp->fp[i];
+
+		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
+		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
+		for (j = start; j != end; j = RX_BD(j + 1)) {
+			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
+			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
+
+			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
+				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
+		}
+
+		start = RX_SGE(fp->rx_sge_prod);
+		end = RX_SGE(fp->last_max_sge);
+		for (j = start; j != end; j = RX_SGE(j + 1)) {
+			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
+			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
+
+			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
+				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
+		}
+
+		start = RCQ_BD(fp->rx_comp_cons - 10);
+		end = RCQ_BD(fp->rx_comp_cons + 503);
+		for (j = start; j != end; j = RCQ_BD(j + 1)) {
+			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
+
+			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
+				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
+		}
+	}
+
+	/* Tx */
+	for_each_queue(bp, i) {
+		struct bnx2x_fastpath *fp = &bp->fp[i];
+
+		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
+		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
+		for (j = start; j != end; j = TX_BD(j + 1)) {
+			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
+
+			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
+				  i, j, sw_bd->skb, sw_bd->first_bd);
+		}
+
+		start = TX_BD(fp->tx_bd_cons - 10);
+		end = TX_BD(fp->tx_bd_cons + 254);
+		for (j = start; j != end; j = TX_BD(j + 1)) {
+			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
+
+			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
+				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
+		}
+	}
+
+	bnx2x_fw_dump(bp);
+	bnx2x_mc_assert(bp);
+	BNX2X_ERR("end crash dump -----------------\n");
+}
+
+void bnx2x_int_enable(struct bnx2x *bp)
+{
+	int port = BP_PORT(bp);
+	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
+	u32 val = REG_RD(bp, addr);
+	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
+	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
+
+	if (msix) {
+		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
+			 HC_CONFIG_0_REG_INT_LINE_EN_0);
+		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
+			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
+	} else if (msi) {
+		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
+		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
+			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
+			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
+	} else {
+		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
+			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
+			HC_CONFIG_0_REG_INT_LINE_EN_0 |
+			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
+
+		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
+		   val, port, addr);
+
+		REG_WR(bp, addr, val);
+
+		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
+	}
+
+	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
+	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
+
+	REG_WR(bp, addr, val);
+	/*
+	 * Ensure that HC_CONFIG is written before leading/trailing edge config
+	 */
+	mmiowb();
+	barrier();
+
+	if (CHIP_IS_E1H(bp)) {
+		/* init leading/trailing edge */
+		if (IS_E1HMF(bp)) {
+			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
+			if (bp->port.pmf)
+				/* enable nig and gpio3 attention */
+				val |= 0x1100;
+		} else
+			val = 0xffff;
+
+		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
+		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
+	}
+
+	/* Make sure that interrupts are indeed enabled from here on */
+	mmiowb();
+}
+
+static void bnx2x_int_disable(struct bnx2x *bp)
+{
+	int port = BP_PORT(bp);
+	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
+	u32 val = REG_RD(bp, addr);
+
+	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
+		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
+		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
+		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
+
+	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
+	   val, port, addr);
+
+	/* flush all outstanding writes */
+	mmiowb();
+
+	REG_WR(bp, addr, val);
+	if (REG_RD(bp, addr) != val)
+		BNX2X_ERR("BUG! proper val not read from IGU!\n");
+}
+
+void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
+{
+	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
+	int i, offset;
+
+	/* disable interrupt handling */
+	atomic_inc(&bp->intr_sem);
+	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
+
+	if (disable_hw)
+		/* prevent the HW from sending interrupts */
+		bnx2x_int_disable(bp);
+
+	/* make sure all ISRs are done */
+	if (msix) {
+		synchronize_irq(bp->msix_table[0].vector);
+		offset = 1;
+#ifdef BCM_CNIC
+		offset++;
+#endif
+		for_each_queue(bp, i)
+			synchronize_irq(bp->msix_table[i + offset].vector);
+	} else
+		synchronize_irq(bp->pdev->irq);
+
+	/* make sure sp_task is not running */
+	cancel_delayed_work(&bp->sp_task);
+	flush_workqueue(bnx2x_wq);
+}
+
+/* fast path */
+
+/*
+ * General service functions
+ */
+
+/* Return true if succeeded to acquire the lock */
+static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
+{
+	u32 lock_status;
+	u32 resource_bit = (1 << resource);
+	int func = BP_FUNC(bp);
+	u32 hw_lock_control_reg;
+
+	DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
+
+	/* Validating that the resource is within range */
+	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
+		DP(NETIF_MSG_HW,
+		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
+		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
+		return false;	/* bool function: an errno here would read as true */
+	}
+
+	if (func <= 5)
+		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
+	else
+		hw_lock_control_reg =
+				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
+
+	/* Try to acquire the lock */
+	REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
+	lock_status = REG_RD(bp, hw_lock_control_reg);
+	if (lock_status & resource_bit)
+		return true;
+
+	DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
+	return false;
+}
+
+
+#ifdef BCM_CNIC
+static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
+#endif
+
+void bnx2x_sp_event(struct bnx2x_fastpath *fp,
+			   union eth_rx_cqe *rr_cqe)
+{
+	struct bnx2x *bp = fp->bp;
+	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
+	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
+
+	DP(BNX2X_MSG_SP,
+	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
+	   fp->index, cid, command, bp->state,
+	   rr_cqe->ramrod_cqe.ramrod_type);
+
+	bp->spq_left++;
+
+	if (fp->index) {
+		switch (command | fp->state) {
+		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
+						BNX2X_FP_STATE_OPENING):
+			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
+			   cid);
+			fp->state = BNX2X_FP_STATE_OPEN;
+			break;
+
+		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
+			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
+			   cid);
+			fp->state = BNX2X_FP_STATE_HALTED;
+			break;
+
+		default:
+			BNX2X_ERR("unexpected MC reply (%d)  "
+				  "fp[%d] state is %x\n",
+				  command, fp->index, fp->state);
+			break;
+		}
+		mb(); /* force bnx2x_wait_ramrod() to see the change */
+		return;
+	}
+
+	switch (command | bp->state) {
+	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
+		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
+		bp->state = BNX2X_STATE_OPEN;
+		break;
+
+	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
+		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
+		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
+		fp->state = BNX2X_FP_STATE_HALTED;
+		break;
+
+	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
+		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
+		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
+		break;
+
+#ifdef BCM_CNIC
+	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
+		DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
+		bnx2x_cnic_cfc_comp(bp, cid);
+		break;
+#endif
+
+	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
+	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
+		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
+		bp->set_mac_pending--;
+		smp_wmb();
+		break;
+
+	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
+		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
+		bp->set_mac_pending--;
+		smp_wmb();
+		break;
+
+	default:
+		BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
+			  command, bp->state);
+		break;
+	}
+	mb(); /* force bnx2x_wait_ramrod() to see the change */
+}
+
+irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
+{
+	struct bnx2x *bp = netdev_priv(dev_instance);
+	u16 status = bnx2x_ack_int(bp);
+	u16 mask;
+	int i;
+
+	/* Return here if interrupt is shared and it's not for us */
+	if (unlikely(status == 0)) {
+		DP(NETIF_MSG_INTR, "not our interrupt!\n");
+		return IRQ_NONE;
+	}
+	DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
+
+	/* Return here if interrupt is disabled */
+	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
+		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
+		return IRQ_HANDLED;
+	}
+
+#ifdef BNX2X_STOP_ON_ERROR
+	if (unlikely(bp->panic))
+		return IRQ_HANDLED;
+#endif
+
+	for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
+		struct bnx2x_fastpath *fp = &bp->fp[i];
+
+		mask = 0x2 << fp->sb_id;
+		if (status & mask) {
+			/* Handle Rx and Tx according to SB id */
+			prefetch(fp->rx_cons_sb);
+			prefetch(&fp->status_blk->u_status_block.
+						status_block_index);
+			prefetch(fp->tx_cons_sb);
+			prefetch(&fp->status_blk->c_status_block.
+						status_block_index);
+			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
+			status &= ~mask;
+		}
+	}
+
+#ifdef BCM_CNIC
+	mask = 0x2 << CNIC_SB_ID(bp);
+	if (status & (mask | 0x1)) {
+		struct cnic_ops *c_ops = NULL;
+
+		rcu_read_lock();
+		c_ops = rcu_dereference(bp->cnic_ops);
+		if (c_ops)
+			c_ops->cnic_handler(bp->cnic_data, NULL);
+		rcu_read_unlock();
+
+		status &= ~mask;
+	}
+#endif
+
+	if (unlikely(status & 0x1)) {
+		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
+
+		status &= ~0x1;
+		if (!status)
+			return IRQ_HANDLED;
+	}
+
+	if (unlikely(status))
+		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
+		   status);
+
+	return IRQ_HANDLED;
+}
+
+/* end of fast path */
+
+
+/* Link */
+
+/*
+ * General service functions
+ */
+
+int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
+{
+	u32 lock_status;
+	u32 resource_bit = (1 << resource);
+	int func = BP_FUNC(bp);
+	u32 hw_lock_control_reg;
+	int cnt;
+
+	/* Validating that the resource is within range */
+	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
+		DP(NETIF_MSG_HW,
+		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
+		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
+		return -EINVAL;
+	}
+
+	if (func <= 5) {
+		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
+	} else {
+		hw_lock_control_reg =
+				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
+	}
+
+	/* Validating that the resource is not already taken */
+	lock_status = REG_RD(bp, hw_lock_control_reg);
+	if (lock_status & resource_bit) {
+		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
+		   lock_status, resource_bit);
+		return -EEXIST;
+	}
+
+	/* Try for 5 seconds, polling every 5ms */
+	for (cnt = 0; cnt < 1000; cnt++) {
+		/* Try to acquire the lock */
+		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
+		lock_status = REG_RD(bp, hw_lock_control_reg);
+		if (lock_status & resource_bit)
+			return 0;
+
+		msleep(5);
+	}
+	DP(NETIF_MSG_HW, "Timeout\n");
+	return -EAGAIN;
+}
+
+int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
+{
+	u32 lock_status;
+	u32 resource_bit = (1 << resource);
+	int func = BP_FUNC(bp);
+	u32 hw_lock_control_reg;
+
+	DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
+
+	/* Validating that the resource is within range */
+	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
+		DP(NETIF_MSG_HW,
+		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
+		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
+		return -EINVAL;
+	}
+
+	if (func <= 5) {
+		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
+	} else {
+		hw_lock_control_reg =
+				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
+	}
+
+	/* Validating that the resource is currently taken */
+	lock_status = REG_RD(bp, hw_lock_control_reg);
+	if (!(lock_status & resource_bit)) {
+		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
+		   lock_status, resource_bit);
+		return -EFAULT;
+	}
+
+	REG_WR(bp, hw_lock_control_reg, resource_bit);
+	return 0;
+}
+
+
+int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
+{
+	/* The GPIO should be swapped if swap register is set and active */
+	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
+			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
+	int gpio_shift = gpio_num +
+			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
+	u32 gpio_mask = (1 << gpio_shift);
+	u32 gpio_reg;
+	int value;
+
+	if (gpio_num > MISC_REGISTERS_GPIO_3) {
+		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
+		return -EINVAL;
+	}
+
+	/* read GPIO value */
+	gpio_reg = REG_RD(bp, MISC_REG_GPIO);
+
+	/* get the requested pin value */
+	if ((gpio_reg & gpio_mask) == gpio_mask)
+		value = 1;
+	else
+		value = 0;
+
+	DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
+
+	return value;
+}
+
+int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
+{
+	/* The GPIO should be swapped if swap register is set and active */
+	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
+			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
+	int gpio_shift = gpio_num +
+			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
+	u32 gpio_mask = (1 << gpio_shift);
+	u32 gpio_reg;
+
+	if (gpio_num > MISC_REGISTERS_GPIO_3) {
+		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
+		return -EINVAL;
+	}
+
+	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
+	/* read GPIO and mask except the float bits */
+	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
+
+	switch (mode) {
+	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
+		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
+		   gpio_num, gpio_shift);
+		/* clear FLOAT and set CLR */
+		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
+		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
+		break;
+
+	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
+		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
+		   gpio_num, gpio_shift);
+		/* clear FLOAT and set SET */
+		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
+		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
+		break;
+
+	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
+		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
+		   gpio_num, gpio_shift);
+		/* set FLOAT */
+		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
+		break;
+
+	default:
+		break;
+	}
+
+	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
+	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
+
+	return 0;
+}
+
+int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
+{
+	/* The GPIO should be swapped if swap register is set and active */
+	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
+			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
+	int gpio_shift = gpio_num +
+			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
+	u32 gpio_mask = (1 << gpio_shift);
+	u32 gpio_reg;
+
+	if (gpio_num > MISC_REGISTERS_GPIO_3) {
+		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
+		return -EINVAL;
+	}
+
+	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
+	/* read GPIO int */
+	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
+
+	switch (mode) {
+	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
+		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
+				   "output low\n", gpio_num, gpio_shift);
+		/* clear SET and set CLR */
+		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
+		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
+		break;
+
+	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
+		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
+				   "output high\n", gpio_num, gpio_shift);
+		/* clear CLR and set SET */
+		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
+		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
+		break;
+
+	default:
+		break;
+	}
+
+	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
+	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
+
+	return 0;
+}
+
+static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
+{
+	u32 spio_mask = (1 << spio_num);
+	u32 spio_reg;
+
+	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
+	    (spio_num > MISC_REGISTERS_SPIO_7)) {
+		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
+		return -EINVAL;
+	}
+
+	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
+	/* read SPIO and mask except the float bits */
+	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
+
+	switch (mode) {
+	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
+		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
+		/* clear FLOAT and set CLR */
+		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
+		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
+		break;
+
+	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
+		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
+		/* clear FLOAT and set SET */
+		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
+		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
+		break;
+
+	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
+		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
+		/* set FLOAT */
+		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
+		break;
+
+	default:
+		break;
+	}
+
+	REG_WR(bp, MISC_REG_SPIO, spio_reg);
+	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
+
+	return 0;
+}
+
+void bnx2x_calc_fc_adv(struct bnx2x *bp)
+{
+	switch (bp->link_vars.ieee_fc &
+		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
+	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
+		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
+					  ADVERTISED_Pause);
+		break;
+
+	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
+		bp->port.advertising |= (ADVERTISED_Asym_Pause |
+					 ADVERTISED_Pause);
+		break;
+
+	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
+		bp->port.advertising |= ADVERTISED_Asym_Pause;
+		break;
+
+	default:
+		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
+					  ADVERTISED_Pause);
+		break;
+	}
+}
+
+
+u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
+{
+	if (!BP_NOMCP(bp)) {
+		u8 rc;
+
+		/* Initialize link parameters structure variables */
+		/* It is recommended to turn off RX FC for jumbo frames
+		   for better performance */
+		if (bp->dev->mtu > 5000)
+			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
+		else
+			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
+
+		bnx2x_acquire_phy_lock(bp);
+
+		if (load_mode == LOAD_DIAG)
+			bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
+
+		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
+
+		bnx2x_release_phy_lock(bp);
+
+		bnx2x_calc_fc_adv(bp);
+
+		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
+			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
+			bnx2x_link_report(bp);
+		}
+
+		return rc;
+	}
+	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
+	return -EINVAL;
+}
+
+void bnx2x_link_set(struct bnx2x *bp)
+{
+	if (!BP_NOMCP(bp)) {
+		bnx2x_acquire_phy_lock(bp);
+		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
+		bnx2x_release_phy_lock(bp);
+
+		bnx2x_calc_fc_adv(bp);
+	} else
+		BNX2X_ERR("Bootcode is missing - can not set link\n");
+}
+
+static void bnx2x__link_reset(struct bnx2x *bp)
+{
+	if (!BP_NOMCP(bp)) {
+		bnx2x_acquire_phy_lock(bp);
+		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
+		bnx2x_release_phy_lock(bp);
+	} else
+		BNX2X_ERR("Bootcode is missing - can not reset link\n");
+}
+
+u8 bnx2x_link_test(struct bnx2x *bp)
+{
+	u8 rc = 0;
+
+	if (!BP_NOMCP(bp)) {
+		bnx2x_acquire_phy_lock(bp);
+		rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
+		bnx2x_release_phy_lock(bp);
+	} else
+		BNX2X_ERR("Bootcode is missing - can not test link\n");
+
+	return rc;
+}
+
+static void bnx2x_init_port_minmax(struct bnx2x *bp)
+{
+	u32 r_param = bp->link_vars.line_speed / 8;
+	u32 fair_periodic_timeout_usec;
+	u32 t_fair;
+
+	memset(&(bp->cmng.rs_vars), 0,
+	       sizeof(struct rate_shaping_vars_per_port));
+	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
+
+	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
+	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
+
+	/* this is the threshold below which no timer arming will occur;
+	   the 1.25 coefficient makes the threshold a little bigger than
+	   the real time, to compensate for timer inaccuracy */
+	bp->cmng.rs_vars.rs_threshold =
+				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
+
+	/* resolution of fairness timer */
+	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
+	/* for 10G it is 1000usec. for 1G it is 10000usec. */
+	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
+
+	/* this is the threshold below which we won't arm the timer anymore */
+	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
+
+	/* we multiply by 1e3/8 to get bytes/msec.
+	   We don't want the credit to exceed
+	   t_fair * FAIR_MEM (the algorithm resolution) */
+	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
+	/* since each tick is 4 usec */
+	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
+}
+
+/* Calculates the sum of vn_min_rates and stores it in bp->vn_weight_sum.
+   It's needed for further normalizing of the min_rates.
+   Any zero min_rate is counted as DEF_MIN_RATE; if *all* min_rates are
+   zero, the fairness algorithm is deactivated for the port, so the sum
+   is never used in that case.
+ */
+static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
+{
+	int all_zero = 1;
+	int port = BP_PORT(bp);
+	int vn;
+
+	bp->vn_weight_sum = 0;
+	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
+		int func = 2*vn + port;
+		u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
+		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
+				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
+
+		/* Skip hidden vns */
+		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
+			continue;
+
+		/* If min rate is zero - set it to 1 */
+		if (!vn_min_rate)
+			vn_min_rate = DEF_MIN_RATE;
+		else
+			all_zero = 0;
+
+		bp->vn_weight_sum += vn_min_rate;
+	}
+
+	/* ... only if all min rates are zeros - disable fairness */
+	if (all_zero) {
+		bp->cmng.flags.cmng_enables &=
+					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
+		DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
+		   "  fairness will be disabled\n");
+	} else
+		bp->cmng.flags.cmng_enables |=
+					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
+}
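+/* Example (values illustrative): with two visible vns whose MIN_BW
+ * fields are 25 and 75, vn_min_rate becomes 2500 and 7500 and
+ * vn_weight_sum is 10000.  If every MIN_BW field is 0, each vn is
+ * counted as DEF_MIN_RATE but fairness is disabled, so those defaults
+ * never take effect on the wire.
+ */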
+
+static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
+{
+	struct rate_shaping_vars_per_vn m_rs_vn;
+	struct fairness_vars_per_vn m_fair_vn;
+	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
+	u16 vn_min_rate, vn_max_rate;
+	int i;
+
+	/* If function is hidden - set min and max to zeroes */
+	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
+		vn_min_rate = 0;
+		vn_max_rate = 0;
+
+	} else {
+		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
+				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
+		/* If min rate is zero - set it to 1 */
+		if (!vn_min_rate)
+			vn_min_rate = DEF_MIN_RATE;
+		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
+				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
+	}
+	DP(NETIF_MSG_IFUP,
+	   "func %d: vn_min_rate %d  vn_max_rate %d  vn_weight_sum %d\n",
+	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
+
+	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
+	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
+
+	/* global vn counter - maximal Mbps for this vn */
+	m_rs_vn.vn_counter.rate = vn_max_rate;
+
+	/* quota - number of bytes transmitted in this period */
+	m_rs_vn.vn_counter.quota =
+				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
+
+	if (bp->vn_weight_sum) {
+		/* credit for each period of the fairness algorithm:
+		   number of bytes in T_FAIR (the vns share the port rate).
+		   vn_weight_sum should not be larger than 10000, thus
+		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
+		   than zero */
+		m_fair_vn.vn_credit_delta =
+			max_t(u32, (vn_min_rate * (T_FAIR_COEF /
+						   (8 * bp->vn_weight_sum))),
+			      (bp->cmng.fair_vars.fair_threshold * 2));
+		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
+		   m_fair_vn.vn_credit_delta);
+	}
+
+	/* Store it to internal memory */
+	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
+		REG_WR(bp, BAR_XSTRORM_INTMEM +
+		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
+		       ((u32 *)(&m_rs_vn))[i]);
+
+	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
+		REG_WR(bp, BAR_XSTRORM_INTMEM +
+		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
+		       ((u32 *)(&m_fair_vn))[i]);
+}
+
+
+/* This function is called upon link interrupt */
+static void bnx2x_link_attn(struct bnx2x *bp)
+{
+	u32 prev_link_status = bp->link_vars.link_status;
+	/* Make sure that we are synced with the current statistics */
+	bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+
+	bnx2x_link_update(&bp->link_params, &bp->link_vars);
+
+	if (bp->link_vars.link_up) {
+
+		/* dropless flow control */
+		if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
+			int port = BP_PORT(bp);
+			u32 pause_enabled = 0;
+
+			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
+				pause_enabled = 1;
+
+			REG_WR(bp, BAR_USTRORM_INTMEM +
+			       USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
+			       pause_enabled);
+		}
+
+		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
+			struct host_port_stats *pstats;
+
+			pstats = bnx2x_sp(bp, port_stats);
+			/* reset old bmac stats */
+			memset(&(pstats->mac_stx[0]), 0,
+			       sizeof(struct mac_stx));
+		}
+		if (bp->state == BNX2X_STATE_OPEN)
+			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
+	}
+
+	/* indicate link status only if link status actually changed */
+	if (prev_link_status != bp->link_vars.link_status)
+		bnx2x_link_report(bp);
+
+	if (IS_E1HMF(bp)) {
+		int port = BP_PORT(bp);
+		int func;
+		int vn;
+
+		/* Set the attention towards other drivers on the same port */
+		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
+			if (vn == BP_E1HVN(bp))
+				continue;
+
+			func = ((vn << 1) | port);
+			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
+			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
+		}
+
+		if (bp->link_vars.link_up) {
+			int i;
+
+			/* Init rate shaping and fairness contexts */
+			bnx2x_init_port_minmax(bp);
+
+			for (vn = VN_0; vn < E1HVN_MAX; vn++)
+				bnx2x_init_vn_minmax(bp, 2*vn + port);
+
+			/* Store it to internal memory */
+			for (i = 0;
+			     i < sizeof(struct cmng_struct_per_port) / 4; i++)
+				REG_WR(bp, BAR_XSTRORM_INTMEM +
+				  XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
+				       ((u32 *)(&bp->cmng))[i]);
+		}
+	}
+}
+
+void bnx2x__link_status_update(struct bnx2x *bp)
+{
+	if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
+		return;
+
+	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
+
+	if (bp->link_vars.link_up)
+		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
+	else
+		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+
+	bnx2x_calc_vn_weight_sum(bp);
+
+	/* indicate link status */
+	bnx2x_link_report(bp);
+}
+
+static void bnx2x_pmf_update(struct bnx2x *bp)
+{
+	int port = BP_PORT(bp);
+	u32 val;
+
+	bp->port.pmf = 1;
+	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
+
+	/* enable nig attention */
+	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
+	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
+	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
+
+	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
+}
+
+/* end of Link */
+
+/* slow path */
+
+/*
+ * General service functions
+ */
+
+/* send the MCP a request, block until there is a reply */
+u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
+{
+	int func = BP_FUNC(bp);
+	u32 seq = ++bp->fw_seq;
+	u32 rc = 0;
+	u32 cnt = 1;
+	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
+
+	mutex_lock(&bp->fw_mb_mutex);
+	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
+	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
+
+	do {
+		/* let the FW do its magic ... */
+		msleep(delay);
+
+		rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
+
+		/* Give the FW up to 5 seconds (500*10ms) */
+	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
+
+	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
+	   cnt*delay, rc, seq);
+
+	/* is this a reply to our command? */
+	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
+		rc &= FW_MSG_CODE_MASK;
+	else {
+		/* FW BUG! */
+		BNX2X_ERR("FW failed to respond!\n");
+		bnx2x_fw_dump(bp);
+		rc = 0;
+	}
+	mutex_unlock(&bp->fw_mb_mutex);
+
+	return rc;
+}
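+/* The sequence number lives in the low bits of the mailbox word
+ * (FW_MSG_SEQ_NUMBER_MASK) and the command/response code in the rest:
+ * the driver posts (command | seq) and the FW acks by echoing the same
+ * seq back in fw_mb_header, so a stale reply left over from an earlier
+ * command is simply ignored by the polling loop above.
+ */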
+
+static void bnx2x_e1h_disable(struct bnx2x *bp)
+{
+	int port = BP_PORT(bp);
+
+	netif_tx_disable(bp->dev);
+
+	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
+
+	netif_carrier_off(bp->dev);
+}
+
+static void bnx2x_e1h_enable(struct bnx2x *bp)
+{
+	int port = BP_PORT(bp);
+
+	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
+
+	/* Tx queues need only be re-enabled */
+	netif_tx_wake_all_queues(bp->dev);
+
+	/*
+	 * Should not call netif_carrier_on since it will be called if the link
+	 * is up when checking for link state
+	 */
+}
+
+static void bnx2x_update_min_max(struct bnx2x *bp)
+{
+	int port = BP_PORT(bp);
+	int vn, i;
+
+	/* Init rate shaping and fairness contexts */
+	bnx2x_init_port_minmax(bp);
+
+	bnx2x_calc_vn_weight_sum(bp);
+
+	for (vn = VN_0; vn < E1HVN_MAX; vn++)
+		bnx2x_init_vn_minmax(bp, 2*vn + port);
+
+	if (bp->port.pmf) {
+		int func;
+
+		/* Set the attention towards other drivers on the same port */
+		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
+			if (vn == BP_E1HVN(bp))
+				continue;
+
+			func = ((vn << 1) | port);
+			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
+			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
+		}
+
+		/* Store it to internal memory */
+		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
+			REG_WR(bp, BAR_XSTRORM_INTMEM +
+			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
+			       ((u32 *)(&bp->cmng))[i]);
+	}
+}
+
+static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
+{
+	DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
+
+	if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
+
+		/*
+		 * This is the only place besides the function initialization
+		 * where the bp->flags can change so it is done without any
+		 * locks
+		 */
+		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
+			DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
+			bp->flags |= MF_FUNC_DIS;
+
+			bnx2x_e1h_disable(bp);
+		} else {
+			DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
+			bp->flags &= ~MF_FUNC_DIS;
+
+			bnx2x_e1h_enable(bp);
+		}
+		dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
+	}
+	if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
+
+		bnx2x_update_min_max(bp);
+		dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
+	}
+
+	/* Report results to MCP */
+	if (dcc_event)
+		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
+	else
+		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
+}
+
+/* must be called under the spq lock */
+static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
+{
+	struct eth_spe *next_spe = bp->spq_prod_bd;
+
+	if (bp->spq_prod_bd == bp->spq_last_bd) {
+		bp->spq_prod_bd = bp->spq;
+		bp->spq_prod_idx = 0;
+		DP(NETIF_MSG_TIMER, "end of spq\n");
+	} else {
+		bp->spq_prod_bd++;
+		bp->spq_prod_idx++;
+	}
+	return next_spe;
+}
+
+/* must be called under the spq lock */
+static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
+{
+	int func = BP_FUNC(bp);
+
+	/* Make sure that BD data is updated before writing the producer */
+	wmb();
+
+	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
+	       bp->spq_prod_idx);
+	mmiowb();
+}
+
+/* the slow path queue is odd since completions arrive on the fastpath ring */
+int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
+			 u32 data_hi, u32 data_lo, int common)
+{
+	struct eth_spe *spe;
+
+#ifdef BNX2X_STOP_ON_ERROR
+	if (unlikely(bp->panic))
+		return -EIO;
+#endif
+
+	spin_lock_bh(&bp->spq_lock);
+
+	if (!bp->spq_left) {
+		BNX2X_ERR("BUG! SPQ ring full!\n");
+		spin_unlock_bh(&bp->spq_lock);
+		bnx2x_panic();
+		return -EBUSY;
+	}
+
+	spe = bnx2x_sp_get_next(bp);
+
+	/* CID needs the port number to be encoded in it */
+	spe->hdr.conn_and_cmd_data =
+			cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
+				    HW_CID(bp, cid));
+	spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
+	if (common)
+		spe->hdr.type |=
+			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
+
+	spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
+	spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
+
+	bp->spq_left--;
+
+	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
+	   "SPQE[%x] (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
+	   bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
+	   (u32)(U64_LO(bp->spq_mapping) +
+	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
+	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
+
+	bnx2x_sp_prod_update(bp);
+	spin_unlock_bh(&bp->spq_lock);
+	return 0;
+}
+
+/* acquire split MCP access lock register */
+static int bnx2x_acquire_alr(struct bnx2x *bp)
+{
+	u32 j, val;
+	int rc = 0;
+
+	might_sleep();
+	for (j = 0; j < 1000; j++) {
+		val = (1UL << 31);
+		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
+		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
+		if (val & (1UL << 31))
+			break;
+
+		msleep(5);
+	}
+	if (!(val & (1UL << 31))) {
+		BNX2X_ERR("Cannot acquire MCP access lock register\n");
+		rc = -EBUSY;
+	}
+
+	return rc;
+}
+
+/* release split MCP access lock register */
+static void bnx2x_release_alr(struct bnx2x *bp)
+{
+	REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
+}
+
+static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
+{
+	struct host_def_status_block *def_sb = bp->def_status_blk;
+	u16 rc = 0;
+
+	barrier(); /* status block is written to by the chip */
+	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
+		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
+		rc |= 1;
+	}
+	if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
+		bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
+		rc |= 2;
+	}
+	if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
+		bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
+		rc |= 4;
+	}
+	if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
+		bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
+		rc |= 8;
+	}
+	if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
+		bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
+		rc |= 16;
+	}
+	return rc;
+}
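+/* The returned value is a bitmask of which default status block indices
+ * advanced since the last poll: 1 - attention bits, 2 - CSTORM,
+ * 4 - USTORM, 8 - XSTORM, 16 - TSTORM.  Callers can use it to decide
+ * which blocks need servicing.
+ */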
+
+/*
+ * slow path service functions
+ */
+
+static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
+{
+	int port = BP_PORT(bp);
+	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
+		       COMMAND_REG_ATTN_BITS_SET);
+	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
+			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
+	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
+				       NIG_REG_MASK_INTERRUPT_PORT0;
+	u32 aeu_mask;
+	u32 nig_mask = 0;
+
+	if (bp->attn_state & asserted)
+		BNX2X_ERR("IGU ERROR\n");
+
+	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
+	aeu_mask = REG_RD(bp, aeu_addr);
+
+	DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
+	   aeu_mask, asserted);
+	aeu_mask &= ~(asserted & 0x3ff);
+	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
+
+	REG_WR(bp, aeu_addr, aeu_mask);
+	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
+
+	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
+	bp->attn_state |= asserted;
+	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
+
+	if (asserted & ATTN_HARD_WIRED_MASK) {
+		if (asserted & ATTN_NIG_FOR_FUNC) {
+
+			bnx2x_acquire_phy_lock(bp);
+
+			/* save nig interrupt mask */
+			nig_mask = REG_RD(bp, nig_int_mask_addr);
+			REG_WR(bp, nig_int_mask_addr, 0);
+
+			bnx2x_link_attn(bp);
+
+			/* handle unicore attn? */
+		}
+		if (asserted & ATTN_SW_TIMER_4_FUNC)
+			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
+
+		if (asserted & GPIO_2_FUNC)
+			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
+
+		if (asserted & GPIO_3_FUNC)
+			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
+
+		if (asserted & GPIO_4_FUNC)
+			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
+
+		if (port == 0) {
+			if (asserted & ATTN_GENERAL_ATTN_1) {
+				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
+				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
+			}
+			if (asserted & ATTN_GENERAL_ATTN_2) {
+				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
+				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
+			}
+			if (asserted & ATTN_GENERAL_ATTN_3) {
+				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
+				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
+			}
+		} else {
+			if (asserted & ATTN_GENERAL_ATTN_4) {
+				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
+				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
+			}
+			if (asserted & ATTN_GENERAL_ATTN_5) {
+				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
+				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
+			}
+			if (asserted & ATTN_GENERAL_ATTN_6) {
+				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
+				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
+			}
+		}
+
+	} /* if hardwired */
+
+	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
+	   asserted, hc_addr);
+	REG_WR(bp, hc_addr, asserted);
+
+	/* now set back the mask */
+	if (asserted & ATTN_NIG_FOR_FUNC) {
+		REG_WR(bp, nig_int_mask_addr, nig_mask);
+		bnx2x_release_phy_lock(bp);
+	}
+}
+
+static inline void bnx2x_fan_failure(struct bnx2x *bp)
+{
+	int port = BP_PORT(bp);
+
+	/* mark the failure */
+	bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
+	bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
+	SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
+		 bp->link_params.ext_phy_config);
+
+	/* log the failure */
+	netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
+	       " the driver to shut down the card to prevent permanent"
+	       " damage.  Please contact OEM Support for assistance\n");
+}
+
+static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
+{
+	int port = BP_PORT(bp);
+	int reg_offset;
+	u32 val, swap_val, swap_override;
+
+	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
+			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
+
+	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
+
+		val = REG_RD(bp, reg_offset);
+		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
+		REG_WR(bp, reg_offset, val);
+
+		BNX2X_ERR("SPIO5 hw attention\n");
+
+		/* Fan failure attention */
+		switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
+		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
+			/* Low power mode is controlled by GPIO 2 */
+			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
+			/* The PHY reset is controlled by GPIO 1 */
+			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
+				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
+			break;
+
+		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
+			/* The PHY reset is controlled by GPIO 1 */
+			/* fake the port number to cancel the swap done in
+			   set_gpio() */
+			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+			swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
+			port = (swap_val && swap_override) ^ 1;
+			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
+				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
+			break;
+
+		default:
+			break;
+		}
+		bnx2x_fan_failure(bp);
+	}
+
+	if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
+		    AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
+		bnx2x_acquire_phy_lock(bp);
+		bnx2x_handle_module_detect_int(&bp->link_params);
+		bnx2x_release_phy_lock(bp);
+	}
+
+	if (attn & HW_INTERRUT_ASSERT_SET_0) {
+
+		val = REG_RD(bp, reg_offset);
+		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
+		REG_WR(bp, reg_offset, val);
+
+		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
+			  (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
+		bnx2x_panic();
+	}
+}
+
+static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
+{
+	u32 val;
+
+	if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
+
+		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
+		BNX2X_ERR("DB hw attention 0x%x\n", val);
+		/* DORQ discard attention */
+		if (val & 0x2)
+			BNX2X_ERR("FATAL error from DORQ\n");
+	}
+
+	if (attn & HW_INTERRUT_ASSERT_SET_1) {
+
+		int port = BP_PORT(bp);
+		int reg_offset;
+
+		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
+				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
+
+		val = REG_RD(bp, reg_offset);
+		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
+		REG_WR(bp, reg_offset, val);
+
+		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
+			  (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
+		bnx2x_panic();
+	}
+}
+
+static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
+{
+	u32 val;
+
+	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
+
+		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
+		BNX2X_ERR("CFC hw attention 0x%x\n", val);
+		/* CFC error attention */
+		if (val & 0x2)
+			BNX2X_ERR("FATAL error from CFC\n");
+	}
+
+	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
+
+		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
+		BNX2X_ERR("PXP hw attention 0x%x\n", val);
+		/* RQ_USDMDP_FIFO_OVERFLOW */
+		if (val & 0x18000)
+			BNX2X_ERR("FATAL error from PXP\n");
+	}
+
+	if (attn & HW_INTERRUT_ASSERT_SET_2) {
+
+		int port = BP_PORT(bp);
+		int reg_offset;
+
+		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
+				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
+
+		val = REG_RD(bp, reg_offset);
+		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
+		REG_WR(bp, reg_offset, val);
+
+		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
+			  (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
+		bnx2x_panic();
+	}
+}
+
+static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
+{
+	u32 val;
+
+	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
+
+		if (attn & BNX2X_PMF_LINK_ASSERT) {
+			int func = BP_FUNC(bp);
+
+			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
+			bp->mf_config = SHMEM_RD(bp,
+					   mf_cfg.func_mf_config[func].config);
+			val = SHMEM_RD(bp, func_mb[func].drv_status);
+			if (val & DRV_STATUS_DCC_EVENT_MASK)
+				bnx2x_dcc_event(bp,
+					    (val & DRV_STATUS_DCC_EVENT_MASK));
+			bnx2x__link_status_update(bp);
+			if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
+				bnx2x_pmf_update(bp);
+
+		} else if (attn & BNX2X_MC_ASSERT_BITS) {
+
+			BNX2X_ERR("MC assert!\n");
+			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
+			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
+			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
+			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
+			bnx2x_panic();
+
+		} else if (attn & BNX2X_MCP_ASSERT) {
+
+			BNX2X_ERR("MCP assert!\n");
+			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
+			bnx2x_fw_dump(bp);
+
+		} else
+			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
+	}
+
+	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
+		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
+		if (attn & BNX2X_GRC_TIMEOUT) {
+			val = CHIP_IS_E1H(bp) ?
+				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
+			BNX2X_ERR("GRC time-out 0x%08x\n", val);
+		}
+		if (attn & BNX2X_GRC_RSV) {
+			val = CHIP_IS_E1H(bp) ?
+				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
+			BNX2X_ERR("GRC reserved 0x%08x\n", val);
+		}
+		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
+	}
+}
+
+#define BNX2X_MISC_GEN_REG      MISC_REG_GENERIC_POR_1
+#define LOAD_COUNTER_BITS	16 /* Number of bits for load counter */
+#define LOAD_COUNTER_MASK	(((u32)0x1 << LOAD_COUNTER_BITS) - 1)
+#define RESET_DONE_FLAG_MASK	(~LOAD_COUNTER_MASK)
+#define RESET_DONE_FLAG_SHIFT	LOAD_COUNTER_BITS
+#define CHIP_PARITY_SUPPORTED(bp)   (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
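+/* Layout of BNX2X_MISC_GEN_REG as used below (example illustrative):
+ * bits 15:0 hold the load counter and bit 16 (within
+ * RESET_DONE_FLAG_MASK) flags a global reset in progress, e.g. a value
+ * of 0x00010002 means a reset is in flight while two driver instances
+ * are loaded.
+ */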
+/*
+ * should be run under rtnl lock
+ */
+static inline void bnx2x_set_reset_done(struct bnx2x *bp)
+{
+	u32 val	= REG_RD(bp, BNX2X_MISC_GEN_REG);
+	val &= ~(1 << RESET_DONE_FLAG_SHIFT);
+	REG_WR(bp, BNX2X_MISC_GEN_REG, val);
+	barrier();
+	mmiowb();
+}
+
+/*
+ * should be run under rtnl lock
+ */
+static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
+{
+	u32 val	= REG_RD(bp, BNX2X_MISC_GEN_REG);
+	val |= (1 << RESET_DONE_FLAG_SHIFT);
+	REG_WR(bp, BNX2X_MISC_GEN_REG, val);
+	barrier();
+	mmiowb();
+}
+
+/*
+ * should be run under rtnl lock
+ */
+bool bnx2x_reset_is_done(struct bnx2x *bp)
+{
+	u32 val	= REG_RD(bp, BNX2X_MISC_GEN_REG);
+	DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
+	return (val & RESET_DONE_FLAG_MASK) == 0;
+}
+
+/*
+ * should be run under rtnl lock
+ */
+inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
+{
+	u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
+
+	DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
+
+	val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
+	REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
+	barrier();
+	mmiowb();
+}
+
+/*
+ * should be run under rtnl lock
+ */
+u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
+{
+	u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
+
+	DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
+
+	val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
+	REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
+	barrier();
+	mmiowb();
+
+	return val1;
+}
+
+/*
+ * should be run under rtnl lock
+ */
+static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
+{
+	return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
+}
+
+static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
+{
+	u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
+	REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
+}
+
+static inline void _print_next_block(int idx, const char *blk)
+{
+	if (idx)
+		pr_cont(", ");
+	pr_cont("%s", blk);
+}
+
+static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
+{
+	int i = 0;
+	u32 cur_bit = 0;
+	for (i = 0; sig; i++) {
+		cur_bit = ((u32)0x1 << i);
+		if (sig & cur_bit) {
+			switch (cur_bit) {
+			case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
+				_print_next_block(par_num++, "BRB");
+				break;
+			case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
+				_print_next_block(par_num++, "PARSER");
+				break;
+			case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
+				_print_next_block(par_num++, "TSDM");
+				break;
+			case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
+				_print_next_block(par_num++, "SEARCHER");
+				break;
+			case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
+				_print_next_block(par_num++, "TSEMI");
+				break;
+			}
+
+			/* Clear the bit */
+			sig &= ~cur_bit;
+		}
+	}
+
+	return par_num;
+}
+
+static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
+{
+	int i = 0;
+	u32 cur_bit = 0;
+	for (i = 0; sig; i++) {
+		cur_bit = ((u32)0x1 << i);
+		if (sig & cur_bit) {
+			switch (cur_bit) {
+			case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
+				_print_next_block(par_num++, "PBCLIENT");
+				break;
+			case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
+				_print_next_block(par_num++, "QM");
+				break;
+			case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
+				_print_next_block(par_num++, "XSDM");
+				break;
+			case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
+				_print_next_block(par_num++, "XSEMI");
+				break;
+			case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
+				_print_next_block(par_num++, "DOORBELLQ");
+				break;
+			case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
+				_print_next_block(par_num++, "VAUX PCI CORE");
+				break;
+			case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
+				_print_next_block(par_num++, "DEBUG");
+				break;
+			case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
+				_print_next_block(par_num++, "USDM");
+				break;
+			case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
+				_print_next_block(par_num++, "USEMI");
+				break;
+			case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
+				_print_next_block(par_num++, "UPB");
+				break;
+			case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
+				_print_next_block(par_num++, "CSDM");
+				break;
+			}
+
+			/* Clear the bit */
+			sig &= ~cur_bit;
+		}
+	}
+
+	return par_num;
+}
+
+static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
+{
+	int i = 0;
+	u32 cur_bit = 0;
+	for (i = 0; sig; i++) {
+		cur_bit = ((u32)0x1 << i);
+		if (sig & cur_bit) {
+			switch (cur_bit) {
+			case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
+				_print_next_block(par_num++, "CSEMI");
+				break;
+			case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
+				_print_next_block(par_num++, "PXP");
+				break;
+			case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
+				_print_next_block(par_num++,
+					"PXPPCICLOCKCLIENT");
+				break;
+			case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
+				_print_next_block(par_num++, "CFC");
+				break;
+			case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
+				_print_next_block(par_num++, "CDU");
+				break;
+			case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
+				_print_next_block(par_num++, "IGU");
+				break;
+			case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
+				_print_next_block(par_num++, "MISC");
+				break;
+			}
+
+			/* Clear the bit */
+			sig &= ~cur_bit;
+		}
+	}
+
+	return par_num;
+}
+
+static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
+{
+	int i = 0;
+	u32 cur_bit = 0;
+	for (i = 0; sig; i++) {
+		cur_bit = ((u32)0x1 << i);
+		if (sig & cur_bit) {
+			switch (cur_bit) {
+			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
+				_print_next_block(par_num++, "MCP ROM");
+				break;
+			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
+				_print_next_block(par_num++, "MCP UMP RX");
+				break;
+			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
+				_print_next_block(par_num++, "MCP UMP TX");
+				break;
+			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
+				_print_next_block(par_num++, "MCP SCPAD");
+				break;
+			}
+
+			/* Clear the bit */
+			sig &= ~cur_bit;
+		}
+	}
+
+	return par_num;
+}
+
+static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
+				     u32 sig2, u32 sig3)
+{
+	if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
+	    (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
+		int par_num = 0;
+		DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
+			"[0]:0x%08x [1]:0x%08x "
+			"[2]:0x%08x [3]:0x%08x\n",
+			  sig0 & HW_PRTY_ASSERT_SET_0,
+			  sig1 & HW_PRTY_ASSERT_SET_1,
+			  sig2 & HW_PRTY_ASSERT_SET_2,
+			  sig3 & HW_PRTY_ASSERT_SET_3);
+		printk(KERN_ERR "%s: Parity errors detected in blocks: ",
+		       bp->dev->name);
+		par_num = bnx2x_print_blocks_with_parity0(
+			sig0 & HW_PRTY_ASSERT_SET_0, par_num);
+		par_num = bnx2x_print_blocks_with_parity1(
+			sig1 & HW_PRTY_ASSERT_SET_1, par_num);
+		par_num = bnx2x_print_blocks_with_parity2(
+			sig2 & HW_PRTY_ASSERT_SET_2, par_num);
+		par_num = bnx2x_print_blocks_with_parity3(
+			sig3 & HW_PRTY_ASSERT_SET_3, par_num);
+		printk("\n");
+		return true;
+	} else
+		return false;
+}
+
+bool bnx2x_chk_parity_attn(struct bnx2x *bp)
+{
+	struct attn_route attn;
+	int port = BP_PORT(bp);
+
+	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
+	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
+	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
+	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
+
+	return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
+					attn.sig[3]);
+}
+
+static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
+{
+	struct attn_route attn, *group_mask;
+	int port = BP_PORT(bp);
+	int index;
+	u32 reg_addr;
+	u32 val;
+	u32 aeu_mask;
+
+	/* need to take HW lock because MCP or other port might also
+	   try to handle this event */
+	bnx2x_acquire_alr(bp);
+
+	if (bnx2x_chk_parity_attn(bp)) {
+		bp->recovery_state = BNX2X_RECOVERY_INIT;
+		bnx2x_set_reset_in_progress(bp);
+		schedule_delayed_work(&bp->reset_task, 0);
+		/* Disable HW interrupts */
+		bnx2x_int_disable(bp);
+		bnx2x_release_alr(bp);
+		/* In case of parity errors don't handle attentions so that
+		 * other function would "see" parity errors.
+		 */
+		return;
+	}
+
+	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
+	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
+	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
+	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
+	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
+	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
+
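+	/* each deasserted bit selects one dynamic AEU attention group;
+	 * within a group, handle only the attention signals that are
+	 * actually routed through it */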
+	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
+		if (deasserted & (1 << index)) {
+			group_mask = &bp->attn_group[index];
+
+			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
+			   index, group_mask->sig[0], group_mask->sig[1],
+			   group_mask->sig[2], group_mask->sig[3]);
+
+			bnx2x_attn_int_deasserted3(bp,
+					attn.sig[3] & group_mask->sig[3]);
+			bnx2x_attn_int_deasserted1(bp,
+					attn.sig[1] & group_mask->sig[1]);
+			bnx2x_attn_int_deasserted2(bp,
+					attn.sig[2] & group_mask->sig[2]);
+			bnx2x_attn_int_deasserted0(bp,
+					attn.sig[0] & group_mask->sig[0]);
+		}
+	}
+
+	bnx2x_release_alr(bp);
+
+	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
+
+	val = ~deasserted;
+	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
+	   val, reg_addr);
+	REG_WR(bp, reg_addr, val);
+
+	if (~bp->attn_state & deasserted)
+		BNX2X_ERR("IGU ERROR\n");
+
+	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
+			  MISC_REG_AEU_MASK_ATTN_FUNC_0;
+
+	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
+	aeu_mask = REG_RD(bp, reg_addr);
+
+	DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
+	   aeu_mask, deasserted);
+	aeu_mask |= (deasserted & 0x3ff);
+	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
+
+	REG_WR(bp, reg_addr, aeu_mask);
+	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
+
+	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
+	bp->attn_state &= ~deasserted;
+	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
+}
+
+static void bnx2x_attn_int(struct bnx2x *bp)
+{
+	/* read local copy of bits */
+	u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
+								attn_bits);
+	u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
+								attn_bits_ack);
+	u32 attn_state = bp->attn_state;
+
+	/* look for changed bits */
+	u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
+	u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
+
+	DP(NETIF_MSG_HW,
+	   "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
+	   attn_bits, attn_ack, asserted, deasserted);
+
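+	/* bits on which attn_bits and attn_ack agree must also match our
+	 * stored attn_state; a mismatch means the assert/deassert
+	 * bookkeeping has gone out of sync */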
+	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
+		BNX2X_ERR("BAD attention state\n");
+
+	/* handle bits that were raised */
+	if (asserted)
+		bnx2x_attn_int_asserted(bp, asserted);
+
+	if (deasserted)
+		bnx2x_attn_int_deasserted(bp, deasserted);
+}
+
+static void bnx2x_sp_task(struct work_struct *work)
+{
+	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
+	u16 status;
+
+	/* Return here if interrupt is disabled */
+	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
+		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
+		return;
+	}
+
+	status = bnx2x_update_dsb_idx(bp);
+/*	if (status == 0)				     */
+/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */
+
+	DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
+
+	/* HW attentions */
+	if (status & 0x1) {
+		bnx2x_attn_int(bp);
+		status &= ~0x1;
+	}
+
+	/* CStorm events: STAT_QUERY */
+	if (status & 0x2) {
+		DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n");
+		status &= ~0x2;
+	}
+
+	if (unlikely(status))
+		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
+		   status);
+
+	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
+		     IGU_INT_NOP, 1);
+	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
+		     IGU_INT_NOP, 1);
+	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
+		     IGU_INT_NOP, 1);
+	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
+		     IGU_INT_NOP, 1);
+	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
+		     IGU_INT_ENABLE, 1);
+}
+
+irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
+{
+	struct net_device *dev = dev_instance;
+	struct bnx2x *bp = netdev_priv(dev);
+
+	/* Return here if interrupt is disabled */
+	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
+		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
+		return IRQ_HANDLED;
+	}
+
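+	/* mask further slow path interrupts; bnx2x_sp_task() re-enables
+	 * them with its final IGU_INT_ENABLE ack */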
+	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
+
+#ifdef BNX2X_STOP_ON_ERROR
+	if (unlikely(bp->panic))
+		return IRQ_HANDLED;
+#endif
+
+#ifdef BCM_CNIC
+	{
+		struct cnic_ops *c_ops;
+
+		rcu_read_lock();
+		c_ops = rcu_dereference(bp->cnic_ops);
+		if (c_ops)
+			c_ops->cnic_handler(bp->cnic_data, NULL);
+		rcu_read_unlock();
+	}
+#endif
+	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
+
+	return IRQ_HANDLED;
+}
+
+/* end of slow path */
+
+static void bnx2x_timer(unsigned long data)
+{
+	struct bnx2x *bp = (struct bnx2x *) data;
+
+	if (!netif_running(bp->dev))
+		return;
+
+	if (atomic_read(&bp->intr_sem) != 0)
+		goto timer_restart;
+
+	if (poll) {
+		struct bnx2x_fastpath *fp = &bp->fp[0];
+		int rc;
+
+		bnx2x_tx_int(fp);
+		rc = bnx2x_rx_int(fp, 1000);
+	}
+
+	if (!BP_NOMCP(bp)) {
+		int func = BP_FUNC(bp);
+		u32 drv_pulse;
+		u32 mcp_pulse;
+
+		++bp->fw_drv_pulse_wr_seq;
+		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
+		/* TBD - add SYSTEM_TIME */
+		drv_pulse = bp->fw_drv_pulse_wr_seq;
+		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
+
+		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
+			     MCP_PULSE_SEQ_MASK);
+		/* The delta between driver pulse and mcp response
+		 * should be 1 (before mcp response) or 0 (after mcp response)
+		 */
+		if ((drv_pulse != mcp_pulse) &&
+		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
+			/* someone lost a heartbeat... */
+			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
+				  drv_pulse, mcp_pulse);
+		}
+	}
+
+	if (bp->state == BNX2X_STATE_OPEN)
+		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
+
+timer_restart:
+	mod_timer(&bp->timer, jiffies + bp->current_interval);
+}
+
+/* end of Statistics */
+
+/* nic init */
+
+/*
+ * nic init service functions
+ */
+
+static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
+{
+	int port = BP_PORT(bp);
+
+	/* "CSTORM" */
+	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
+			CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
+			CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
+	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
+			CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
+			CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
+}
+
+void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
+			  dma_addr_t mapping, int sb_id)
+{
+	int port = BP_PORT(bp);
+	int func = BP_FUNC(bp);
+	int index;
+	u64 section;
+
+	/* USTORM */
+	section = ((u64)mapping) + offsetof(struct host_status_block,
+					    u_status_block);
+	sb->u_status_block.status_block_id = sb_id;
+
+	REG_WR(bp, BAR_CSTRORM_INTMEM +
+	       CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
+	REG_WR(bp, BAR_CSTRORM_INTMEM +
+	       ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
+	       U64_HI(section));
+	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
+		CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
+
+	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
+		REG_WR16(bp, BAR_CSTRORM_INTMEM +
+			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
+
+	/* CSTORM */
+	section = ((u64)mapping) + offsetof(struct host_status_block,
+					    c_status_block);
+	sb->c_status_block.status_block_id = sb_id;
+
+	REG_WR(bp, BAR_CSTRORM_INTMEM +
+	       CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
+	REG_WR(bp, BAR_CSTRORM_INTMEM +
+	       ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
+	       U64_HI(section));
+	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
+		CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
+
+	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
+		REG_WR16(bp, BAR_CSTRORM_INTMEM +
+			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
+
+	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
+}
+
+static void bnx2x_zero_def_sb(struct bnx2x *bp)
+{
+	int func = BP_FUNC(bp);
+
+	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
+			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
+			sizeof(struct tstorm_def_status_block)/4);
+	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
+			CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
+			sizeof(struct cstorm_def_status_block_u)/4);
+	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
+			CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
+			sizeof(struct cstorm_def_status_block_c)/4);
+	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
+			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
+			sizeof(struct xstorm_def_status_block)/4);
+}
+
+static void bnx2x_init_def_sb(struct bnx2x *bp,
+			      struct host_def_status_block *def_sb,
+			      dma_addr_t mapping, int sb_id)
+{
+	int port = BP_PORT(bp);
+	int func = BP_FUNC(bp);
+	int index, val, reg_offset;
+	u64 section;
+
+	/* ATTN */
+	section = ((u64)mapping) + offsetof(struct host_def_status_block,
+					    atten_status_block);
+	def_sb->atten_status_block.status_block_id = sb_id;
+
+	bp->attn_state = 0;
+
+	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
+			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
+
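+	/* each attention group spans four consecutive 32-bit AEU enable
+	 * registers, hence the 0x10 stride below */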
+	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
+		bp->attn_group[index].sig[0] = REG_RD(bp,
+						     reg_offset + 0x10*index);
+		bp->attn_group[index].sig[1] = REG_RD(bp,
+					       reg_offset + 0x4 + 0x10*index);
+		bp->attn_group[index].sig[2] = REG_RD(bp,
+					       reg_offset + 0x8 + 0x10*index);
+		bp->attn_group[index].sig[3] = REG_RD(bp,
+					       reg_offset + 0xc + 0x10*index);
+	}
+
+	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
+			     HC_REG_ATTN_MSG0_ADDR_L);
+
+	REG_WR(bp, reg_offset, U64_LO(section));
+	REG_WR(bp, reg_offset + 4, U64_HI(section));
+
+	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
+
+	val = REG_RD(bp, reg_offset);
+	val |= sb_id;
+	REG_WR(bp, reg_offset, val);
+
+	/* USTORM */
+	section = ((u64)mapping) + offsetof(struct host_def_status_block,
+					    u_def_status_block);
+	def_sb->u_def_status_block.status_block_id = sb_id;
+
+	REG_WR(bp, BAR_CSTRORM_INTMEM +
+	       CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
+	REG_WR(bp, BAR_CSTRORM_INTMEM +
+	       ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
+	       U64_HI(section));
+	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
+		CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
+
+	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
+		REG_WR16(bp, BAR_CSTRORM_INTMEM +
+			 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
+
+	/* CSTORM */
+	section = ((u64)mapping) + offsetof(struct host_def_status_block,
+					    c_def_status_block);
+	def_sb->c_def_status_block.status_block_id = sb_id;
+
+	REG_WR(bp, BAR_CSTRORM_INTMEM +
+	       CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
+	REG_WR(bp, BAR_CSTRORM_INTMEM +
+	       ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
+	       U64_HI(section));
+	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
+		CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
+
+	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
+		REG_WR16(bp, BAR_CSTRORM_INTMEM +
+			 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
+
+	/* TSTORM */
+	section = ((u64)mapping) + offsetof(struct host_def_status_block,
+					    t_def_status_block);
+	def_sb->t_def_status_block.status_block_id = sb_id;
+
+	REG_WR(bp, BAR_TSTRORM_INTMEM +
+	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
+	REG_WR(bp, BAR_TSTRORM_INTMEM +
+	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
+	       U64_HI(section));
+	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
+		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
+
+	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
+		REG_WR16(bp, BAR_TSTRORM_INTMEM +
+			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
+
+	/* XSTORM */
+	section = ((u64)mapping) + offsetof(struct host_def_status_block,
+					    x_def_status_block);
+	def_sb->x_def_status_block.status_block_id = sb_id;
+
+	REG_WR(bp, BAR_XSTRORM_INTMEM +
+	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
+	REG_WR(bp, BAR_XSTRORM_INTMEM +
+	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
+	       U64_HI(section));
+	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
+		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
+
+	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
+		REG_WR16(bp, BAR_XSTRORM_INTMEM +
+			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
+
+	bp->stats_pending = 0;
+	bp->set_mac_pending = 0;
+
+	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
+}
+
+void bnx2x_update_coalesce(struct bnx2x *bp)
+{
+	int port = BP_PORT(bp);
+	int i;
+
+	for_each_queue(bp, i) {
+		int sb_id = bp->fp[i].sb_id;
+
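+		/* a computed timeout of zero disables coalescing on an
+		 * index, which is what the HC_DISABLE writes below encode */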
+		/* HC_INDEX_U_ETH_RX_CQ_CONS */
+		REG_WR8(bp, BAR_CSTRORM_INTMEM +
+			CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
+						      U_SB_ETH_RX_CQ_INDEX),
+			bp->rx_ticks/(4 * BNX2X_BTR));
+		REG_WR16(bp, BAR_CSTRORM_INTMEM +
+			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
+						       U_SB_ETH_RX_CQ_INDEX),
+			 (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
+
+		/* HC_INDEX_C_ETH_TX_CQ_CONS */
+		REG_WR8(bp, BAR_CSTRORM_INTMEM +
+			CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
+						      C_SB_ETH_TX_CQ_INDEX),
+			bp->tx_ticks/(4 * BNX2X_BTR));
+		REG_WR16(bp, BAR_CSTRORM_INTMEM +
+			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
+						       C_SB_ETH_TX_CQ_INDEX),
+			 (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
+	}
+}
+
+static void bnx2x_init_sp_ring(struct bnx2x *bp)
+{
+	int func = BP_FUNC(bp);
+
+	spin_lock_init(&bp->spq_lock);
+
+	bp->spq_left = MAX_SPQ_PENDING;
+	bp->spq_prod_idx = 0;
+	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
+	bp->spq_prod_bd = bp->spq;
+	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
+
+	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
+	       U64_LO(bp->spq_mapping));
+	REG_WR(bp,
+	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
+	       U64_HI(bp->spq_mapping));
+
+	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
+	       bp->spq_prod_idx);
+}
+
+static void bnx2x_init_context(struct bnx2x *bp)
+{
+	int i;
+
+	/* Rx */
+	for_each_queue(bp, i) {
+		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
+		struct bnx2x_fastpath *fp = &bp->fp[i];
+		u8 cl_id = fp->cl_id;
+
+		context->ustorm_st_context.common.sb_index_numbers =
+						BNX2X_RX_SB_INDEX_NUM;
+		context->ustorm_st_context.common.clientId = cl_id;
+		context->ustorm_st_context.common.status_block_id = fp->sb_id;
+		context->ustorm_st_context.common.flags =
+			(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
+			 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
+		context->ustorm_st_context.common.statistics_counter_id =
+						cl_id;
+		context->ustorm_st_context.common.mc_alignment_log_size =
+						BNX2X_RX_ALIGN_SHIFT;
+		context->ustorm_st_context.common.bd_buff_size =
+						bp->rx_buf_size;
+		context->ustorm_st_context.common.bd_page_base_hi =
+						U64_HI(fp->rx_desc_mapping);
+		context->ustorm_st_context.common.bd_page_base_lo =
+						U64_LO(fp->rx_desc_mapping);
+		if (!fp->disable_tpa) {
+			context->ustorm_st_context.common.flags |=
+				USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
+			context->ustorm_st_context.common.sge_buff_size =
+				(u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE,
+					   0xffff);
+			context->ustorm_st_context.common.sge_page_base_hi =
+						U64_HI(fp->rx_sge_mapping);
+			context->ustorm_st_context.common.sge_page_base_lo =
+						U64_LO(fp->rx_sge_mapping);
+
+			context->ustorm_st_context.common.max_sges_for_packet =
+				SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
+			context->ustorm_st_context.common.max_sges_for_packet =
+				((context->ustorm_st_context.common.
+				  max_sges_for_packet + PAGES_PER_SGE - 1) &
+				 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
+		}
+
+		context->ustorm_ag_context.cdu_usage =
+			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
+					       CDU_REGION_NUMBER_UCM_AG,
+					       ETH_CONNECTION_TYPE);
+
+		context->xstorm_ag_context.cdu_reserved =
+			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
+					       CDU_REGION_NUMBER_XCM_AG,
+					       ETH_CONNECTION_TYPE);
+	}
+
+	/* Tx */
+	for_each_queue(bp, i) {
+		struct bnx2x_fastpath *fp = &bp->fp[i];
+		struct eth_context *context =
+			bnx2x_sp(bp, context[i].eth);
+
+		context->cstorm_st_context.sb_index_number =
+						C_SB_ETH_TX_CQ_INDEX;
+		context->cstorm_st_context.status_block_id = fp->sb_id;
+
+		context->xstorm_st_context.tx_bd_page_base_hi =
+						U64_HI(fp->tx_desc_mapping);
+		context->xstorm_st_context.tx_bd_page_base_lo =
+						U64_LO(fp->tx_desc_mapping);
+		context->xstorm_st_context.statistics_data = (fp->cl_id |
+				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
+	}
+}
+
+static void bnx2x_init_ind_table(struct bnx2x *bp)
+{
+	int func = BP_FUNC(bp);
+	int i;
+
+	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
+		return;
+
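+	/* spread the indirection entries round-robin over the client ids
+	 * of the active queues */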
+	DP(NETIF_MSG_IFUP,
+	   "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
+	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
+		REG_WR8(bp, BAR_TSTRORM_INTMEM +
+			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
+			bp->fp->cl_id + (i % bp->num_queues));
+}
+
+void bnx2x_set_client_config(struct bnx2x *bp)
+{
+	struct tstorm_eth_client_config tstorm_client = {0};
+	int port = BP_PORT(bp);
+	int i;
+
+	tstorm_client.mtu = bp->dev->mtu;
+	tstorm_client.config_flags =
+				(TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
+				 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
+#ifdef BCM_VLAN
+	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
+		tstorm_client.config_flags |=
+				TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
+		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
+	}
+#endif
+
+	for_each_queue(bp, i) {
+		tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
+
+		REG_WR(bp, BAR_TSTRORM_INTMEM +
+		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
+		       ((u32 *)&tstorm_client)[0]);
+		REG_WR(bp, BAR_TSTRORM_INTMEM +
+		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
+		       ((u32 *)&tstorm_client)[1]);
+	}
+
+	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
+	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
+}
+
+void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
+{
+	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
+	int mode = bp->rx_mode;
+	int mask = bp->rx_mode_cl_mask;
+	int func = BP_FUNC(bp);
+	int port = BP_PORT(bp);
+	int i;
+	/* All but management unicast packets should pass to the host as well */
+	u32 llh_mask =
+		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
+		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
+		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
+		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
+
+	DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
+
+	switch (mode) {
+	case BNX2X_RX_MODE_NONE: /* no Rx */
+		tstorm_mac_filter.ucast_drop_all = mask;
+		tstorm_mac_filter.mcast_drop_all = mask;
+		tstorm_mac_filter.bcast_drop_all = mask;
+		break;
+
+	case BNX2X_RX_MODE_NORMAL:
+		tstorm_mac_filter.bcast_accept_all = mask;
+		break;
+
+	case BNX2X_RX_MODE_ALLMULTI:
+		tstorm_mac_filter.mcast_accept_all = mask;
+		tstorm_mac_filter.bcast_accept_all = mask;
+		break;
+
+	case BNX2X_RX_MODE_PROMISC:
+		tstorm_mac_filter.ucast_accept_all = mask;
+		tstorm_mac_filter.mcast_accept_all = mask;
+		tstorm_mac_filter.bcast_accept_all = mask;
+		/* pass management unicast packets as well */
+		llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
+		break;
+
+	default:
+		BNX2X_ERR("BAD rx mode (%d)\n", mode);
+		break;
+	}
+
+	REG_WR(bp,
+	       (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
+	       llh_mask);
+
+	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
+		REG_WR(bp, BAR_TSTRORM_INTMEM +
+		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
+		       ((u32 *)&tstorm_mac_filter)[i]);
+
+/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
+		   ((u32 *)&tstorm_mac_filter)[i]); */
+	}
+
+	if (mode != BNX2X_RX_MODE_NONE)
+		bnx2x_set_client_config(bp);
+}
+
+static void bnx2x_init_internal_common(struct bnx2x *bp)
+{
+	int i;
+
+	/* Zero this manually as its initialization is
+	   currently missing in the initTool */
+	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
+		REG_WR(bp, BAR_USTRORM_INTMEM +
+		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
+}
+
+static void bnx2x_init_internal_port(struct bnx2x *bp)
+{
+	int port = BP_PORT(bp);
+
+	REG_WR(bp,
+	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
+	REG_WR(bp,
+	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
+	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
+	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
+}
+
+static void bnx2x_init_internal_func(struct bnx2x *bp)
+{
+	struct tstorm_eth_function_common_config tstorm_config = {0};
+	struct stats_indication_flags stats_flags = {0};
+	int port = BP_PORT(bp);
+	int func = BP_FUNC(bp);
+	int i, j;
+	u32 offset;
+	u16 max_agg_size;
+
+	tstorm_config.config_flags = RSS_FLAGS(bp);
+
+	if (is_multi(bp))
+		tstorm_config.rss_result_mask = MULTI_MASK;
+
+	/* Enable TPA if needed */
+	if (bp->flags & TPA_ENABLE_FLAG)
+		tstorm_config.config_flags |=
+			TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
+
+	if (IS_E1HMF(bp))
+		tstorm_config.config_flags |=
+				TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
+
+	tstorm_config.leading_client_id = BP_L_ID(bp);
+
+	REG_WR(bp, BAR_TSTRORM_INTMEM +
+	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
+	       (*(u32 *)&tstorm_config));
+
+	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
+	bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
+	bnx2x_set_storm_rx_mode(bp);
+
+	for_each_queue(bp, i) {
+		u8 cl_id = bp->fp[i].cl_id;
+
+		/* reset xstorm per client statistics */
+		offset = BAR_XSTRORM_INTMEM +
+			 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
+		for (j = 0;
+		     j < sizeof(struct xstorm_per_client_stats) / 4; j++)
+			REG_WR(bp, offset + j*4, 0);
+
+		/* reset tstorm per client statistics */
+		offset = BAR_TSTRORM_INTMEM +
+			 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
+		for (j = 0;
+		     j < sizeof(struct tstorm_per_client_stats) / 4; j++)
+			REG_WR(bp, offset + j*4, 0);
+
+		/* reset ustorm per client statistics */
+		offset = BAR_USTRORM_INTMEM +
+			 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
+		for (j = 0;
+		     j < sizeof(struct ustorm_per_client_stats) / 4; j++)
+			REG_WR(bp, offset + j*4, 0);
+	}
+
+	/* Init statistics related context */
+	stats_flags.collect_eth = 1;
+
+	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
+	       ((u32 *)&stats_flags)[0]);
+	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
+	       ((u32 *)&stats_flags)[1]);
+
+	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
+	       ((u32 *)&stats_flags)[0]);
+	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
+	       ((u32 *)&stats_flags)[1]);
+
+	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
+	       ((u32 *)&stats_flags)[0]);
+	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
+	       ((u32 *)&stats_flags)[1]);
+
+	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
+	       ((u32 *)&stats_flags)[0]);
+	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
+	       ((u32 *)&stats_flags)[1]);
+
+	REG_WR(bp, BAR_XSTRORM_INTMEM +
+	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
+	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
+	REG_WR(bp, BAR_XSTRORM_INTMEM +
+	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
+	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
+
+	REG_WR(bp, BAR_TSTRORM_INTMEM +
+	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
+	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
+	REG_WR(bp, BAR_TSTRORM_INTMEM +
+	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
+	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
+
+	REG_WR(bp, BAR_USTRORM_INTMEM +
+	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
+	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
+	REG_WR(bp, BAR_USTRORM_INTMEM +
+	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
+	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
+
+	if (CHIP_IS_E1H(bp)) {
+		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
+			IS_E1HMF(bp));
+		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
+			IS_E1HMF(bp));
+		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
+			IS_E1HMF(bp));
+		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
+			IS_E1HMF(bp));
+
+		REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
+			 bp->e1hov);
+	}
+
+	/* Init CQ ring mapping and aggregation size; the FW limit is 8 frags */
+	max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) *
+				   SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
+	for_each_queue(bp, i) {
+		struct bnx2x_fastpath *fp = &bp->fp[i];
+
+		REG_WR(bp, BAR_USTRORM_INTMEM +
+		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
+		       U64_LO(fp->rx_comp_mapping));
+		REG_WR(bp, BAR_USTRORM_INTMEM +
+		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
+		       U64_HI(fp->rx_comp_mapping));
+
+		/* Next page */
+		REG_WR(bp, BAR_USTRORM_INTMEM +
+		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
+		       U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
+		REG_WR(bp, BAR_USTRORM_INTMEM +
+		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
+		       U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
+
+		REG_WR16(bp, BAR_USTRORM_INTMEM +
+			 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
+			 max_agg_size);
+	}
+
+	/* dropless flow control */
+	if (CHIP_IS_E1H(bp)) {
+		struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
+
+		rx_pause.bd_thr_low = 250;
+		rx_pause.cqe_thr_low = 250;
+		rx_pause.cos = 1;
+		rx_pause.sge_thr_low = 0;
+		rx_pause.bd_thr_high = 350;
+		rx_pause.cqe_thr_high = 350;
+		rx_pause.sge_thr_high = 0;
+
+		for_each_queue(bp, i) {
+			struct bnx2x_fastpath *fp = &bp->fp[i];
+
+			if (!fp->disable_tpa) {
+				rx_pause.sge_thr_low = 150;
+				rx_pause.sge_thr_high = 250;
+			}
+
+			offset = BAR_USTRORM_INTMEM +
+				 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
+								   fp->cl_id);
+			for (j = 0;
+			     j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
+			     j++)
+				REG_WR(bp, offset + j*4,
+				       ((u32 *)&rx_pause)[j]);
+		}
+	}
+
+	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
+
+	/* Init rate shaping and fairness contexts */
+	if (IS_E1HMF(bp)) {
+		int vn;
+
+		/* During init there is no active link.
+		   Until link is up, set link rate to 10Gbps */
+		bp->link_vars.line_speed = SPEED_10000;
+		bnx2x_init_port_minmax(bp);
+
+		if (!BP_NOMCP(bp))
+			bp->mf_config =
+			      SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
+		bnx2x_calc_vn_weight_sum(bp);
+
+		for (vn = VN_0; vn < E1HVN_MAX; vn++)
+			bnx2x_init_vn_minmax(bp, 2*vn + port);
+
+		/* Enable rate shaping and fairness */
+		bp->cmng.flags.cmng_enables |=
+					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
+
+	} else {
+		/* rate shaping and fairness are disabled */
+		DP(NETIF_MSG_IFUP,
+		   "single function mode  minmax will be disabled\n");
+	}
+
+	/* Store cmng structures to internal memory */
+	if (bp->port.pmf)
+		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
+			REG_WR(bp, BAR_XSTRORM_INTMEM +
+			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
+			       ((u32 *)(&bp->cmng))[i]);
+}
+
+static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
+{
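+	/* the load codes are cumulative: COMMON init also performs PORT and
+	 * FUNCTION init, and PORT init also performs FUNCTION init, hence
+	 * the deliberate fall-through below */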
+	switch (load_code) {
+	case FW_MSG_CODE_DRV_LOAD_COMMON:
+		bnx2x_init_internal_common(bp);
+		/* no break */
+
+	case FW_MSG_CODE_DRV_LOAD_PORT:
+		bnx2x_init_internal_port(bp);
+		/* no break */
+
+	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
+		bnx2x_init_internal_func(bp);
+		break;
+
+	default:
+		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
+		break;
+	}
+}
+
+void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
+{
+	int i;
+
+	for_each_queue(bp, i) {
+		struct bnx2x_fastpath *fp = &bp->fp[i];
+
+		fp->bp = bp;
+		fp->state = BNX2X_FP_STATE_CLOSED;
+		fp->index = i;
+		fp->cl_id = BP_L_ID(bp) + i;
+#ifdef BCM_CNIC
+		fp->sb_id = fp->cl_id + 1;
+#else
+		fp->sb_id = fp->cl_id;
+#endif
+		DP(NETIF_MSG_IFUP,
+		   "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
+		   i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
+		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
+			      fp->sb_id);
+		bnx2x_update_fpsb_idx(fp);
+	}
+
+	/* ensure status block indices were read */
+	rmb();
+
+	bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
+			  DEF_SB_ID);
+	bnx2x_update_dsb_idx(bp);
+	bnx2x_update_coalesce(bp);
+	bnx2x_init_rx_rings(bp);
+	bnx2x_init_tx_ring(bp);
+	bnx2x_init_sp_ring(bp);
+	bnx2x_init_context(bp);
+	bnx2x_init_internal(bp, load_code);
+	bnx2x_init_ind_table(bp);
+	bnx2x_stats_init(bp);
+
+	/* At this point, we are ready for interrupts */
+	atomic_set(&bp->intr_sem, 0);
+
+	/* flush all before enabling interrupts */
+	mb();
+	mmiowb();
+
+	bnx2x_int_enable(bp);
+
+	/* Check for SPIO5 */
+	bnx2x_attn_int_deasserted0(bp,
+		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
+				   AEU_INPUTS_ATTN_BITS_SPIO5);
+}
+
+/* end of nic init */
+
+/*
+ * gzip service functions
+ */
+
+static int bnx2x_gunzip_init(struct bnx2x *bp)
+{
+	bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
+					    &bp->gunzip_mapping, GFP_KERNEL);
+	if (bp->gunzip_buf == NULL)
+		goto gunzip_nomem1;
+
+	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
+	if (bp->strm == NULL)
+		goto gunzip_nomem2;
+
+	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
+				      GFP_KERNEL);
+	if (bp->strm->workspace == NULL)
+		goto gunzip_nomem3;
+
+	return 0;
+
+gunzip_nomem3:
+	kfree(bp->strm);
+	bp->strm = NULL;
+
+gunzip_nomem2:
+	dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
+			  bp->gunzip_mapping);
+	bp->gunzip_buf = NULL;
+
+gunzip_nomem1:
+	netdev_err(bp->dev, "Cannot allocate firmware buffer for"
+	       " decompression\n");
+	return -ENOMEM;
+}
+
+static void bnx2x_gunzip_end(struct bnx2x *bp)
+{
+	kfree(bp->strm->workspace);
+
+	kfree(bp->strm);
+	bp->strm = NULL;
+
+	if (bp->gunzip_buf) {
+		dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
+				  bp->gunzip_mapping);
+		bp->gunzip_buf = NULL;
+	}
+}
+
+static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
+{
+	int n, rc;
+
+	/* check gzip header */
+	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
+		BNX2X_ERR("Bad gzip header\n");
+		return -EINVAL;
+	}
+
+	n = 10;
+
+#define FNAME				0x8
+
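+	/* if the FNAME flag is set, skip the zero-terminated original file
+	 * name that follows the 10-byte header (RFC 1952) */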
+	if (zbuf[3] & FNAME)
+		while ((zbuf[n++] != 0) && (n < len));
+
+	bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
+	bp->strm->avail_in = len - n;
+	bp->strm->next_out = bp->gunzip_buf;
+	bp->strm->avail_out = FW_BUF_SIZE;
+
+	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
+	if (rc != Z_OK)
+		return rc;
+
+	rc = zlib_inflate(bp->strm, Z_FINISH);
+	if ((rc != Z_OK) && (rc != Z_STREAM_END))
+		netdev_err(bp->dev, "Firmware decompression error: %s\n",
+			   bp->strm->msg);
+
+	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
+	if (bp->gunzip_outlen & 0x3)
+		netdev_err(bp->dev, "Firmware decompression error:"
+				    " gunzip_outlen (%d) not aligned\n",
+				bp->gunzip_outlen);
+	bp->gunzip_outlen >>= 2;
+
+	zlib_inflateEnd(bp->strm);
+
+	if (rc == Z_STREAM_END)
+		return 0;
+
+	return rc;
+}
+
+/* nic load/unload */
+
+/*
+ * General service functions
+ */
+
+/* send a NIG loopback debug packet */
+static void bnx2x_lb_pckt(struct bnx2x *bp)
+{
+	u32 wb_write[3];
+
+	/* Ethernet source and destination addresses */
+	wb_write[0] = 0x55555555;
+	wb_write[1] = 0x55555555;
+	wb_write[2] = 0x20;		/* SOP */
+	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
+
+	/* NON-IP protocol */
+	wb_write[0] = 0x09000000;
+	wb_write[1] = 0x55555555;
+	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
+	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
+}
+
+/* Some of the internal memories are not directly readable from the
+ * driver; to test them we send debug packets, each 0x10 bytes long,
+ * and check the octet/packet counters of the blocks they traverse.
+ */
+static int bnx2x_int_mem_test(struct bnx2x *bp)
+{
+	int factor;
+	int count, i;
+	u32 val = 0;
+
+	if (CHIP_REV_IS_FPGA(bp))
+		factor = 120;
+	else if (CHIP_REV_IS_EMUL(bp))
+		factor = 200;
+	else
+		factor = 1;
+
+	DP(NETIF_MSG_HW, "start part1\n");
+
+	/* Disable inputs of parser neighbor blocks */
+	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
+	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
+	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
+	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
+
+	/* Write 0 to parser credits for CFC search request */
+	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
+
+	/* send Ethernet packet */
+	bnx2x_lb_pckt(bp);
+
+	/* TODO: do we need to reset the NIG statistics? */
+	/* Wait until NIG register shows 1 packet of size 0x10 */
+	count = 1000 * factor;
+	while (count) {
+
+		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
+		val = *bnx2x_sp(bp, wb_data[0]);
+		if (val == 0x10)
+			break;
+
+		msleep(10);
+		count--;
+	}
+	if (val != 0x10) {
+		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
+		return -1;
+	}
+
+	/* Wait until PRS register shows 1 packet */
+	count = 1000 * factor;
+	while (count) {
+		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
+		if (val == 1)
+			break;
+
+		msleep(10);
+		count--;
+	}
+	if (val != 0x1) {
+		BNX2X_ERR("PRS timeout val = 0x%x\n", val);
+		return -2;
+	}
+
+	/* Reset and init BRB, PRS */
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
+	msleep(50);
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
+	msleep(50);
+	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
+	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
+
+	DP(NETIF_MSG_HW, "part2\n");
+
+	/* Disable inputs of parser neighbor blocks */
+	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
+	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
+	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
+	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
+
+	/* Write 0 to parser credits for CFC search request */
+	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
+
+	/* send 10 Ethernet packets */
+	for (i = 0; i < 10; i++)
+		bnx2x_lb_pckt(bp);
+
+	/* Wait until NIG register shows 10 + 1
+	   packets of size 11*0x10 = 0xb0 */
+	count = 1000 * factor;
+	while (count) {
+
+		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
+		val = *bnx2x_sp(bp, wb_data[0]);
+		if (val == 0xb0)
+			break;
+
+		msleep(10);
+		count--;
+	}
+	if (val != 0xb0) {
+		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
+		return -3;
+	}
+
+	/* Wait until PRS register shows 2 packets */
+	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
+	if (val != 2)
+		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
+
+	/* Write 1 to parser credits for CFC search request */
+	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
+
+	/* Wait until PRS register shows 3 packets */
+	msleep(10 * factor);
+	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
+	if (val != 3)
+		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
+
+	/* clear NIG EOP FIFO */
+	for (i = 0; i < 11; i++)
+		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
+	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
+	if (val != 1) {
+		BNX2X_ERR("clear of NIG failed\n");
+		return -4;
+	}
+
+	/* Reset and init BRB, PRS, NIG */
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
+	msleep(50);
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
+	msleep(50);
+	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
+	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
+#ifndef BCM_CNIC
+	/* set NIC mode */
+	REG_WR(bp, PRS_REG_NIC_MODE, 1);
+#endif
+
+	/* Enable inputs of parser neighbor blocks */
+	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
+	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
+	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
+	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
+
+	DP(NETIF_MSG_HW, "done\n");
+
+	return 0; /* OK */
+}
+
+static void enable_blocks_attention(struct bnx2x *bp)
+{
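+	/* writing 0 to a block's INT_MASK register leaves all of its
+	 * attention sources unmasked */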
+	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
+	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
+	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
+	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
+	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
+	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
+	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
+	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
+	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
+/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
+/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
+	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
+	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
+	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
+/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
+/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
+	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
+	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
+	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
+	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
+/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
+/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
+	if (CHIP_REV_IS_FPGA(bp))
+		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
+	else
+		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
+	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
+	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
+	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
+/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
+/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
+	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
+	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
+/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
+	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);		/* bit 3,4 masked */
+}
+
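+/* Per-block parity mask values: a set bit keeps that parity source
+ * masked, so 0x0 enables all parity attentions for a block while
+ * 0xffffffff leaves them disabled.
+ */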
+static const struct {
+	u32 addr;
+	u32 mask;
+} bnx2x_parity_mask[] = {
+	{PXP_REG_PXP_PRTY_MASK, 0xffffffff},
+	{PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
+	{PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
+	{HC_REG_HC_PRTY_MASK, 0xffffffff},
+	{MISC_REG_MISC_PRTY_MASK, 0xffffffff},
+	{QM_REG_QM_PRTY_MASK, 0x0},
+	{DORQ_REG_DORQ_PRTY_MASK, 0x0},
+	{GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
+	{GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
+	{SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
+	{CDU_REG_CDU_PRTY_MASK, 0x0},
+	{CFC_REG_CFC_PRTY_MASK, 0x0},
+	{DBG_REG_DBG_PRTY_MASK, 0x0},
+	{DMAE_REG_DMAE_PRTY_MASK, 0x0},
+	{BRB1_REG_BRB1_PRTY_MASK, 0x0},
+	{PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
+	{TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
+	{CSDM_REG_CSDM_PRTY_MASK, 0x8},	/* bit 3 */
+	{USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
+	{XSDM_REG_XSDM_PRTY_MASK, 0x8},	/* bit 3 */
+	{TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
+	{TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
+	{USEM_REG_USEM_PRTY_MASK_0, 0x0},
+	{USEM_REG_USEM_PRTY_MASK_1, 0x0},
+	{CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
+	{CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
+	{XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
+	{XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
+};
+
+static void enable_blocks_parity(struct bnx2x *bp)
+{
+	int i, mask_arr_len = ARRAY_SIZE(bnx2x_parity_mask);
+
+	for (i = 0; i < mask_arr_len; i++)
+		REG_WR(bp, bnx2x_parity_mask[i].addr,
+			bnx2x_parity_mask[i].mask);
+}
+
+static void bnx2x_reset_common(struct bnx2x *bp)
+{
+	/* reset_common */
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
+	       0xd3ffff7f);
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
+}
+
+static void bnx2x_init_pxp(struct bnx2x *bp)
+{
+	u16 devctl;
+	int r_order, w_order;
+
+	pci_read_config_word(bp->pdev,
+			     bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
+	DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
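+	/* bits 7:5 of devctl encode the max payload size and bits 14:12
+	 * the max read request size; the actual size is 128 << order */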
+	w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
+	if (bp->mrrs == -1)
+		r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
+	else {
+		DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
+		r_order = bp->mrrs;
+	}
+
+	bnx2x_init_pxp_arb(bp, r_order, w_order);
+}
+
+static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
+{
+	int is_required;
+	u32 val;
+	int port;
+
+	if (BP_NOMCP(bp))
+		return;
+
+	is_required = 0;
+	val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
+	      SHARED_HW_CFG_FAN_FAILURE_MASK;
+
+	if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
+		is_required = 1;
+
+	/*
+	 * The fan failure mechanism is usually related to the PHY type
+	 * since the power consumption of the board is affected by the PHY.
+	 * Currently, a fan is required for most designs with SFX7101,
+	 * BCM8727 and BCM8481.
+	 */
+	else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
+		for (port = PORT_0; port < PORT_MAX; port++) {
+			u32 phy_type =
+				SHMEM_RD(bp, dev_info.port_hw_config[port].
+					 external_phy_config) &
+				PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
+			is_required |=
+				((phy_type ==
+				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
+				 (phy_type ==
+				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
+				 (phy_type ==
+				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
+		}
+
+	DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
+
+	if (is_required == 0)
+		return;
+
+	/* Fan failure is indicated by SPIO 5 */
+	bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
+		       MISC_REGISTERS_SPIO_INPUT_HI_Z);
+
+	/* set to active low mode */
+	val = REG_RD(bp, MISC_REG_SPIO_INT);
+	val |= ((1 << MISC_REGISTERS_SPIO_5) <<
+					MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
+	REG_WR(bp, MISC_REG_SPIO_INT, val);
+
+	/* enable interrupt to signal the IGU */
+	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
+	val |= (1 << MISC_REGISTERS_SPIO_5);
+	REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
+}
+
+static int bnx2x_init_common(struct bnx2x *bp)
+{
+	u32 val, i;
+#ifdef BCM_CNIC
+	u32 wb_write[2];
+#endif
+
+	DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
+
+	bnx2x_reset_common(bp);
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
+
+	bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
+	if (CHIP_IS_E1H(bp))
+		REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
+
+	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
+	msleep(30);
+	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
+
+	bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
+	if (CHIP_IS_E1(bp)) {
+		/* enable HW interrupt from PXP on USDM overflow
+		   bit 16 on INT_MASK_0 */
+		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
+	}
+
+	bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
+	bnx2x_init_pxp(bp);
+
+#ifdef __BIG_ENDIAN
+	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
+	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
+	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
+	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
+	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
+	/* make sure this value is 0 */
+	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
+
+/*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
+	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
+	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
+	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
+	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
+#endif
+
+	REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
+#ifdef BCM_CNIC
+	REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
+	REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
+	REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
+#endif
+
+	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
+		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
+
+	/* let the HW do its magic ... */
+	msleep(100);
+	/* finish PXP init */
+	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
+	if (val != 1) {
+		BNX2X_ERR("PXP2 CFG failed\n");
+		return -EBUSY;
+	}
+	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
+	if (val != 1) {
+		BNX2X_ERR("PXP2 RD_INIT failed\n");
+		return -EBUSY;
+	}
+
+	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
+	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
+
+	bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
+
+	/* clean the DMAE memory */
+	bp->dmae_ready = 1;
+	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
+
+	bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
+	bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
+	bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
+	bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
+
+	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
+	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
+	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
+	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
+
+	bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
+
+#ifdef BCM_CNIC
+	wb_write[0] = 0;
+	wb_write[1] = 0;
+	for (i = 0; i < 64; i++) {
+		REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
+		bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
+
+		if (CHIP_IS_E1H(bp)) {
+			REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
+			bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
+					  wb_write, 2);
+		}
+	}
+#endif
+	/* soft reset pulse */
+	REG_WR(bp, QM_REG_SOFT_RESET, 1);
+	REG_WR(bp, QM_REG_SOFT_RESET, 0);
+
+#ifdef BCM_CNIC
+	bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
+#endif
+
+	bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
+	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
+	if (!CHIP_REV_IS_SLOW(bp)) {
+		/* enable hw interrupt from doorbell Q */
+		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
+	}
+
+	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
+	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
+	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
+#ifndef BCM_CNIC
+	/* set NIC mode */
+	REG_WR(bp, PRS_REG_NIC_MODE, 1);
+#endif
+	if (CHIP_IS_E1H(bp))
+		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
+
+	bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
+	bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
+	bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
+	bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
+
+	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
+	bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
+	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
+	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
+
+	bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
+	bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
+	bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
+	bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
+
+	/* sync semi rtc */
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
+	       0x80000000);
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
+	       0x80000000);
+
+	bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
+	bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
+	bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
+
+	REG_WR(bp, SRC_REG_SOFT_RST, 1);
+	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
+		REG_WR(bp, i, random32());
+	bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
+#ifdef BCM_CNIC
+	REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
+	REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
+	REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
+	REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
+	REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
+	REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
+	REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
+	REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
+	REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
+	REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
+#endif
+	REG_WR(bp, SRC_REG_SOFT_RST, 0);
+
+	if (sizeof(union cdu_context) != 1024)
+		/* we currently assume that a context is 1024 bytes */
+		dev_alert(&bp->pdev->dev, "please adjust the size "
+					  "of cdu_context(%ld)\n",
+			 (long)sizeof(union cdu_context));
+
+	bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
+	val = (4 << 24) + (0 << 12) + 1024;
+	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
+
+	bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
+	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
+	/* enable context validation interrupt from CFC */
+	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
+
+	/* set the thresholds to prevent CFC/CDU race */
+	REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
+
+	bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
+	bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
+
+	bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
+	/* Reset PCIE errors for debug */
+	REG_WR(bp, 0x2814, 0xffffffff);
+	REG_WR(bp, 0x3820, 0xffffffff);
+
+	bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
+	bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
+	bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
+	bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
+
+	bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
+	if (CHIP_IS_E1H(bp)) {
+		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
+		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
+	}
+
+	if (CHIP_REV_IS_SLOW(bp))
+		msleep(200);
+
+	/* finish CFC init */
+	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
+	if (val != 1) {
+		BNX2X_ERR("CFC LL_INIT failed\n");
+		return -EBUSY;
+	}
+	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
+	if (val != 1) {
+		BNX2X_ERR("CFC AC_INIT failed\n");
+		return -EBUSY;
+	}
+	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
+	if (val != 1) {
+		BNX2X_ERR("CFC CAM_INIT failed\n");
+		return -EBUSY;
+	}
+	REG_WR(bp, CFC_REG_DEBUG0, 0);
+
+	/* read NIG statistic
+	   to see if this is our first up since powerup */
+	bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
+	val = *bnx2x_sp(bp, wb_data[0]);
+
+	/* do internal memory self test */
+	if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
+		BNX2X_ERR("internal mem self test failed\n");
+		return -EBUSY;
+	}
+
+	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
+		bp->port.need_hw_lock = 1;
+		break;
+
+	default:
+		break;
+	}
+
+	bnx2x_setup_fan_failure_detection(bp);
+
+	/* clear PXP2 attentions */
+	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
+
+	enable_blocks_attention(bp);
+	if (CHIP_PARITY_SUPPORTED(bp))
+		enable_blocks_parity(bp);
+
+	if (!BP_NOMCP(bp)) {
+		bnx2x_acquire_phy_lock(bp);
+		bnx2x_common_init_phy(bp, bp->common.shmem_base);
+		bnx2x_release_phy_lock(bp);
+	} else
+		BNX2X_ERR("Bootcode is missing - can not initialize link\n");
+
+	return 0;
+}
+
+static int bnx2x_init_port(struct bnx2x *bp)
+{
+	int port = BP_PORT(bp);
+	int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
+	u32 low, high;
+	u32 val;
+
+	DP(BNX2X_MSG_MCP, "starting port init  port %d\n", port);
+
+	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
+
+	bnx2x_init_block(bp, PXP_BLOCK, init_stage);
+	bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
+
+	bnx2x_init_block(bp, TCM_BLOCK, init_stage);
+	bnx2x_init_block(bp, UCM_BLOCK, init_stage);
+	bnx2x_init_block(bp, CCM_BLOCK, init_stage);
+	bnx2x_init_block(bp, XCM_BLOCK, init_stage);
+
+#ifdef BCM_CNIC
+	REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
+
+	bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
+	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
+	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
+#endif
+
+	bnx2x_init_block(bp, DQ_BLOCK, init_stage);
+
+	bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
+	if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
+		/* no pause for emulation and FPGA */
+		low = 0;
+		high = 513;
+	} else {
+		if (IS_E1HMF(bp))
+			low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
+		else if (bp->dev->mtu > 4096) {
+			if (bp->flags & ONE_PORT_FLAG)
+				low = 160;
+			else {
+				val = bp->dev->mtu;
+				/* (24*1024 + val*4)/256 */
+				low = 96 + (val/64) + ((val % 64) ? 1 : 0);
+			}
+		} else
+			low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
+		high = low + 56;	/* 14*1024/256 */
+	}
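+	/* the thresholds are in 256-byte BRB blocks; e.g. in the two-port
+	   mtu > 4096 case with mtu == 9000: low = 96 + 140 + 1 = 237
+	   blocks and high = 237 + 56 = 293 */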
+	REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
+	REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
+
+	bnx2x_init_block(bp, PRS_BLOCK, init_stage);
+
+	bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
+	bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
+	bnx2x_init_block(bp, USDM_BLOCK, init_stage);
+	bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
+
+	bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
+	bnx2x_init_block(bp, USEM_BLOCK, init_stage);
+	bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
+	bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
+
+	bnx2x_init_block(bp, UPB_BLOCK, init_stage);
+	bnx2x_init_block(bp, XPB_BLOCK, init_stage);
+
+	bnx2x_init_block(bp, PBF_BLOCK, init_stage);
+
+	/* configure PBF to work without PAUSE mtu 9000 */
+	REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
+
+	/* update threshold */
+	REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
+	/* update init credit */
+	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
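+	/* both values are in 16-byte units: 9040/16 = 565 covers an
+	   MTU-9000 frame plus headroom, while "+ 553 - 22" appears to be
+	   a fixed, hardware-specific credit adjustment */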
+
+	/* probe changes */
+	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
+	msleep(5);
+	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
+
+#ifdef BCM_CNIC
+	bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
+#endif
+	bnx2x_init_block(bp, CDU_BLOCK, init_stage);
+	bnx2x_init_block(bp, CFC_BLOCK, init_stage);
+
+	if (CHIP_IS_E1(bp)) {
+		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
+		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
+	}
+	bnx2x_init_block(bp, HC_BLOCK, init_stage);
+
+	bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
+	/* init aeu_mask_attn_func_0/1:
+	 *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
+	 *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
+	 *             bits 4-7 are used for "per vn group attention" */
+	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
+	       (IS_E1HMF(bp) ? 0xF7 : 0x7));
+
+	bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
+	bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
+	bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
+	bnx2x_init_block(bp, DBU_BLOCK, init_stage);
+	bnx2x_init_block(bp, DBG_BLOCK, init_stage);
+
+	bnx2x_init_block(bp, NIG_BLOCK, init_stage);
+
+	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
+
+	if (CHIP_IS_E1H(bp)) {
+		/* 0x2 disable e1hov, 0x1 enable */
+		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
+		       (IS_E1HMF(bp) ? 0x1 : 0x2));
+
+		{
+			REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
+			REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
+			REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
+		}
+	}
+
+	bnx2x_init_block(bp, MCP_BLOCK, init_stage);
+	bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
+
+	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
+		{
+		u32 swap_val, swap_override, aeu_gpio_mask, offset;
+
+		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
+			       MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
+
+		/* The GPIO should be swapped if the swap register is
+		   set and active */
+		swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+		swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
+
+		/* Select function upon port-swap configuration */
+		if (port == 0) {
+			offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
+			aeu_gpio_mask = (swap_val && swap_override) ?
+				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
+				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
+		} else {
+			offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
+			aeu_gpio_mask = (swap_val && swap_override) ?
+				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
+				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
+		}
+		val = REG_RD(bp, offset);
+		/* add GPIO3 to group */
+		val |= aeu_gpio_mask;
+		REG_WR(bp, offset, val);
+		}
+		break;
+
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
+		/* add SPIO 5 to group 0 */
+		{
+		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
+				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
+		val = REG_RD(bp, reg_addr);
+		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
+		REG_WR(bp, reg_addr, val);
+		}
+		break;
+
+	default:
+		break;
+	}
+
+	bnx2x__link_reset(bp);
+
+	return 0;
+}
+
+#define ILT_PER_FUNC		(768/2)
+#define FUNC_ILT_BASE(func)	(func * ILT_PER_FUNC)
+/* the physical address is shifted right 12 bits and has a 1 (valid
+   bit) added at the 53rd bit;
+   then, since this is a wide register(TM),
+   we split it into two 32-bit writes
+ */
+#define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
+#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
+#define PXP_ONE_ILT(x)		(((x) << 10) | x)
+#define PXP_ILT_RANGE(f, l)	(((l) << 10) | f)
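+/* e.g. for a DMA address of 0x123456000, ONCHIP_ADDR1() yields
+ * 0x00123456 and ONCHIP_ADDR2() yields 0x00100000 (the valid bit
+ * alone, since bits 44+ are zero); PXP_ILT_RANGE(10, 12) packs to
+ * (12 << 10) | 10 == 0x300a.
+ */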
+
+#ifdef BCM_CNIC
+#define CNIC_ILT_LINES		127
+#define CNIC_CTX_PER_ILT	16
+#else
+#define CNIC_ILT_LINES		0
+#endif
+
+static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
+{
+	int reg;
+
+	if (CHIP_IS_E1H(bp))
+		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
+	else /* E1 */
+		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
+
+	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
+}
+
+static int bnx2x_init_func(struct bnx2x *bp)
+{
+	int port = BP_PORT(bp);
+	int func = BP_FUNC(bp);
+	u32 addr, val;
+	int i;
+
+	DP(BNX2X_MSG_MCP, "starting func init  func %d\n", func);
+
+	/* set MSI reconfigure capability */
+	addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
+	val = REG_RD(bp, addr);
+	val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
+	REG_WR(bp, addr, val);
+
+	i = FUNC_ILT_BASE(func);
+
+	bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
+	if (CHIP_IS_E1H(bp)) {
+		REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
+		REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
+	} else /* E1 */
+		REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
+		       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
+
+#ifdef BCM_CNIC
+	i += 1 + CNIC_ILT_LINES;
+	bnx2x_ilt_wr(bp, i, bp->timers_mapping);
+	if (CHIP_IS_E1(bp))
+		REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
+	else {
+		REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
+		REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
+	}
+
+	i++;
+	bnx2x_ilt_wr(bp, i, bp->qm_mapping);
+	if (CHIP_IS_E1(bp))
+		REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
+	else {
+		REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
+		REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
+	}
+
+	i++;
+	bnx2x_ilt_wr(bp, i, bp->t1_mapping);
+	if (CHIP_IS_E1(bp))
+		REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
+	else {
+		REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
+		REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
+	}
+
+	/* tell the searcher where the T2 table is */
+	REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
+
+	bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
+		    U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
+
+	bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
+		    U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
+		    U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
+
+	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
+#endif
+
+	if (CHIP_IS_E1H(bp)) {
+		bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
+		bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
+		bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
+		bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
+		bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
+		bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
+		bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
+		bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
+		bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
+
+		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
+		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
+	}
+
+	/* HC init per function */
+	if (CHIP_IS_E1H(bp)) {
+		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
+
+		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
+		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
+	}
+	bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
+
+	/* Reset PCIE errors for debug */
+	REG_WR(bp, 0x2114, 0xffffffff);
+	REG_WR(bp, 0x2120, 0xffffffff);
+
+	return 0;
+}
+
+int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
+{
+	int i, rc = 0;
+
+	DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
+	   BP_FUNC(bp), load_code);
+
+	bp->dmae_ready = 0;
+	mutex_init(&bp->dmae_mutex);
+	rc = bnx2x_gunzip_init(bp);
+	if (rc)
+		return rc;
+
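+	/* the cases below intentionally fall through: a COMMON load also
+	   runs the PORT and FUNCTION init stages, and a PORT load also
+	   runs the FUNCTION stage (hence the "no break" comments) */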
+	switch (load_code) {
+	case FW_MSG_CODE_DRV_LOAD_COMMON:
+		rc = bnx2x_init_common(bp);
+		if (rc)
+			goto init_hw_err;
+		/* no break */
+
+	case FW_MSG_CODE_DRV_LOAD_PORT:
+		bp->dmae_ready = 1;
+		rc = bnx2x_init_port(bp);
+		if (rc)
+			goto init_hw_err;
+		/* no break */
+
+	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
+		bp->dmae_ready = 1;
+		rc = bnx2x_init_func(bp);
+		if (rc)
+			goto init_hw_err;
+		break;
+
+	default:
+		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
+		break;
+	}
+
+	if (!BP_NOMCP(bp)) {
+		int func = BP_FUNC(bp);
+
+		bp->fw_drv_pulse_wr_seq =
+				(SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
+				 DRV_PULSE_SEQ_MASK);
+		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
+	}
+
+	/* this needs to be done before gunzip end */
+	bnx2x_zero_def_sb(bp);
+	for_each_queue(bp, i)
+		bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
+#ifdef BCM_CNIC
+	bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
+#endif
+
+init_hw_err:
+	bnx2x_gunzip_end(bp);
+
+	return rc;
+}
+
+void bnx2x_free_mem(struct bnx2x *bp)
+{
+
+#define BNX2X_PCI_FREE(x, y, size) \
+	do { \
+		if (x) { \
+			dma_free_coherent(&bp->pdev->dev, size, x, y); \
+			x = NULL; \
+			y = 0; \
+		} \
+	} while (0)
+
+#define BNX2X_FREE(x) \
+	do { \
+		if (x) { \
+			vfree(x); \
+			x = NULL; \
+		} \
+	} while (0)
+
+	int i;
+
+	/* fastpath */
+	/* Common */
+	for_each_queue(bp, i) {
+
+		/* status blocks */
+		BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
+			       bnx2x_fp(bp, i, status_blk_mapping),
+			       sizeof(struct host_status_block));
+	}
+	/* Rx */
+	for_each_queue(bp, i) {
+
+		/* fastpath rx rings: rx_buf rx_desc rx_comp */
+		BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
+		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
+			       bnx2x_fp(bp, i, rx_desc_mapping),
+			       sizeof(struct eth_rx_bd) * NUM_RX_BD);
+
+		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
+			       bnx2x_fp(bp, i, rx_comp_mapping),
+			       sizeof(struct eth_fast_path_rx_cqe) *
+			       NUM_RCQ_BD);
+
+		/* SGE ring */
+		BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
+		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
+			       bnx2x_fp(bp, i, rx_sge_mapping),
+			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
+	}
+	/* Tx */
+	for_each_queue(bp, i) {
+
+		/* fastpath tx rings: tx_buf tx_desc */
+		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
+		BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
+			       bnx2x_fp(bp, i, tx_desc_mapping),
+			       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
+	}
+	/* end of fastpath */
+
+	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
+		       sizeof(struct host_def_status_block));
+
+	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
+		       sizeof(struct bnx2x_slowpath));
+
+#ifdef BCM_CNIC
+	BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
+	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
+	BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
+	BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
+	BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
+		       sizeof(struct host_status_block));
+#endif
+	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
+
+#undef BNX2X_PCI_FREE
+#undef BNX2X_FREE
+}
+
+int bnx2x_alloc_mem(struct bnx2x *bp)
+{
+
+#define BNX2X_PCI_ALLOC(x, y, size) \
+	do { \
+		x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
+		if (x == NULL) \
+			goto alloc_mem_err; \
+		memset(x, 0, size); \
+	} while (0)
+
+#define BNX2X_ALLOC(x, size) \
+	do { \
+		x = vmalloc(size); \
+		if (x == NULL) \
+			goto alloc_mem_err; \
+		memset(x, 0, size); \
+	} while (0)
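+/* both macros jump to alloc_mem_err on failure, where bnx2x_free_mem()
+ * releases whatever was already allocated; that is safe because the
+ * matching free macros check each pointer for NULL first */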
+
+	int i;
+
+	/* fastpath */
+	/* Common */
+	for_each_queue(bp, i) {
+		bnx2x_fp(bp, i, bp) = bp;
+
+		/* status blocks */
+		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
+				&bnx2x_fp(bp, i, status_blk_mapping),
+				sizeof(struct host_status_block));
+	}
+	/* Rx */
+	for_each_queue(bp, i) {
+
+		/* fastpath rx rings: rx_buf rx_desc rx_comp */
+		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
+				sizeof(struct sw_rx_bd) * NUM_RX_BD);
+		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
+				&bnx2x_fp(bp, i, rx_desc_mapping),
+				sizeof(struct eth_rx_bd) * NUM_RX_BD);
+
+		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
+				&bnx2x_fp(bp, i, rx_comp_mapping),
+				sizeof(struct eth_fast_path_rx_cqe) *
+				NUM_RCQ_BD);
+
+		/* SGE ring */
+		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
+				sizeof(struct sw_rx_page) * NUM_RX_SGE);
+		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
+				&bnx2x_fp(bp, i, rx_sge_mapping),
+				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
+	}
+	/* Tx */
+	for_each_queue(bp, i) {
+
+		/* fastpath tx rings: tx_buf tx_desc */
+		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
+				sizeof(struct sw_tx_bd) * NUM_TX_BD);
+		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
+				&bnx2x_fp(bp, i, tx_desc_mapping),
+				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
+	}
+	/* end of fastpath */
+
+	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
+			sizeof(struct host_def_status_block));
+
+	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
+			sizeof(struct bnx2x_slowpath));
+
+#ifdef BCM_CNIC
+	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
+
+	/* allocate the searcher T2 table:
+	   a quarter of the T1 allocation
+	   (T2 is not entered into the ILT) */
+	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
+
+	/* Initialize T2 (for 1024 connections) */
+	for (i = 0; i < 16*1024; i += 64)
+		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
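+	/* the last 8 bytes of each 64-byte entry hold the physical
+	   address of the next entry, forming the searcher's free-entry
+	   linked list */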
+
+	/* Timer block array (8*MAX_CONN bytes), physically contiguous,
+	   uncached; sized for 1024 connections for now */
+	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
+
+	/* QM queues (128*MAX_CONN) */
+	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
+
+	BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
+			sizeof(struct host_status_block));
+#endif
+
+	/* Slow path ring */
+	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
+
+	return 0;
+
+alloc_mem_err:
+	bnx2x_free_mem(bp);
+	return -ENOMEM;
+
+#undef BNX2X_PCI_ALLOC
+#undef BNX2X_ALLOC
+}
+
+/*
+ * Init service functions
+ */
+
+/**
+ * Sets a MAC in a CAM for a few L2 Clients for E1 chip
+ *
+ * @param bp driver descriptor
+ * @param set set or clear an entry (1 or 0)
+ * @param mac pointer to a buffer containing a MAC
+ * @param cl_bit_vec bit vector of clients to register a MAC for
+ * @param cam_offset offset in a CAM to use
+ * @param with_bcast set broadcast MAC as well
+ */
+static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
+				      u32 cl_bit_vec, u8 cam_offset,
+				      u8 with_bcast)
+{
+	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
+	int port = BP_PORT(bp);
+
+	/* CAM allocation
+	 * unicasts 0-31:port0 32-63:port1
+	 * multicast 64-127:port0 128-191:port1
+	 */
+	config->hdr.length = 1 + (with_bcast ? 1 : 0);
+	config->hdr.offset = cam_offset;
+	config->hdr.client_id = 0xff;
+	config->hdr.reserved1 = 0;
+
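+	/* each 16-bit word of the MAC below is byte-swapped (swab16)
+	   into the order the CAM hardware expects */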
+	/* primary MAC */
+	config->config_table[0].cam_entry.msb_mac_addr =
+					swab16(*(u16 *)&mac[0]);
+	config->config_table[0].cam_entry.middle_mac_addr =
+					swab16(*(u16 *)&mac[2]);
+	config->config_table[0].cam_entry.lsb_mac_addr =
+					swab16(*(u16 *)&mac[4]);
+	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
+	if (set)
+		config->config_table[0].target_table_entry.flags = 0;
+	else
+		CAM_INVALIDATE(config->config_table[0]);
+	config->config_table[0].target_table_entry.clients_bit_vector =
+						cpu_to_le32(cl_bit_vec);
+	config->config_table[0].target_table_entry.vlan_id = 0;
+
+	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
+	   (set ? "setting" : "clearing"),
+	   config->config_table[0].cam_entry.msb_mac_addr,
+	   config->config_table[0].cam_entry.middle_mac_addr,
+	   config->config_table[0].cam_entry.lsb_mac_addr);
+
+	/* broadcast */
+	if (with_bcast) {
+		config->config_table[1].cam_entry.msb_mac_addr =
+			cpu_to_le16(0xffff);
+		config->config_table[1].cam_entry.middle_mac_addr =
+			cpu_to_le16(0xffff);
+		config->config_table[1].cam_entry.lsb_mac_addr =
+			cpu_to_le16(0xffff);
+		config->config_table[1].cam_entry.flags = cpu_to_le16(port);
+		if (set)
+			config->config_table[1].target_table_entry.flags =
+					TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
+		else
+			CAM_INVALIDATE(config->config_table[1]);
+		config->config_table[1].target_table_entry.clients_bit_vector =
+							cpu_to_le32(cl_bit_vec);
+		config->config_table[1].target_table_entry.vlan_id = 0;
+	}
+
+	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
+		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
+		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
+}
+
+/**
+ * Sets a MAC in a CAM for a few L2 Clients for E1H chip
+ *
+ * @param bp driver descriptor
+ * @param set set or clear an entry (1 or 0)
+ * @param mac pointer to a buffer containing a MAC
+ * @param cl_bit_vec bit vector of clients to register a MAC for
+ * @param cam_offset offset in a CAM to use
+ */
+static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
+				       u32 cl_bit_vec, u8 cam_offset)
+{
+	struct mac_configuration_cmd_e1h *config =
+		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
+
+	config->hdr.length = 1;
+	config->hdr.offset = cam_offset;
+	config->hdr.client_id = 0xff;
+	config->hdr.reserved1 = 0;
+
+	/* primary MAC */
+	config->config_table[0].msb_mac_addr =
+					swab16(*(u16 *)&mac[0]);
+	config->config_table[0].middle_mac_addr =
+					swab16(*(u16 *)&mac[2]);
+	config->config_table[0].lsb_mac_addr =
+					swab16(*(u16 *)&mac[4]);
+	config->config_table[0].clients_bit_vector =
+					cpu_to_le32(cl_bit_vec);
+	config->config_table[0].vlan_id = 0;
+	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
+	if (set)
+		config->config_table[0].flags = BP_PORT(bp);
+	else
+		config->config_table[0].flags =
+				MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
+
+	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID mask %d\n",
+	   (set ? "setting" : "clearing"),
+	   config->config_table[0].msb_mac_addr,
+	   config->config_table[0].middle_mac_addr,
+	   config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
+
+	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
+		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
+		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
+}
+
+static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
+			     int *state_p, int poll)
+{
+	/* can take a while if any port is running */
+	int cnt = 5000;
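+	/* 5000 iterations of msleep(1) below give a timeout of roughly
+	   5 seconds (longer in practice, since msleep() may oversleep) */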
+
+	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
+	   poll ? "polling" : "waiting", state, idx);
+
+	might_sleep();
+	while (cnt--) {
+		if (poll) {
+			bnx2x_rx_int(bp->fp, 10);
+			/* if the index is different from 0,
+			 * the reply for some commands will
+			 * be on the non-default queue
+			 */
+			if (idx)
+				bnx2x_rx_int(&bp->fp[idx], 10);
+		}
+
+		mb(); /* state is changed by bnx2x_sp_event() */
+		if (*state_p == state) {
+#ifdef BNX2X_STOP_ON_ERROR
+			DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
+#endif
+			return 0;
+		}
+
+		msleep(1);
+
+		if (bp->panic)
+			return -EIO;
+	}
+
+	/* timeout! */
+	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
+		  poll ? "polling" : "waiting", state, idx);
+#ifdef BNX2X_STOP_ON_ERROR
+	bnx2x_panic();
+#endif
+
+	return -EBUSY;
+}
+
+void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
+{
+	bp->set_mac_pending++;
+	smp_wmb();
+
+	bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
+				   (1 << bp->fp->cl_id), BP_FUNC(bp));
+
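+	/* when clearing an entry (set == 0) the completion is polled for
+	   directly (the last argument below), presumably because this
+	   path can run while interrupts are being torn down */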
+	/* Wait for a completion */
+	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
+}
+
+void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
+{
+	bp->set_mac_pending++;
+	smp_wmb();
+
+	bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
+				  (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
+				  1);
+
+	/* Wait for a completion */
+	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
+}
+
+#ifdef BCM_CNIC
+/**
+ * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
+ * MAC(s). This function will wait until the ramrod completion
+ * returns.
+ *
+ * @param bp driver handle
+ * @param set set or clear the CAM entry
+ *
+ * @return 0 on success, -ENODEV if the ramrod doesn't return.
+ */
+int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
+{
+	u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
+
+	bp->set_mac_pending++;
+	smp_wmb();
+
+	/* Send a SET_MAC ramrod */
+	if (CHIP_IS_E1(bp))
+		bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
+				  cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
+				  1);
+	else
+		/* CAM allocation for E1H
+		 * unicasts: by func number
+		 * multicast: 20+FUNC*20, 20 each
+		 */
+		bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
+				   cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
+
+	/* Wait for a completion when setting */
+	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
+
+	return 0;
+}
+#endif
+
+int bnx2x_setup_leading(struct bnx2x *bp)
+{
+	int rc;
+
+	/* reset IGU state */
+	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
+
+	/* SETUP ramrod */
+	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
+
+	/* Wait for completion */
+	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
+
+	return rc;
+}
+
+int bnx2x_setup_multi(struct bnx2x *bp, int index)
+{
+	struct bnx2x_fastpath *fp = &bp->fp[index];
+
+	/* reset IGU state */
+	bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
+
+	/* SETUP ramrod */
+	fp->state = BNX2X_FP_STATE_OPENING;
+	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
+		      fp->cl_id, 0);
+
+	/* Wait for completion */
+	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
+				 &(fp->state), 0);
+}
+
+void bnx2x_set_num_queues_msix(struct bnx2x *bp)
+{
+	switch (bp->multi_mode) {
+	case ETH_RSS_MODE_DISABLED:
+		bp->num_queues = 1;
+		break;
+
+	case ETH_RSS_MODE_REGULAR:
+		if (num_queues)
+			bp->num_queues = min_t(u32, num_queues,
+						  BNX2X_MAX_QUEUES(bp));
+		else
+			bp->num_queues = min_t(u32, num_online_cpus(),
+						  BNX2X_MAX_QUEUES(bp));
+		break;
+
+	default:
+		bp->num_queues = 1;
+		break;
+	}
+}
+
+static int bnx2x_stop_multi(struct bnx2x *bp, int index)
+{
+	struct bnx2x_fastpath *fp = &bp->fp[index];
+	int rc;
+
+	/* halt the connection */
+	fp->state = BNX2X_FP_STATE_HALTING;
+	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
+
+	/* Wait for completion */
+	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
+			       &(fp->state), 1);
+	if (rc) /* timeout */
+		return rc;
+
+	/* delete cfc entry */
+	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
+
+	/* Wait for completion */
+	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
+			       &(fp->state), 1);
+	return rc;
+}
+
+static int bnx2x_stop_leading(struct bnx2x *bp)
+{
+	__le16 dsb_sp_prod_idx;
+	/* if the other port is handling traffic,
+	   this can take a lot of time */
+	int cnt = 500;
+	int rc;
+
+	might_sleep();
+
+	/* Send HALT ramrod */
+	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
+	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
+
+	/* Wait for completion */
+	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
+			       &(bp->fp[0].state), 1);
+	if (rc) /* timeout */
+		return rc;
+
+	dsb_sp_prod_idx = *bp->dsb_sp_prod;
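+	/* snapshot the default status block producer; the PORT_DEL
+	   completion below will advance it */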
+
+	/* Send PORT_DELETE ramrod */
+	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
+
+	/* Wait for the completion to arrive on the default status block;
+	   we are going to reset the chip anyway,
+	   so there is not much to do if this times out
+	 */
+	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
+		if (!cnt) {
+			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
+			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
+			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
+#ifdef BNX2X_STOP_ON_ERROR
+			bnx2x_panic();
+#endif
+			rc = -EBUSY;
+			break;
+		}
+		cnt--;
+		msleep(1);
+		rmb(); /* Refresh the dsb_sp_prod */
+	}
+	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
+	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
+
+	return rc;
+}
+
+static void bnx2x_reset_func(struct bnx2x *bp)
+{
+	int port = BP_PORT(bp);
+	int func = BP_FUNC(bp);
+	int base, i;
+
+	/* Configure IGU */
+	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
+	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
+
+#ifdef BCM_CNIC
+	/* Disable Timer scan */
+	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
+	/*
+	 * Wait for at least 10ms and up to 2 second for the timers scan to
+	 * complete
+	 */
+	for (i = 0; i < 200; i++) {
+		msleep(10);
+		if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
+			break;
+	}
+#endif
+	/* Clear ILT */
+	base = FUNC_ILT_BASE(func);
+	for (i = base; i < base + ILT_PER_FUNC; i++)
+		bnx2x_ilt_wr(bp, i, 0);
+}
+
+static void bnx2x_reset_port(struct bnx2x *bp)
+{
+	int port = BP_PORT(bp);
+	u32 val;
+
+	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
+
+	/* Do not rcv packets to BRB */
+	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
+	/* Do not direct rcv packets that are not for MCP to the BRB */
+	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
+			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
+
+	/* Configure AEU */
+	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
+
+	msleep(100);
+	/* Check for BRB port occupancy */
+	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
+	if (val)
+		DP(NETIF_MSG_IFDOWN,
+		   "BRB1 is not empty  %d blocks are occupied\n", val);
+
+	/* TODO: Close Doorbell port? */
+}
+
+static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
+{
+	DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
+	   BP_FUNC(bp), reset_code);
+
+	switch (reset_code) {
+	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
+		bnx2x_reset_port(bp);
+		bnx2x_reset_func(bp);
+		bnx2x_reset_common(bp);
+		break;
+
+	case FW_MSG_CODE_DRV_UNLOAD_PORT:
+		bnx2x_reset_port(bp);
+		bnx2x_reset_func(bp);
+		break;
+
+	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
+		bnx2x_reset_func(bp);
+		break;
+
+	default:
+		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
+		break;
+	}
+}
+
+void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
+{
+	int port = BP_PORT(bp);
+	u32 reset_code = 0;
+	int i, cnt, rc;
+
+	/* Wait until tx fastpath tasks complete */
+	for_each_queue(bp, i) {
+		struct bnx2x_fastpath *fp = &bp->fp[i];
+
+		cnt = 1000;
+		while (bnx2x_has_tx_work_unload(fp)) {
+
+			bnx2x_tx_int(fp);
+			if (!cnt) {
+				BNX2X_ERR("timeout waiting for queue[%d]\n",
+					  i);
+#ifdef BNX2X_STOP_ON_ERROR
+				bnx2x_panic();
+				return;
+#else
+				break;
+#endif
+			}
+			cnt--;
+			msleep(1);
+		}
+	}
+	/* Give HW time to discard old tx messages */
+	msleep(1);
+
+	if (CHIP_IS_E1(bp)) {
+		struct mac_configuration_cmd *config =
+						bnx2x_sp(bp, mcast_config);
+
+		bnx2x_set_eth_mac_addr_e1(bp, 0);
+
+		for (i = 0; i < config->hdr.length; i++)
+			CAM_INVALIDATE(config->config_table[i]);
+
+		config->hdr.length = i;
+		if (CHIP_REV_IS_SLOW(bp))
+			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
+		else
+			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
+		config->hdr.client_id = bp->fp->cl_id;
+		config->hdr.reserved1 = 0;
+
+		bp->set_mac_pending++;
+		smp_wmb();
+
+		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
+			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
+			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
+
+	} else { /* E1H */
+		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
+
+		bnx2x_set_eth_mac_addr_e1h(bp, 0);
+
+		for (i = 0; i < MC_HASH_SIZE; i++)
+			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
+
+		REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
+	}
+#ifdef BCM_CNIC
+	/* Clear iSCSI L2 MAC */
+	mutex_lock(&bp->cnic_mutex);
+	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
+		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
+		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
+	}
+	mutex_unlock(&bp->cnic_mutex);
+#endif
+
+	if (unload_mode == UNLOAD_NORMAL)
+		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
+
+	else if (bp->flags & NO_WOL_FLAG)
+		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
+
+	else if (bp->wol) {
+		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+		u8 *mac_addr = bp->dev->dev_addr;
+		u32 val;
+		/* The mac address is written to entries 1-4 to
+		   preserve entry 0 which is used by the PMF */
+		u8 entry = (BP_E1HVN(bp) + 1)*8;
+
+		val = (mac_addr[0] << 8) | mac_addr[1];
+		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
+
+		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
+		      (mac_addr[4] << 8) | mac_addr[5];
+		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
+
+		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
+
+	} else
+		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
+
+	/* Close the multi and leading connections;
+	   completions for the ramrods are collected synchronously */
+	for_each_nondefault_queue(bp, i)
+		if (bnx2x_stop_multi(bp, i))
+			goto unload_error;
+
+	rc = bnx2x_stop_leading(bp);
+	if (rc) {
+		BNX2X_ERR("Stop leading failed!\n");
+#ifdef BNX2X_STOP_ON_ERROR
+		return;
+#else
+		goto unload_error;
+#endif
+	}
+
+unload_error:
+	if (!BP_NOMCP(bp))
+		reset_code = bnx2x_fw_command(bp, reset_code);
+	else {
+		DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
+		   load_count[0], load_count[1], load_count[2]);
+		load_count[0]--;
+		load_count[1 + port]--;
+		DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
+		   load_count[0], load_count[1], load_count[2]);
+		if (load_count[0] == 0)
+			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
+		else if (load_count[1 + port] == 0)
+			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
+		else
+			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
+	}
+
+	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
+	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
+		bnx2x__link_reset(bp);
+
+	/* Reset the chip */
+	bnx2x_reset_chip(bp, reset_code);
+
+	/* Report UNLOAD_DONE to MCP */
+	if (!BP_NOMCP(bp))
+		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
+
+}
+
+void bnx2x_disable_close_the_gate(struct bnx2x *bp)
+{
+	u32 val;
+
+	DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
+
+	if (CHIP_IS_E1(bp)) {
+		int port = BP_PORT(bp);
+		u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
+			MISC_REG_AEU_MASK_ATTN_FUNC_0;
+
+		val = REG_RD(bp, addr);
+		val &= ~(0x300);
+		REG_WR(bp, addr, val);
+	} else if (CHIP_IS_E1H(bp)) {
+		val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
+		val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
+			 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
+		REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
+	}
+}
+
+/* Close gates #2, #3 and #4: */
+static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
+{
+	u32 val, addr;
+
+	/* Gates #2 and #4a are closed/opened for "not E1" only */
+	if (!CHIP_IS_E1(bp)) {
+		/* #4 */
+		val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
+		REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
+		       close ? (val | 0x1) : (val & (~(u32)1)));
+		/* #2 */
+		val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
+		REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
+		       close ? (val | 0x1) : (val & (~(u32)1)));
+	}
+
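+	/* note the inverted polarity for gate #3: bit 0 of HC_REG_CONFIG
+	   enables the path, so "closing" clears it and "opening" sets it */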
+	/* #3 */
+	addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
+	val = REG_RD(bp, addr);
+	REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
+
+	DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
+		close ? "closing" : "opening");
+	mmiowb();
+}
+
+#define SHARED_MF_CLP_MAGIC  0x80000000 /* `magic' bit */
+
+static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
+{
+	/* Do some magic... */
+	u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
+	*magic_val = val & SHARED_MF_CLP_MAGIC;
+	MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
+}
+
+/* Restore the value of the `magic' bit.
+ *
+ * @param bp driver handle.
+ * @param magic_val Old value of the `magic' bit.
+ */
+static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
+{
+	/* Restore the `magic' bit value... */
+	u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
+	MF_CFG_WR(bp, shared_mf_config.clp_mb,
+		(val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
+}
+
+/* Prepares for MCP reset: takes care of CLP configurations.
+ *
+ * @param bp driver handle
+ * @param magic_val location to store the old value of the 'magic' bit
+ */
+static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
+{
+	u32 shmem;
+	u32 validity_offset;
+
+	DP(NETIF_MSG_HW, "Starting\n");
+
+	/* Set `magic' bit in order to save MF config */
+	if (!CHIP_IS_E1(bp))
+		bnx2x_clp_reset_prep(bp, magic_val);
+
+	/* Get shmem offset */
+	shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
+	validity_offset = offsetof(struct shmem_region, validity_map[0]);
+
+	/* Clear validity map flags */
+	if (shmem > 0)
+		REG_WR(bp, shmem + validity_offset, 0);
+}
+
+#define MCP_TIMEOUT      5000   /* 5 seconds (in ms) */
+#define MCP_ONE_TIMEOUT  100    /* 100 ms */
+
+/* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
+ * depending on the HW type.
+ *
+ * @param bp
+ */
+static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
+{
+	/* special handling for emulation and FPGA,
+	   wait 10 times longer */
+	if (CHIP_REV_IS_SLOW(bp))
+		msleep(MCP_ONE_TIMEOUT*10);
+	else
+		msleep(MCP_ONE_TIMEOUT);
+}
+
+static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
+{
+	u32 shmem, cnt, validity_offset, val;
+	int rc = 0;
+
+	msleep(100);
+
+	/* Get shmem offset */
+	shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
+	if (shmem == 0) {
+		BNX2X_ERR("Shmem 0 return failure\n");
+		rc = -ENOTTY;
+		goto exit_lbl;
+	}
+
+	validity_offset = offsetof(struct shmem_region, validity_map[0]);
+
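+	/* poll up to MCP_TIMEOUT/MCP_ONE_TIMEOUT == 50 times, i.e. 5s in
+	   total (50s on emulation/FPGA, where each wait is 10x longer) */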
+	/* Wait for MCP to come up */
+	for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
+		/* TBD: it's best to check the validity map of the last
+		 * port; currently this checks port 0.
+		 */
+		val = REG_RD(bp, shmem + validity_offset);
+		DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
+		   shmem + validity_offset, val);
+
+		/* check that shared memory is valid. */
+		if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
+		    == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
+			break;
+
+		bnx2x_mcp_wait_one(bp);
+	}
+
+	DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
+
+	/* Check that shared memory is valid. This indicates that MCP is up. */
+	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
+	    (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
+		BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
+		rc = -ENOTTY;
+		goto exit_lbl;
+	}
+
+exit_lbl:
+	/* Restore the `magic' bit value */
+	if (!CHIP_IS_E1(bp))
+		bnx2x_clp_reset_done(bp, magic_val);
+
+	return rc;
+}
+
+static void bnx2x_pxp_prep(struct bnx2x *bp)
+{
+	if (!CHIP_IS_E1(bp)) {
+		REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
+		REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
+		REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
+		mmiowb();
+	}
+}
+
+/*
+ * Reset the whole chip except for:
+ *      - PCIE core
+ *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
+ *              one reset bit)
+ *      - IGU
+ *      - MISC (including AEU)
+ *      - GRC
+ *      - RBCN, RBCP
+ */
+static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
+{
+	u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
+
+	not_reset_mask1 =
+		MISC_REGISTERS_RESET_REG_1_RST_HC |
+		MISC_REGISTERS_RESET_REG_1_RST_PXPV |
+		MISC_REGISTERS_RESET_REG_1_RST_PXP;
+
+	not_reset_mask2 =
+		MISC_REGISTERS_RESET_REG_2_RST_MDIO |
+		MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
+		MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
+		MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
+		MISC_REGISTERS_RESET_REG_2_RST_RBCN |
+		MISC_REGISTERS_RESET_REG_2_RST_GRC  |
+		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
+		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
+
+	reset_mask1 = 0xffffffff;
+
+	if (CHIP_IS_E1(bp))
+		reset_mask2 = 0xffff;
+	else
+		reset_mask2 = 0x1ffff;
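+	/* RESET_REG_2 appears to expose 16 reset bits on E1 and 17 on
+	   E1H, hence the different masks */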
+
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
+	       reset_mask1 & (~not_reset_mask1));
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+	       reset_mask2 & (~not_reset_mask2));
+
+	barrier();
+	mmiowb();
+
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
+	mmiowb();
+}
+
+static int bnx2x_process_kill(struct bnx2x *bp)
+{
+	int cnt = 1000;
+	u32 val = 0;
+	u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
+
+	/* Wait up to 1s for the Tetris buffer to empty */
+	do {
+		sr_cnt  = REG_RD(bp, PXP2_REG_RD_SR_CNT);
+		blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
+		port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
+		port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
+		pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
+		if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
+		    ((port_is_idle_0 & 0x1) == 0x1) &&
+		    ((port_is_idle_1 & 0x1) == 0x1) &&
+		    (pgl_exp_rom2 == 0xffffffff))
+			break;
+		msleep(1);
+	} while (cnt-- > 0);
+
+	if (cnt <= 0) {
+		DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
+			  " are still"
+			  " outstanding read requests after 1s!\n");
+		DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
+			  " port_is_idle_0=0x%08x,"
+			  " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
+			  sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
+			  pgl_exp_rom2);
+		return -EAGAIN;
+	}
+
+	barrier();
+
+	/* Close gates #2, #3 and #4 */
+	bnx2x_set_234_gates(bp, true);
+
+	/* TBD: Indicate that "process kill" is in progress to MCP */
+
+	/* Clear "unprepared" bit */
+	REG_WR(bp, MISC_REG_UNPREPARED, 0);
+	barrier();
+
+	/* Make sure all is written to the chip before the reset */
+	mmiowb();
+
+	/* Wait for 1ms to empty GLUE and PCI-E core queues,
+	 * PSWHST, GRC and PSWRD Tetris buffer.
+	 */
+	msleep(1);
+
+	/* Prepare to chip reset: */
+	/* MCP */
+	bnx2x_reset_mcp_prep(bp, &val);
+
+	/* PXP */
+	bnx2x_pxp_prep(bp);
+	barrier();
+
+	/* reset the chip */
+	bnx2x_process_kill_chip_reset(bp);
+	barrier();
+
+	/* Recover after reset: */
+	/* MCP */
+	if (bnx2x_reset_mcp_comp(bp, val))
+		return -EAGAIN;
+
+	/* PXP */
+	bnx2x_pxp_prep(bp);
+
+	/* Open the gates #2, #3 and #4 */
+	bnx2x_set_234_gates(bp, false);
+
+	/* TBD: IGU/AEU preparation - bring the AEU/IGU back to a
+	 * reset state, re-enable attentions. */
+
+	return 0;
+}
+
+static int bnx2x_leader_reset(struct bnx2x *bp)
+{
+	int rc = 0;
+	/* Try to recover after the failure */
+	if (bnx2x_process_kill(bp)) {
+		printk(KERN_ERR "%s: Something bad happened! Aii!\n",
+		       bp->dev->name);
+		rc = -EAGAIN;
+		goto exit_leader_reset;
+	}
+
+	/* Clear "reset is in progress" bit and update the driver state */
+	bnx2x_set_reset_done(bp);
+	bp->recovery_state = BNX2X_RECOVERY_DONE;
+
+exit_leader_reset:
+	bp->is_leader = 0;
+	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
+	smp_wmb();
+	return rc;
+}
+
+/* Assumption: runs under rtnl lock. This together with the fact
+ * that it's called only from bnx2x_reset_task() ensure that it
+ * will never be called when netif_running(bp->dev) is false.
+ */
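+/*
+ * Recovery state machine, as implemented below:
+ *   INIT - try to become the leader, unload the NIC, move to WAIT;
+ *   WAIT - the leader waits for all functions to unload, then resets
+ *          the chip and reloads; a non-leader waits for the leader to
+ *          finish (or takes over the leadership) and then reloads.
+ */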
+static void bnx2x_parity_recover(struct bnx2x *bp)
+{
+	DP(NETIF_MSG_HW, "Handling parity\n");
+	while (1) {
+		switch (bp->recovery_state) {
+		case BNX2X_RECOVERY_INIT:
+			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
+			/* Try to get a LEADER_LOCK HW lock */
+			if (bnx2x_trylock_hw_lock(bp,
+				HW_LOCK_RESOURCE_RESERVED_08))
+				bp->is_leader = 1;
+
+			/* Stop the driver; if the interface has been
+			 * removed - bail out */
+			if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
+				return;
+
+			bp->recovery_state = BNX2X_RECOVERY_WAIT;
+			/* Ensure "is_leader" and "recovery_state"
+			 *  update values are seen on other CPUs
+			 */
+			smp_wmb();
+			break;
+
+		case BNX2X_RECOVERY_WAIT:
+			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
+			if (bp->is_leader) {
+				u32 load_counter = bnx2x_get_load_cnt(bp);
+				if (load_counter) {
+					/* Wait until all other functions get
+					 * down.
+					 */
+					schedule_delayed_work(&bp->reset_task,
+								HZ/10);
+					return;
+				} else {
+					/* If all other functions got down -
+					 * try to bring the chip back to
+					 * normal. In any case it's an exit
+					 * point for a leader.
+					 */
+					if (bnx2x_leader_reset(bp) ||
+					bnx2x_nic_load(bp, LOAD_NORMAL)) {
+						printk(KERN_ERR"%s: Recovery "
+						"has failed. Power cycle is "
+						"needed.\n", bp->dev->name);
+						/* Disconnect this device */
+						netif_device_detach(bp->dev);
+						/* Block ifup for all function
+						 * of this ASIC until
+						 * "process kill" or power
+						 * cycle.
+						 */
+						bnx2x_set_reset_in_progress(bp);
+						/* Shut down the power */
+						bnx2x_set_power_state(bp,
+								PCI_D3hot);
+						return;
+					}
+
+					return;
+				}
+			} else { /* non-leader */
+				if (!bnx2x_reset_is_done(bp)) {
+					/* Try to get a LEADER_LOCK HW lock,
+					 * since a former leader may have
+					 * been unloaded by the user or
+					 * released the leadership for some
+					 * other reason.
+					 */
+					if (bnx2x_trylock_hw_lock(bp,
+					    HW_LOCK_RESOURCE_RESERVED_08)) {
+						/* I'm a leader now! Restart a
+						 * switch case.
+						 */
+						bp->is_leader = 1;
+						break;
+					}
+
+					schedule_delayed_work(&bp->reset_task,
+								HZ/10);
+					return;
+
+				} else { /* A leader has completed
+					  * the "process kill". It's an exit
+					  * point for a non-leader.
+					  */
+					bnx2x_nic_load(bp, LOAD_NORMAL);
+					bp->recovery_state =
+						BNX2X_RECOVERY_DONE;
+					smp_wmb();
+					return;
+				}
+			}
+		default:
+			return;
+		}
+	}
+}
+
+/* bnx2x_nic_unload() flushes the bnx2x_wq, so the reset task is
+ * scheduled on a generic workqueue in order to prevent a deadlock.
+ */
+static void bnx2x_reset_task(struct work_struct *work)
+{
+	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
+
+#ifdef BNX2X_STOP_ON_ERROR
+	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
+		  " so reset not done to allow debug dump,\n"
+	 KERN_ERR " you will need to reboot when done\n");
+	return;
+#endif
+
+	rtnl_lock();
+
+	if (!netif_running(bp->dev))
+		goto reset_task_exit;
+
+	if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
+		bnx2x_parity_recover(bp);
+	else {
+		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+		bnx2x_nic_load(bp, LOAD_NORMAL);
+	}
+
+reset_task_exit:
+	rtnl_unlock();
+}
+
+/* end of nic load/unload */
+
+/*
+ * Init service functions
+ */
+
+static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
+{
+	switch (func) {
+	case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
+	case 1:	return PXP2_REG_PGL_PRETEND_FUNC_F1;
+	case 2:	return PXP2_REG_PGL_PRETEND_FUNC_F2;
+	case 3:	return PXP2_REG_PGL_PRETEND_FUNC_F3;
+	case 4:	return PXP2_REG_PGL_PRETEND_FUNC_F4;
+	case 5:	return PXP2_REG_PGL_PRETEND_FUNC_F5;
+	case 6:	return PXP2_REG_PGL_PRETEND_FUNC_F6;
+	case 7:	return PXP2_REG_PGL_PRETEND_FUNC_F7;
+	default:
+		BNX2X_ERR("Unsupported function index: %d\n", func);
+		return (u32)(-1);
+	}
+}
+
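+/* Writing a function number to the PGL "pretend" register makes this
+ * function's subsequent GRC accesses be issued as that function; the
+ * helper below uses it to run bnx2x_int_disable() as function 0.
+ */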
+static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
+{
+	u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
+
+	/* Flush all outstanding writes */
+	mmiowb();
+
+	/* Pretend to be function 0 */
+	REG_WR(bp, reg, 0);
+	/* Flush the GRC transaction (in the chip) */
+	new_val = REG_RD(bp, reg);
+	if (new_val != 0) {
+		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
+			  new_val);
+		BUG();
+	}
+
+	/* From now we are in the "like-E1" mode */
+	bnx2x_int_disable(bp);
+
+	/* Flush all outstanding writes */
+	mmiowb();
+
+	/* Restore the original function settings */
+	REG_WR(bp, reg, orig_func);
+	new_val = REG_RD(bp, reg);
+	if (new_val != orig_func) {
+		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
+			  orig_func, new_val);
+		BUG();
+	}
+}
+
+static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
+{
+	if (CHIP_IS_E1H(bp))
+		bnx2x_undi_int_disable_e1h(bp, func);
+	else
+		bnx2x_int_disable(bp);
+}
+
+static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
+{
+	u32 val;
+
+	/* Check if there is any driver already loaded */
+	val = REG_RD(bp, MISC_REG_UNPREPARED);
+	if (val == 0x1) {
+		/* Check if it is the UNDI driver
+		 * UNDI driver initializes CID offset for normal bell to 0x7
+		 */
+		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
+		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
+		if (val == 0x7) {
+			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
+			/* save our func */
+			int func = BP_FUNC(bp);
+			u32 swap_en;
+			u32 swap_val;
+
+			/* clear the UNDI indication */
+			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
+
+			BNX2X_DEV_INFO("UNDI is active! reset device\n");
+
+			/* try unload UNDI on port 0 */
+			bp->func = 0;
+			bp->fw_seq =
+			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
+				DRV_MSG_SEQ_NUMBER_MASK);
+			reset_code = bnx2x_fw_command(bp, reset_code);
+
+			/* if UNDI is loaded on the other port */
+			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
+
+				/* send "DONE" for previous unload */
+				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
+
+				/* unload UNDI on port 1 */
+				bp->func = 1;
+				bp->fw_seq =
+			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
+					DRV_MSG_SEQ_NUMBER_MASK);
+				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
+
+				bnx2x_fw_command(bp, reset_code);
+			}
+
+			/* now it's safe to release the lock */
+			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
+
+			bnx2x_undi_int_disable(bp, func);
+
+			/* close input traffic and wait for it */
+			/* Do not rcv packets to BRB */
+			REG_WR(bp,
+			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
+					     NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
+			/* Do not direct rcv packets that are not for MCP to
+			 * the BRB */
+			REG_WR(bp,
+			       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
+					      NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
+			/* clear AEU */
+			REG_WR(bp,
+			     (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
+					    MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
+			msleep(10);
+
+			/* save NIG port swap info */
+			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
+			/* reset device */
+			REG_WR(bp,
+			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
+			       0xd3ffffff);
+			REG_WR(bp,
+			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+			       0x1403);
+			/* take the NIG out of reset and restore swap values */
+			REG_WR(bp,
+			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
+			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
+			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
+			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
+
+			/* send unload done to the MCP */
+			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
+
+			/* restore our func and fw_seq */
+			bp->func = func;
+			bp->fw_seq =
+			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
+				DRV_MSG_SEQ_NUMBER_MASK);
+
+		} else
+			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
+	}
+}
+
+static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
+{
+	u32 val, val2, val3, val4, id;
+	u16 pmc;
+
+	/* Get the chip revision id and number. */
+	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
+	val = REG_RD(bp, MISC_REG_CHIP_NUM);
+	id = ((val & 0xffff) << 16);
+	val = REG_RD(bp, MISC_REG_CHIP_REV);
+	id |= ((val & 0xf) << 12);
+	val = REG_RD(bp, MISC_REG_CHIP_METAL);
+	id |= ((val & 0xff) << 4);
+	val = REG_RD(bp, MISC_REG_BOND_ID);
+	id |= (val & 0xf);
+	bp->common.chip_id = id;
+	bp->link_params.chip_id = bp->common.chip_id;
+	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
+
+	val = (REG_RD(bp, 0x2874) & 0x55);
+	if ((bp->common.chip_id & 0x1) ||
+	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
+		bp->flags |= ONE_PORT_FLAG;
+		BNX2X_DEV_INFO("single port device\n");
+	}
+
+	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
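+	/* the flash size is NVRAM_1MB_SIZE shifted left by the CFG4
+	   size field, i.e. a power-of-two multiple of 1MB */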
+	bp->common.flash_size = (NVRAM_1MB_SIZE <<
+				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
+	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
+		       bp->common.flash_size, bp->common.flash_size);
+
+	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
+	bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
+	bp->link_params.shmem_base = bp->common.shmem_base;
+	BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
+		       bp->common.shmem_base, bp->common.shmem2_base);
+
+	if (!bp->common.shmem_base ||
+	    (bp->common.shmem_base < 0xA0000) ||
+	    (bp->common.shmem_base >= 0xC0000)) {
+		BNX2X_DEV_INFO("MCP not active\n");
+		bp->flags |= NO_MCP_FLAG;
+		return;
+	}
+
+	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
+	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
+		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
+		BNX2X_ERROR("BAD MCP validity signature\n");
+
+	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
+	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
+
+	bp->link_params.hw_led_mode = ((bp->common.hw_config &
+					SHARED_HW_CFG_LED_MODE_MASK) >>
+				       SHARED_HW_CFG_LED_MODE_SHIFT);
+
+	bp->link_params.feature_config_flags = 0;
+	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
+	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
+		bp->link_params.feature_config_flags |=
+				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
+	else
+		bp->link_params.feature_config_flags &=
+				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
+
+	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
+	bp->common.bc_ver = val;
+	BNX2X_DEV_INFO("bc_ver %X\n", val);
+	if (val < BNX2X_BC_VER) {
+		/* for now only warn
+		 * later we might need to enforce this */
+		BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
+			    "please upgrade BC\n", BNX2X_BC_VER, val);
+	}
+	bp->link_params.feature_config_flags |=
+		(val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
+		FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
+
+	if (BP_E1HVN(bp) == 0) {
+		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
+		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
+	} else {
+		/* no WOL capability for E1HVN != 0 */
+		bp->flags |= NO_WOL_FLAG;
+	}
+	BNX2X_DEV_INFO("%sWoL capable\n",
+		       (bp->flags & NO_WOL_FLAG) ? "not " : "");
+
+	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
+	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
+	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
+	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
+
+	dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
+		 val, val2, val3, val4);
+}
+
+static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
+						    u32 switch_cfg)
+{
+	int port = BP_PORT(bp);
+	u32 ext_phy_type;
+
+	switch (switch_cfg) {
+	case SWITCH_CFG_1G:
+		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
+
+		ext_phy_type =
+			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
+		switch (ext_phy_type) {
+		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
+			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
+				       ext_phy_type);
+
+			bp->port.supported |= (SUPPORTED_10baseT_Half |
+					       SUPPORTED_10baseT_Full |
+					       SUPPORTED_100baseT_Half |
+					       SUPPORTED_100baseT_Full |
+					       SUPPORTED_1000baseT_Full |
+					       SUPPORTED_2500baseX_Full |
+					       SUPPORTED_TP |
+					       SUPPORTED_FIBRE |
+					       SUPPORTED_Autoneg |
+					       SUPPORTED_Pause |
+					       SUPPORTED_Asym_Pause);
+			break;
+
+		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
+			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
+				       ext_phy_type);
+
+			bp->port.supported |= (SUPPORTED_10baseT_Half |
+					       SUPPORTED_10baseT_Full |
+					       SUPPORTED_100baseT_Half |
+					       SUPPORTED_100baseT_Full |
+					       SUPPORTED_1000baseT_Full |
+					       SUPPORTED_TP |
+					       SUPPORTED_FIBRE |
+					       SUPPORTED_Autoneg |
+					       SUPPORTED_Pause |
+					       SUPPORTED_Asym_Pause);
+			break;
+
+		default:
+			BNX2X_ERR("NVRAM config error. "
+				  "BAD SerDes ext_phy_config 0x%x\n",
+				  bp->link_params.ext_phy_config);
+			return;
+		}
+
+		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
+					   port*0x10);
+		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
+		break;
+
+	case SWITCH_CFG_10G:
+		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
+
+		ext_phy_type =
+			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
+		switch (ext_phy_type) {
+		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
+			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
+				       ext_phy_type);
+
+			bp->port.supported |= (SUPPORTED_10baseT_Half |
+					       SUPPORTED_10baseT_Full |
+					       SUPPORTED_100baseT_Half |
+					       SUPPORTED_100baseT_Full |
+					       SUPPORTED_1000baseT_Full |
+					       SUPPORTED_2500baseX_Full |
+					       SUPPORTED_10000baseT_Full |
+					       SUPPORTED_TP |
+					       SUPPORTED_FIBRE |
+					       SUPPORTED_Autoneg |
+					       SUPPORTED_Pause |
+					       SUPPORTED_Asym_Pause);
+			break;
+
+		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
+			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
+				       ext_phy_type);
+
+			bp->port.supported |= (SUPPORTED_10000baseT_Full |
+					       SUPPORTED_1000baseT_Full |
+					       SUPPORTED_FIBRE |
+					       SUPPORTED_Autoneg |
+					       SUPPORTED_Pause |
+					       SUPPORTED_Asym_Pause);
+			break;
+
+		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
+			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
+				       ext_phy_type);
+
+			bp->port.supported |= (SUPPORTED_10000baseT_Full |
+					       SUPPORTED_2500baseX_Full |
+					       SUPPORTED_1000baseT_Full |
+					       SUPPORTED_FIBRE |
+					       SUPPORTED_Autoneg |
+					       SUPPORTED_Pause |
+					       SUPPORTED_Asym_Pause);
+			break;
+
+		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
+			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
+				       ext_phy_type);
+
+			bp->port.supported |= (SUPPORTED_10000baseT_Full |
+					       SUPPORTED_FIBRE |
+					       SUPPORTED_Pause |
+					       SUPPORTED_Asym_Pause);
+			break;
+
+		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
+			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
+				       ext_phy_type);
+
+			bp->port.supported |= (SUPPORTED_10000baseT_Full |
+					       SUPPORTED_1000baseT_Full |
+					       SUPPORTED_FIBRE |
+					       SUPPORTED_Pause |
+					       SUPPORTED_Asym_Pause);
+			break;
+
+		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
+			BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
+				       ext_phy_type);
+
+			bp->port.supported |= (SUPPORTED_10000baseT_Full |
+					       SUPPORTED_1000baseT_Full |
+					       SUPPORTED_Autoneg |
+					       SUPPORTED_FIBRE |
+					       SUPPORTED_Pause |
+					       SUPPORTED_Asym_Pause);
+			break;
+
+		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
+			BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
+				       ext_phy_type);
+
+			bp->port.supported |= (SUPPORTED_10000baseT_Full |
+					       SUPPORTED_1000baseT_Full |
+					       SUPPORTED_Autoneg |
+					       SUPPORTED_FIBRE |
+					       SUPPORTED_Pause |
+					       SUPPORTED_Asym_Pause);
+			break;
+
+		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
+			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
+				       ext_phy_type);
+
+			bp->port.supported |= (SUPPORTED_10000baseT_Full |
+					       SUPPORTED_TP |
+					       SUPPORTED_Autoneg |
+					       SUPPORTED_Pause |
+					       SUPPORTED_Asym_Pause);
+			break;
+
+		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
+			BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
+				       ext_phy_type);
+
+			bp->port.supported |= (SUPPORTED_10baseT_Half |
+					       SUPPORTED_10baseT_Full |
+					       SUPPORTED_100baseT_Half |
+					       SUPPORTED_100baseT_Full |
+					       SUPPORTED_1000baseT_Full |
+					       SUPPORTED_10000baseT_Full |
+					       SUPPORTED_TP |
+					       SUPPORTED_Autoneg |
+					       SUPPORTED_Pause |
+					       SUPPORTED_Asym_Pause);
+			break;
+
+		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
+			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
+				  bp->link_params.ext_phy_config);
+			break;
+
+		default:
+			BNX2X_ERR("NVRAM config error. "
+				  "BAD XGXS ext_phy_config 0x%x\n",
+				  bp->link_params.ext_phy_config);
+			return;
+		}
+
+		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
+					   port*0x18);
+		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
+
+		break;
+
+	default:
+		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
+			  bp->port.link_config);
+		return;
+	}
+	bp->link_params.phy_addr = bp->port.phy_addr;
+
+	/* mask what we support according to speed_cap_mask */
+	if (!(bp->link_params.speed_cap_mask &
+				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
+		bp->port.supported &= ~SUPPORTED_10baseT_Half;
+
+	if (!(bp->link_params.speed_cap_mask &
+				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
+		bp->port.supported &= ~SUPPORTED_10baseT_Full;
+
+	if (!(bp->link_params.speed_cap_mask &
+				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
+		bp->port.supported &= ~SUPPORTED_100baseT_Half;
+
+	if (!(bp->link_params.speed_cap_mask &
+				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
+		bp->port.supported &= ~SUPPORTED_100baseT_Full;
+
+	if (!(bp->link_params.speed_cap_mask &
+					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
+		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
+					SUPPORTED_1000baseT_Full);
+
+	if (!(bp->link_params.speed_cap_mask &
+					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
+		bp->port.supported &= ~SUPPORTED_2500baseX_Full;
+
+	if (!(bp->link_params.speed_cap_mask &
+					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
+		bp->port.supported &= ~SUPPORTED_10000baseT_Full;
+
+	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
+}
+
+static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
+{
+	bp->link_params.req_duplex = DUPLEX_FULL;
+
+	switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
+	case PORT_FEATURE_LINK_SPEED_AUTO:
+		if (bp->port.supported & SUPPORTED_Autoneg) {
+			bp->link_params.req_line_speed = SPEED_AUTO_NEG;
+			bp->port.advertising = bp->port.supported;
+		} else {
+			u32 ext_phy_type =
+			    XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
+
+			if ((ext_phy_type ==
+			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
+			    (ext_phy_type ==
+			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
+				/* force 10G, no AN */
+				bp->link_params.req_line_speed = SPEED_10000;
+				bp->port.advertising =
+						(ADVERTISED_10000baseT_Full |
+						 ADVERTISED_FIBRE);
+				break;
+			}
+			BNX2X_ERR("NVRAM config error. "
+				  "Invalid link_config 0x%x"
+				  "  Autoneg not supported\n",
+				  bp->port.link_config);
+			return;
+		}
+		break;
+
+	case PORT_FEATURE_LINK_SPEED_10M_FULL:
+		if (bp->port.supported & SUPPORTED_10baseT_Full) {
+			bp->link_params.req_line_speed = SPEED_10;
+			bp->port.advertising = (ADVERTISED_10baseT_Full |
+						ADVERTISED_TP);
+		} else {
+			BNX2X_ERROR("NVRAM config error. "
+				    "Invalid link_config 0x%x"
+				    "  speed_cap_mask 0x%x\n",
+				    bp->port.link_config,
+				    bp->link_params.speed_cap_mask);
+			return;
+		}
+		break;
+
+	case PORT_FEATURE_LINK_SPEED_10M_HALF:
+		if (bp->port.supported & SUPPORTED_10baseT_Half) {
+			bp->link_params.req_line_speed = SPEED_10;
+			bp->link_params.req_duplex = DUPLEX_HALF;
+			bp->port.advertising = (ADVERTISED_10baseT_Half |
+						ADVERTISED_TP);
+		} else {
+			BNX2X_ERROR("NVRAM config error. "
+				    "Invalid link_config 0x%x"
+				    "  speed_cap_mask 0x%x\n",
+				    bp->port.link_config,
+				    bp->link_params.speed_cap_mask);
+			return;
+		}
+		break;
+
+	case PORT_FEATURE_LINK_SPEED_100M_FULL:
+		if (bp->port.supported & SUPPORTED_100baseT_Full) {
+			bp->link_params.req_line_speed = SPEED_100;
+			bp->port.advertising = (ADVERTISED_100baseT_Full |
+						ADVERTISED_TP);
+		} else {
+			BNX2X_ERROR("NVRAM config error. "
+				    "Invalid link_config 0x%x"
+				    "  speed_cap_mask 0x%x\n",
+				    bp->port.link_config,
+				    bp->link_params.speed_cap_mask);
+			return;
+		}
+		break;
+
+	case PORT_FEATURE_LINK_SPEED_100M_HALF:
+		if (bp->port.supported & SUPPORTED_100baseT_Half) {
+			bp->link_params.req_line_speed = SPEED_100;
+			bp->link_params.req_duplex = DUPLEX_HALF;
+			bp->port.advertising = (ADVERTISED_100baseT_Half |
+						ADVERTISED_TP);
+		} else {
+			BNX2X_ERROR("NVRAM config error. "
+				    "Invalid link_config 0x%x"
+				    "  speed_cap_mask 0x%x\n",
+				    bp->port.link_config,
+				    bp->link_params.speed_cap_mask);
+			return;
+		}
+		break;
+
+	case PORT_FEATURE_LINK_SPEED_1G:
+		if (bp->port.supported & SUPPORTED_1000baseT_Full) {
+			bp->link_params.req_line_speed = SPEED_1000;
+			bp->port.advertising = (ADVERTISED_1000baseT_Full |
+						ADVERTISED_TP);
+		} else {
+			BNX2X_ERROR("NVRAM config error. "
+				    "Invalid link_config 0x%x"
+				    "  speed_cap_mask 0x%x\n",
+				    bp->port.link_config,
+				    bp->link_params.speed_cap_mask);
+			return;
+		}
+		break;
+
+	case PORT_FEATURE_LINK_SPEED_2_5G:
+		if (bp->port.supported & SUPPORTED_2500baseX_Full) {
+			bp->link_params.req_line_speed = SPEED_2500;
+			bp->port.advertising = (ADVERTISED_2500baseX_Full |
+						ADVERTISED_TP);
+		} else {
+			BNX2X_ERROR("NVRAM config error. "
+				    "Invalid link_config 0x%x"
+				    "  speed_cap_mask 0x%x\n",
+				    bp->port.link_config,
+				    bp->link_params.speed_cap_mask);
+			return;
+		}
+		break;
+
+	case PORT_FEATURE_LINK_SPEED_10G_CX4:
+	case PORT_FEATURE_LINK_SPEED_10G_KX4:
+	case PORT_FEATURE_LINK_SPEED_10G_KR:
+		if (bp->port.supported & SUPPORTED_10000baseT_Full) {
+			bp->link_params.req_line_speed = SPEED_10000;
+			bp->port.advertising = (ADVERTISED_10000baseT_Full |
+						ADVERTISED_FIBRE);
+		} else {
+			BNX2X_ERROR("NVRAM config error. "
+				    "Invalid link_config 0x%x"
+				    "  speed_cap_mask 0x%x\n",
+				    bp->port.link_config,
+				    bp->link_params.speed_cap_mask);
+			return;
+		}
+		break;
+
+	default:
+		BNX2X_ERROR("NVRAM config error. "
+			    "BAD link speed link_config 0x%x\n",
+			    bp->port.link_config);
+		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
+		bp->port.advertising = bp->port.supported;
+		break;
+	}
+
+	bp->link_params.req_flow_ctrl = (bp->port.link_config &
+					 PORT_FEATURE_FLOW_CONTROL_MASK);
+	if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
+	    !(bp->port.supported & SUPPORTED_Autoneg))
+		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+
+	BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
+		       "  advertising 0x%x\n",
+		       bp->link_params.req_line_speed,
+		       bp->link_params.req_duplex,
+		       bp->link_params.req_flow_ctrl, bp->port.advertising);
+}
+
+static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
+{
+	mac_hi = cpu_to_be16(mac_hi);
+	mac_lo = cpu_to_be32(mac_lo);
+	memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
+	memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
+}
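+
+/* Worked example for bnx2x_set_mac_buf() (values are illustrative):
+ * with mac_hi = 0x0010 and mac_lo = 0x18aabbcc from shmem, the
+ * cpu_to_be{16,32} conversions lay the bytes out in wire order, so
+ * mac_buf[] ends up as 00:10:18:aa:bb:cc.
+ */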
+
+static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
+{
+	int port = BP_PORT(bp);
+	u32 val, val2;
+	u32 config;
+	u16 i;
+	u32 ext_phy_type;
+
+	bp->link_params.bp = bp;
+	bp->link_params.port = port;
+
+	bp->link_params.lane_config =
+		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
+	bp->link_params.ext_phy_config =
+		SHMEM_RD(bp,
+			 dev_info.port_hw_config[port].external_phy_config);
+	/* BCM8727_NOC => BCM8727, no over-current */
+	if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
+	    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
+		bp->link_params.ext_phy_config &=
+			~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
+		bp->link_params.ext_phy_config |=
+			PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
+		bp->link_params.feature_config_flags |=
+			FEATURE_CONFIG_BCM8727_NOC;
+	}
+
+	bp->link_params.speed_cap_mask =
+		SHMEM_RD(bp,
+			 dev_info.port_hw_config[port].speed_capability_mask);
+
+	bp->port.link_config =
+		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
+
+	/* Get the 4 lanes xgxs config rx and tx */
+	for (i = 0; i < 2; i++) {
+		val = SHMEM_RD(bp,
+			   dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
+		bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
+		bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
+
+		val = SHMEM_RD(bp,
+			   dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
+		bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
+		bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
+	}
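+	/* i.e. each 32-bit shmem word carries two 16-bit per-lane values:
+	 * word 0 (i = 0) feeds lanes 0 (high half) and 1 (low half),
+	 * word 1 (i = 1) feeds lanes 2 and 3.
+	 */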
+
+	/* If the device is capable of WoL, set the default state according
+	 * to the HW
+	 */
+	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
+	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
+		   (config & PORT_FEATURE_WOL_ENABLED));
+
+	BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
+		       "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
+		       bp->link_params.lane_config,
+		       bp->link_params.ext_phy_config,
+		       bp->link_params.speed_cap_mask, bp->port.link_config);
+
+	bp->link_params.switch_cfg |= (bp->port.link_config &
+				       PORT_FEATURE_CONNECTED_SWITCH_MASK);
+	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
+
+	bnx2x_link_settings_requested(bp);
+
+	/*
+	 * If connected directly, work with the internal PHY, otherwise, work
+	 * with the external PHY
+	 */
+	ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
+	if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
+		bp->mdio.prtad = bp->link_params.phy_addr;
+
+	else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
+		 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
+		bp->mdio.prtad =
+			XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
+
+	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
+	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
+	bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
+	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
+	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
+
+#ifdef BCM_CNIC
+	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
+	val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
+	bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
+#endif
+}
+
+static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
+{
+	int func = BP_FUNC(bp);
+	u32 val, val2;
+	int rc = 0;
+
+	bnx2x_get_common_hwinfo(bp);
+
+	bp->e1hov = 0;
+	bp->e1hmf = 0;
+	if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
+		bp->mf_config =
+			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
+
+		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
+		       FUNC_MF_CFG_E1HOV_TAG_MASK);
+		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
+			bp->e1hmf = 1;
+		BNX2X_DEV_INFO("%s function mode\n",
+			       IS_E1HMF(bp) ? "multi" : "single");
+
+		if (IS_E1HMF(bp)) {
+			val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
+								e1hov_tag) &
+			       FUNC_MF_CFG_E1HOV_TAG_MASK);
+			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
+				bp->e1hov = val;
+				BNX2X_DEV_INFO("E1HOV for func %d is %d "
+					       "(0x%04x)\n",
+					       func, bp->e1hov, bp->e1hov);
+			} else {
+				BNX2X_ERROR("No valid E1HOV for func %d,"
+					    "  aborting\n", func);
+				rc = -EPERM;
+			}
+		} else {
+			if (BP_E1HVN(bp)) {
+				BNX2X_ERROR("VN %d in single function mode,"
+					    "  aborting\n", BP_E1HVN(bp));
+				rc = -EPERM;
+			}
+		}
+	}
+
+	if (!BP_NOMCP(bp)) {
+		bnx2x_get_port_hwinfo(bp);
+
+		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
+			      DRV_MSG_SEQ_NUMBER_MASK);
+		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
+	}
+
+	if (IS_E1HMF(bp)) {
+		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
+		val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
+		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
+		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
+			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
+			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
+			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
+			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
+			bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
+			bp->dev->dev_addr[5] = (u8)(val & 0xff);
+			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
+			       ETH_ALEN);
+			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
+			       ETH_ALEN);
+		}
+
+		return rc;
+	}
+
+	if (BP_NOMCP(bp)) {
+		/* only supposed to happen on emulation/FPGA */
+		BNX2X_ERROR("warning: random MAC workaround active\n");
+		random_ether_addr(bp->dev->dev_addr);
+		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
+	}
+
+	return rc;
+}
+
+static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
+{
+	int cnt, i, block_end, rodi;
+	char vpd_data[BNX2X_VPD_LEN+1];
+	char str_id_reg[VENDOR_ID_LEN+1];
+	char str_id_cap[VENDOR_ID_LEN+1];
+	u8 len;
+
+	cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
+	memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
+
+	if (cnt < BNX2X_VPD_LEN)
+		goto out_not_found;
+
+	i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
+			     PCI_VPD_LRDT_RO_DATA);
+	if (i < 0)
+		goto out_not_found;
+
+	block_end = i + PCI_VPD_LRDT_TAG_SIZE +
+		    pci_vpd_lrdt_size(&vpd_data[i]);
+
+	i += PCI_VPD_LRDT_TAG_SIZE;
+
+	if (block_end > BNX2X_VPD_LEN)
+		goto out_not_found;
+
+	rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
+				   PCI_VPD_RO_KEYWORD_MFR_ID);
+	if (rodi < 0)
+		goto out_not_found;
+
+	len = pci_vpd_info_field_size(&vpd_data[rodi]);
+
+	if (len != VENDOR_ID_LEN)
+		goto out_not_found;
+
+	rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
+
+	/* vendor specific info */
+	snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
+	snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
+	if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
+	    !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
+
+		rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
+						PCI_VPD_RO_KEYWORD_VENDOR0);
+		if (rodi >= 0) {
+			len = pci_vpd_info_field_size(&vpd_data[rodi]);
+
+			rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
+
+			if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
+				memcpy(bp->fw_ver, &vpd_data[rodi], len);
+				bp->fw_ver[len] = ' ';
+			}
+		}
+		return;
+	}
+out_not_found:
+	return;
+}
+
+static int __devinit bnx2x_init_bp(struct bnx2x *bp)
+{
+	int func = BP_FUNC(bp);
+	int timer_interval;
+	int rc;
+
+	/* Disable interrupt handling until HW is initialized */
+	atomic_set(&bp->intr_sem, 1);
+	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
+
+	mutex_init(&bp->port.phy_mutex);
+	mutex_init(&bp->fw_mb_mutex);
+	spin_lock_init(&bp->stats_lock);
+#ifdef BCM_CNIC
+	mutex_init(&bp->cnic_mutex);
+#endif
+
+	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
+	INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
+
+	rc = bnx2x_get_hwinfo(bp);
+
+	bnx2x_read_fwinfo(bp);
+	/* need to reset chip if undi was active */
+	if (!BP_NOMCP(bp))
+		bnx2x_undi_unload(bp);
+
+	if (CHIP_REV_IS_FPGA(bp))
+		dev_err(&bp->pdev->dev, "FPGA detected\n");
+
+	if (BP_NOMCP(bp) && (func == 0))
+		dev_err(&bp->pdev->dev, "MCP disabled, "
+					"must load devices in order!\n");
+
+	/* Set multi queue mode */
+	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
+	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
+		dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
+					"requested is not MSI-X\n");
+		multi_mode = ETH_RSS_MODE_DISABLED;
+	}
+	bp->multi_mode = multi_mode;
+	bp->int_mode = int_mode;
+
+	bp->dev->features |= NETIF_F_GRO;
+
+	/* Set TPA flags */
+	if (disable_tpa) {
+		bp->flags &= ~TPA_ENABLE_FLAG;
+		bp->dev->features &= ~NETIF_F_LRO;
+	} else {
+		bp->flags |= TPA_ENABLE_FLAG;
+		bp->dev->features |= NETIF_F_LRO;
+	}
+	bp->disable_tpa = disable_tpa;
+
+	if (CHIP_IS_E1(bp))
+		bp->dropless_fc = 0;
+	else
+		bp->dropless_fc = dropless_fc;
+
+	bp->mrrs = mrrs;
+
+	bp->tx_ring_size = MAX_TX_AVAIL;
+	bp->rx_ring_size = MAX_RX_AVAIL;
+
+	bp->rx_csum = 1;
+
+	/* make sure that the numbers are in the right granularity */
+	bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
+	bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
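+	/* e.g. assuming BNX2X_BTR == 1 for illustration, 50 and 25 are
+	 * rounded down to multiples of 4, giving tx_ticks = 48 and
+	 * rx_ticks = 24.
+	 */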
+
+	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
+	bp->current_interval = (poll ? poll : timer_interval);
+
+	init_timer(&bp->timer);
+	bp->timer.expires = jiffies + bp->current_interval;
+	bp->timer.data = (unsigned long) bp;
+	bp->timer.function = bnx2x_timer;
+
+	return rc;
+}
+
+/****************************************************************************
+* General service functions
+****************************************************************************/
+
+/* called with rtnl_lock */
+static int bnx2x_open(struct net_device *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	netif_carrier_off(dev);
+
+	bnx2x_set_power_state(bp, PCI_D0);
+
+	if (!bnx2x_reset_is_done(bp)) {
+		do {
+			/* Reset the MCP mailbox sequence if there is an
+			 * ongoing recovery
+			 */
+			bp->fw_seq = 0;
+
+			/* If this is the first function to load and "reset
+			 * done" is still not set, try to recover as the
+			 * leader. We don't check the attention state here
+			 * because it may have already been cleared by a
+			 * "common" reset, but we shall proceed with
+			 * "process kill" anyway.
+			 */
+			if ((bnx2x_get_load_cnt(bp) == 0) &&
+				bnx2x_trylock_hw_lock(bp,
+				HW_LOCK_RESOURCE_RESERVED_08) &&
+				(!bnx2x_leader_reset(bp))) {
+				DP(NETIF_MSG_HW, "Recovered in open\n");
+				break;
+			}
+
+			bnx2x_set_power_state(bp, PCI_D3hot);
+
+			printk(KERN_ERR"%s: Recovery flow hasn't been properly"
+			" completed yet. Try again later. If u still see this"
+			" message after a few retries then power cycle is"
+			" required.\n", bp->dev->name);
+
+			return -EAGAIN;
+		} while (0);
+	}
+
+	bp->recovery_state = BNX2X_RECOVERY_DONE;
+
+	return bnx2x_nic_load(bp, LOAD_OPEN);
+}
+
+/* called with rtnl_lock */
+static int bnx2x_close(struct net_device *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	/* Unload the driver, release IRQs */
+	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
+	bnx2x_set_power_state(bp, PCI_D3hot);
+
+	return 0;
+}
+
+/* called with netif_tx_lock from dev_mcast.c */
+void bnx2x_set_rx_mode(struct net_device *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
+	int port = BP_PORT(bp);
+
+	if (bp->state != BNX2X_STATE_OPEN) {
+		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
+		return;
+	}
+
+	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
+
+	if (dev->flags & IFF_PROMISC)
+		rx_mode = BNX2X_RX_MODE_PROMISC;
+
+	else if ((dev->flags & IFF_ALLMULTI) ||
+		 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
+		  CHIP_IS_E1(bp)))
+		rx_mode = BNX2X_RX_MODE_ALLMULTI;
+
+	else { /* some multicasts */
+		if (CHIP_IS_E1(bp)) {
+			int i, old, offset;
+			struct netdev_hw_addr *ha;
+			struct mac_configuration_cmd *config =
+						bnx2x_sp(bp, mcast_config);
+
+			i = 0;
+			netdev_for_each_mc_addr(ha, dev) {
+				config->config_table[i].
+					cam_entry.msb_mac_addr =
+					swab16(*(u16 *)&ha->addr[0]);
+				config->config_table[i].
+					cam_entry.middle_mac_addr =
+					swab16(*(u16 *)&ha->addr[2]);
+				config->config_table[i].
+					cam_entry.lsb_mac_addr =
+					swab16(*(u16 *)&ha->addr[4]);
+				config->config_table[i].cam_entry.flags =
+							cpu_to_le16(port);
+				config->config_table[i].
+					target_table_entry.flags = 0;
+				config->config_table[i].target_table_entry.
+					clients_bit_vector =
+						cpu_to_le32(1 << BP_L_ID(bp));
+				config->config_table[i].
+					target_table_entry.vlan_id = 0;
+
+				DP(NETIF_MSG_IFUP,
+				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
+				   config->config_table[i].
+						cam_entry.msb_mac_addr,
+				   config->config_table[i].
+						cam_entry.middle_mac_addr,
+				   config->config_table[i].
+						cam_entry.lsb_mac_addr);
+				i++;
+			}
+			old = config->hdr.length;
+			if (old > i) {
+				for (; i < old; i++) {
+					if (CAM_IS_INVALID(config->
+							   config_table[i])) {
+						/* already invalidated */
+						break;
+					}
+					/* invalidate */
+					CAM_INVALIDATE(config->
+						       config_table[i]);
+				}
+			}
+
+			if (CHIP_REV_IS_SLOW(bp))
+				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
+			else
+				offset = BNX2X_MAX_MULTICAST*(1 + port);
+
+			config->hdr.length = i;
+			config->hdr.offset = offset;
+			config->hdr.client_id = bp->fp->cl_id;
+			config->hdr.reserved1 = 0;
+
+			bp->set_mac_pending++;
+			smp_wmb();
+
+			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
+				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
+				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
+				      0);
+		} else { /* E1H */
+			/* Accept one or more multicasts */
+			struct netdev_hw_addr *ha;
+			u32 mc_filter[MC_HASH_SIZE];
+			u32 crc, bit, regidx;
+			int i;
+
+			memset(mc_filter, 0, 4 * MC_HASH_SIZE);
+
+			netdev_for_each_mc_addr(ha, dev) {
+				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
+				   ha->addr);
+
+				crc = crc32c_le(0, ha->addr, ETH_ALEN);
+				bit = (crc >> 24) & 0xff;
+				regidx = bit >> 5;
+				bit &= 0x1f;
+				mc_filter[regidx] |= (1 << bit);
+			}
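+			/*
+			 * Worked example (hypothetical CRC): if crc32c_le()
+			 * returns 0x4a000000, the top byte selects bit 74,
+			 * i.e. regidx = 2 and bit 10 of mc_filter[2].
+			 */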
+
+			for (i = 0; i < MC_HASH_SIZE; i++)
+				REG_WR(bp, MC_HASH_OFFSET(bp, i),
+				       mc_filter[i]);
+		}
+	}
+
+	bp->rx_mode = rx_mode;
+	bnx2x_set_storm_rx_mode(bp);
+}
+
+/* called with rtnl_lock */
+static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
+			   int devad, u16 addr)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	u16 value;
+	int rc;
+	u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
+
+	DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
+	   prtad, devad, addr);
+
+	if (prtad != bp->mdio.prtad) {
+		DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
+		   prtad, bp->mdio.prtad);
+		return -EINVAL;
+	}
+
+	/* The HW expects different devad if CL22 is used */
+	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
+
+	bnx2x_acquire_phy_lock(bp);
+	rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
+			     devad, addr, &value);
+	bnx2x_release_phy_lock(bp);
+	DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
+
+	if (!rc)
+		rc = value;
+	return rc;
+}
+
+/* called with rtnl_lock */
+static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
+			    u16 addr, u16 value)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
+	int rc;
+
+	DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
+			   " value 0x%x\n", prtad, devad, addr, value);
+
+	if (prtad != bp->mdio.prtad) {
+		DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
+		   prtad, bp->mdio.prtad);
+		return -EINVAL;
+	}
+
+	/* The HW expects different devad if CL22 is used */
+	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
+
+	bnx2x_acquire_phy_lock(bp);
+	rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
+			      devad, addr, value);
+	bnx2x_release_phy_lock(bp);
+	return rc;
+}
+
+/* called with rtnl_lock */
+static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	struct mii_ioctl_data *mdio = if_mii(ifr);
+
+	DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
+	   mdio->phy_id, mdio->reg_num, mdio->val_in);
+
+	if (!netif_running(dev))
+		return -EAGAIN;
+
+	return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void poll_bnx2x(struct net_device *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	disable_irq(bp->pdev->irq);
+	bnx2x_interrupt(bp->pdev->irq, dev);
+	enable_irq(bp->pdev->irq);
+}
+#endif
+
+static const struct net_device_ops bnx2x_netdev_ops = {
+	.ndo_open		= bnx2x_open,
+	.ndo_stop		= bnx2x_close,
+	.ndo_start_xmit		= bnx2x_start_xmit,
+	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
+	.ndo_set_mac_address	= bnx2x_change_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_do_ioctl		= bnx2x_ioctl,
+	.ndo_change_mtu		= bnx2x_change_mtu,
+	.ndo_tx_timeout		= bnx2x_tx_timeout,
+#ifdef BCM_VLAN
+	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
+#endif
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= poll_bnx2x,
+#endif
+};
+
+static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
+				    struct net_device *dev)
+{
+	struct bnx2x *bp;
+	int rc;
+
+	SET_NETDEV_DEV(dev, &pdev->dev);
+	bp = netdev_priv(dev);
+
+	bp->dev = dev;
+	bp->pdev = pdev;
+	bp->flags = 0;
+	bp->func = PCI_FUNC(pdev->devfn);
+
+	rc = pci_enable_device(pdev);
+	if (rc) {
+		dev_err(&bp->pdev->dev,
+			"Cannot enable PCI device, aborting\n");
+		goto err_out;
+	}
+
+	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+		dev_err(&bp->pdev->dev,
+			"Cannot find PCI device base address, aborting\n");
+		rc = -ENODEV;
+		goto err_out_disable;
+	}
+
+	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
+		dev_err(&bp->pdev->dev, "Cannot find second PCI device"
+		       " base address, aborting\n");
+		rc = -ENODEV;
+		goto err_out_disable;
+	}
+
+	if (atomic_read(&pdev->enable_cnt) == 1) {
+		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
+		if (rc) {
+			dev_err(&bp->pdev->dev,
+				"Cannot obtain PCI resources, aborting\n");
+			goto err_out_disable;
+		}
+
+		pci_set_master(pdev);
+		pci_save_state(pdev);
+	}
+
+	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
+	if (bp->pm_cap == 0) {
+		dev_err(&bp->pdev->dev,
+			"Cannot find power management capability, aborting\n");
+		rc = -EIO;
+		goto err_out_release;
+	}
+
+	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+	if (bp->pcie_cap == 0) {
+		dev_err(&bp->pdev->dev,
+			"Cannot find PCI Express capability, aborting\n");
+		rc = -EIO;
+		goto err_out_release;
+	}
+
+	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
+		bp->flags |= USING_DAC_FLAG;
+		if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
+			dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
+			       " failed, aborting\n");
+			rc = -EIO;
+			goto err_out_release;
+		}
+
+	} else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
+		dev_err(&bp->pdev->dev,
+			"System does not support DMA, aborting\n");
+		rc = -EIO;
+		goto err_out_release;
+	}
+
+	dev->mem_start = pci_resource_start(pdev, 0);
+	dev->base_addr = dev->mem_start;
+	dev->mem_end = pci_resource_end(pdev, 0);
+
+	dev->irq = pdev->irq;
+
+	bp->regview = pci_ioremap_bar(pdev, 0);
+	if (!bp->regview) {
+		dev_err(&bp->pdev->dev,
+			"Cannot map register space, aborting\n");
+		rc = -ENOMEM;
+		goto err_out_release;
+	}
+
+	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
+					min_t(u64, BNX2X_DB_SIZE,
+					      pci_resource_len(pdev, 2)));
+	if (!bp->doorbells) {
+		dev_err(&bp->pdev->dev,
+			"Cannot map doorbell space, aborting\n");
+		rc = -ENOMEM;
+		goto err_out_unmap;
+	}
+
+	bnx2x_set_power_state(bp, PCI_D0);
+
+	/* clean indirect addresses */
+	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
+			       PCICFG_VENDOR_ID_OFFSET);
+	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
+	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
+	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
+	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
+
+	/* Reset the load counter */
+	bnx2x_clear_load_cnt(bp);
+
+	dev->watchdog_timeo = TX_TIMEOUT;
+
+	dev->netdev_ops = &bnx2x_netdev_ops;
+	bnx2x_set_ethtool_ops(dev);
+	dev->features |= NETIF_F_SG;
+	dev->features |= NETIF_F_HW_CSUM;
+	if (bp->flags & USING_DAC_FLAG)
+		dev->features |= NETIF_F_HIGHDMA;
+	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
+	dev->features |= NETIF_F_TSO6;
+#ifdef BCM_VLAN
+	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
+	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
+
+	dev->vlan_features |= NETIF_F_SG;
+	dev->vlan_features |= NETIF_F_HW_CSUM;
+	if (bp->flags & USING_DAC_FLAG)
+		dev->vlan_features |= NETIF_F_HIGHDMA;
+	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
+	dev->vlan_features |= NETIF_F_TSO6;
+#endif
+
+	/* get_port_hwinfo() will set prtad and mmds properly */
+	bp->mdio.prtad = MDIO_PRTAD_NONE;
+	bp->mdio.mmds = 0;
+	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
+	bp->mdio.dev = dev;
+	bp->mdio.mdio_read = bnx2x_mdio_read;
+	bp->mdio.mdio_write = bnx2x_mdio_write;
+
+	return 0;
+
+err_out_unmap:
+	if (bp->regview) {
+		iounmap(bp->regview);
+		bp->regview = NULL;
+	}
+	if (bp->doorbells) {
+		iounmap(bp->doorbells);
+		bp->doorbells = NULL;
+	}
+
+err_out_release:
+	if (atomic_read(&pdev->enable_cnt) == 1)
+		pci_release_regions(pdev);
+
+err_out_disable:
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+
+err_out:
+	return rc;
+}
+
+static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
+						 int *width, int *speed)
+{
+	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
+
+	*width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
+
+	/* return value of 1=2.5GHz 2=5GHz */
+	*speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
+}
+
+static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
+{
+	const struct firmware *firmware = bp->firmware;
+	struct bnx2x_fw_file_hdr *fw_hdr;
+	struct bnx2x_fw_file_section *sections;
+	u32 offset, len, num_ops;
+	u16 *ops_offsets;
+	int i;
+	const u8 *fw_ver;
+
+	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
+		return -EINVAL;
+
+	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
+	sections = (struct bnx2x_fw_file_section *)fw_hdr;
+
+	/* Make sure none of the offsets and sizes make us read beyond
+	 * the end of the firmware data */
+	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
+		offset = be32_to_cpu(sections[i].offset);
+		len = be32_to_cpu(sections[i].len);
+		if (offset + len > firmware->size) {
+			dev_err(&bp->pdev->dev,
+				"Section %d length is out of bounds\n", i);
+			return -EINVAL;
+		}
+	}
+
+	/* Likewise for the init_ops offsets */
+	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
+	ops_offsets = (u16 *)(firmware->data + offset);
+	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
+
+	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
+		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
+			dev_err(&bp->pdev->dev,
+				"Section offset %d is out of bounds\n", i);
+			return -EINVAL;
+		}
+	}
+
+	/* Check FW version */
+	offset = be32_to_cpu(fw_hdr->fw_version.offset);
+	fw_ver = firmware->data + offset;
+	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
+	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
+	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
+	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
+		dev_err(&bp->pdev->dev,
+			"Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
+		       fw_ver[0], fw_ver[1], fw_ver[2],
+		       fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
+		       BCM_5710_FW_MINOR_VERSION,
+		       BCM_5710_FW_REVISION_VERSION,
+		       BCM_5710_FW_ENGINEERING_VERSION);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
+{
+	const __be32 *source = (const __be32 *)_source;
+	u32 *target = (u32 *)_target;
+	u32 i;
+
+	for (i = 0; i < n/4; i++)
+		target[i] = be32_to_cpu(source[i]);
+}
+
+/*
+ * The ops array is stored in the following format:
+ * {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
+ */
+static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
+{
+	const __be32 *source = (const __be32 *)_source;
+	struct raw_op *target = (struct raw_op *)_target;
+	u32 i, j, tmp;
+
+	for (i = 0, j = 0; i < n/8; i++, j += 2) {
+		tmp = be32_to_cpu(source[j]);
+		target[i].op = (tmp >> 24) & 0xff;
+		target[i].offset = tmp & 0xffffff;
+		target[i].raw_data = be32_to_cpu(source[j + 1]);
+	}
+}
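+
+/* Example decode (illustrative bytes): the 8-byte record
+ * 05 00 12 34 de ad be ef yields op = 0x05, offset = 0x001234 and
+ * raw_data = 0xdeadbeef after the big-endian conversions above.
+ */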
+
+static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
+{
+	const __be16 *source = (const __be16 *)_source;
+	u16 *target = (u16 *)_target;
+	u32 i;
+
+	for (i = 0; i < n/2; i++)
+		target[i] = be16_to_cpu(source[i]);
+}
+
+#define BNX2X_ALLOC_AND_SET(arr, lbl, func)				\
+do {									\
+	u32 len = be32_to_cpu(fw_hdr->arr.len);				\
+	bp->arr = kmalloc(len, GFP_KERNEL);				\
+	if (!bp->arr) {							\
+		pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
+		goto lbl;						\
+	}								\
+	func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset),	\
+	     (u8 *)bp->arr, len);					\
+} while (0)
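+
+/* For reference, BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit,
+ * be32_to_cpu_n) below expands to a kmalloc() of the section length
+ * followed by a byte-swapping copy out of the firmware blob; on
+ * allocation failure it jumps to the given error label.
+ */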
+
+static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
+{
+	const char *fw_file_name;
+	struct bnx2x_fw_file_hdr *fw_hdr;
+	int rc;
+
+	if (CHIP_IS_E1(bp))
+		fw_file_name = FW_FILE_NAME_E1;
+	else if (CHIP_IS_E1H(bp))
+		fw_file_name = FW_FILE_NAME_E1H;
+	else {
+		dev_err(dev, "Unsupported chip revision\n");
+		return -EINVAL;
+	}
+
+	dev_info(dev, "Loading %s\n", fw_file_name);
+
+	rc = request_firmware(&bp->firmware, fw_file_name, dev);
+	if (rc) {
+		dev_err(dev, "Can't load firmware file %s\n", fw_file_name);
+		goto request_firmware_exit;
+	}
+
+	rc = bnx2x_check_firmware(bp);
+	if (rc) {
+		dev_err(dev, "Corrupt firmware file %s\n", fw_file_name);
+		goto request_firmware_exit;
+	}
+
+	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
+
+	/* Initialize the pointers to the init arrays */
+	/* Blob */
+	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
+
+	/* Opcodes */
+	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
+
+	/* Offsets */
+	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
+			    be16_to_cpu_n);
+
+	/* STORMs firmware */
+	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
+			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
+	INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
+			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
+	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
+			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
+	INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
+			be32_to_cpu(fw_hdr->usem_pram_data.offset);
+	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
+			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
+	INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
+			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
+	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
+			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
+	INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
+			be32_to_cpu(fw_hdr->csem_pram_data.offset);
+
+	return 0;
+
+init_offsets_alloc_err:
+	kfree(bp->init_ops);
+init_ops_alloc_err:
+	kfree(bp->init_data);
+request_firmware_exit:
+	release_firmware(bp->firmware);
+
+	return rc;
+}
+
+static int __devinit bnx2x_init_one(struct pci_dev *pdev,
+				    const struct pci_device_id *ent)
+{
+	struct net_device *dev = NULL;
+	struct bnx2x *bp;
+	int pcie_width, pcie_speed;
+	int rc;
+
+	/* dev zeroed in alloc_etherdev_mq() */
+	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
+	if (!dev) {
+		dev_err(&pdev->dev, "Cannot allocate net device\n");
+		return -ENOMEM;
+	}
+
+	bp = netdev_priv(dev);
+	bp->msg_enable = debug;
+
+	pci_set_drvdata(pdev, dev);
+
+	rc = bnx2x_init_dev(pdev, dev);
+	if (rc < 0) {
+		free_netdev(dev);
+		return rc;
+	}
+
+	rc = bnx2x_init_bp(bp);
+	if (rc)
+		goto init_one_exit;
+
+	/* Set init arrays */
+	rc = bnx2x_init_firmware(bp, &pdev->dev);
+	if (rc) {
+		dev_err(&pdev->dev, "Error loading firmware\n");
+		goto init_one_exit;
+	}
+
+	rc = register_netdev(dev);
+	if (rc) {
+		dev_err(&pdev->dev, "Cannot register net device\n");
+		goto init_one_exit;
+	}
+
+	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
+	netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
+	       " IRQ %d, ", board_info[ent->driver_data].name,
+	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
+	       pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
+	       dev->base_addr, bp->pdev->irq);
+	pr_cont("node addr %pM\n", dev->dev_addr);
+
+	return 0;
+
+init_one_exit:
+	if (bp->regview)
+		iounmap(bp->regview);
+
+	if (bp->doorbells)
+		iounmap(bp->doorbells);
+
+	free_netdev(dev);
+
+	if (atomic_read(&pdev->enable_cnt) == 1)
+		pci_release_regions(pdev);
+
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+
+	return rc;
+}
+
+static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct bnx2x *bp;
+
+	if (!dev) {
+		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
+		return;
+	}
+	bp = netdev_priv(dev);
+
+	unregister_netdev(dev);
+
+	/* Make sure RESET task is not scheduled before continuing */
+	cancel_delayed_work_sync(&bp->reset_task);
+
+	kfree(bp->init_ops_offsets);
+	kfree(bp->init_ops);
+	kfree(bp->init_data);
+	release_firmware(bp->firmware);
+
+	if (bp->regview)
+		iounmap(bp->regview);
+
+	if (bp->doorbells)
+		iounmap(bp->doorbells);
+
+	free_netdev(dev);
+
+	if (atomic_read(&pdev->enable_cnt) == 1)
+		pci_release_regions(pdev);
+
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+}
+
+static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
+{
+	int i;
+
+	bp->state = BNX2X_STATE_ERROR;
+
+	bp->rx_mode = BNX2X_RX_MODE_NONE;
+
+	bnx2x_netif_stop(bp, 0);
+	netif_carrier_off(bp->dev);
+
+	del_timer_sync(&bp->timer);
+	bp->stats_state = STATS_STATE_DISABLED;
+	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
+
+	/* Release IRQs */
+	bnx2x_free_irq(bp, false);
+
+	if (CHIP_IS_E1(bp)) {
+		struct mac_configuration_cmd *config =
+						bnx2x_sp(bp, mcast_config);
+
+		for (i = 0; i < config->hdr.length; i++)
+			CAM_INVALIDATE(config->config_table[i]);
+	}
+
+	/* Free SKBs, SGEs, TPA pool and driver internals */
+	bnx2x_free_skbs(bp);
+	for_each_queue(bp, i)
+		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
+	for_each_queue(bp, i)
+		netif_napi_del(&bnx2x_fp(bp, i, napi));
+	bnx2x_free_mem(bp);
+
+	bp->state = BNX2X_STATE_CLOSED;
+
+	return 0;
+}
+
+static void bnx2x_eeh_recover(struct bnx2x *bp)
+{
+	u32 val;
+
+	mutex_init(&bp->port.phy_mutex);
+
+	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
+	bp->link_params.shmem_base = bp->common.shmem_base;
+	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
+
+	if (!bp->common.shmem_base ||
+	    (bp->common.shmem_base < 0xA0000) ||
+	    (bp->common.shmem_base >= 0xC0000)) {
+		BNX2X_DEV_INFO("MCP not active\n");
+		bp->flags |= NO_MCP_FLAG;
+		return;
+	}
+
+	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
+	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
+		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
+		BNX2X_ERR("BAD MCP validity signature\n");
+
+	if (!BP_NOMCP(bp)) {
+		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
+			      & DRV_MSG_SEQ_NUMBER_MASK);
+		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
+	}
+}
+
+/**
+ * bnx2x_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
+						pci_channel_state_t state)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct bnx2x *bp = netdev_priv(dev);
+
+	rtnl_lock();
+
+	netif_device_detach(dev);
+
+	if (state == pci_channel_io_perm_failure) {
+		rtnl_unlock();
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	if (netif_running(dev))
+		bnx2x_eeh_nic_unload(bp);
+
+	pci_disable_device(pdev);
+
+	rtnl_unlock();
+
+	/* Request a slot reset */
+	return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * bnx2x_io_slot_reset - called after the PCI bus has been reset
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot.
+ */
+static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct bnx2x *bp = netdev_priv(dev);
+
+	rtnl_lock();
+
+	if (pci_enable_device(pdev)) {
+		dev_err(&pdev->dev,
+			"Cannot re-enable PCI device after reset\n");
+		rtnl_unlock();
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	pci_set_master(pdev);
+	pci_restore_state(pdev);
+
+	if (netif_running(dev))
+		bnx2x_set_power_state(bp, PCI_D0);
+
+	rtnl_unlock();
+
+	return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * bnx2x_io_resume - called when traffic can start flowing again
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * it's OK to resume normal operation.
+ */
+static void bnx2x_io_resume(struct pci_dev *pdev)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct bnx2x *bp = netdev_priv(dev);
+
+	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
+		return;
+	}
+
+	rtnl_lock();
+
+	bnx2x_eeh_recover(bp);
+
+	if (netif_running(dev))
+		bnx2x_nic_load(bp, LOAD_NORMAL);
+
+	netif_device_attach(dev);
+
+	rtnl_unlock();
+}
+
+static struct pci_error_handlers bnx2x_err_handler = {
+	.error_detected = bnx2x_io_error_detected,
+	.slot_reset     = bnx2x_io_slot_reset,
+	.resume         = bnx2x_io_resume,
+};
+
+static struct pci_driver bnx2x_pci_driver = {
+	.name        = DRV_MODULE_NAME,
+	.id_table    = bnx2x_pci_tbl,
+	.probe       = bnx2x_init_one,
+	.remove      = __devexit_p(bnx2x_remove_one),
+	.suspend     = bnx2x_suspend,
+	.resume      = bnx2x_resume,
+	.err_handler = &bnx2x_err_handler,
+};
+
+static int __init bnx2x_init(void)
+{
+	int ret;
+
+	pr_info("%s", version);
+
+	bnx2x_wq = create_singlethread_workqueue("bnx2x");
+	if (bnx2x_wq == NULL) {
+		pr_err("Cannot create workqueue\n");
+		return -ENOMEM;
+	}
+
+	ret = pci_register_driver(&bnx2x_pci_driver);
+	if (ret) {
+		pr_err("Cannot register driver\n");
+		destroy_workqueue(bnx2x_wq);
+	}
+	return ret;
+}
+
+static void __exit bnx2x_cleanup(void)
+{
+	pci_unregister_driver(&bnx2x_pci_driver);
+
+	destroy_workqueue(bnx2x_wq);
+}
+
+module_init(bnx2x_init);
+module_exit(bnx2x_cleanup);
+
+#ifdef BCM_CNIC
+
+/* count denotes the number of new completions we have seen */
+static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
+{
+	struct eth_spe *spe;
+
+#ifdef BNX2X_STOP_ON_ERROR
+	if (unlikely(bp->panic))
+		return;
+#endif
+
+	spin_lock_bh(&bp->spq_lock);
+	bp->cnic_spq_pending -= count;
+
+	for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
+	     bp->cnic_spq_pending++) {
+
+		if (!bp->cnic_kwq_pending)
+			break;
+
+		spe = bnx2x_sp_get_next(bp);
+		*spe = *bp->cnic_kwq_cons;
+
+		bp->cnic_kwq_pending--;
+
+		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
+		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
+
+		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
+			bp->cnic_kwq_cons = bp->cnic_kwq;
+		else
+			bp->cnic_kwq_cons++;
+	}
+	bnx2x_sp_prod_update(bp);
+	spin_unlock_bh(&bp->spq_lock);
+}
+
+static int bnx2x_cnic_sp_queue(struct net_device *dev,
+			       struct kwqe_16 *kwqes[], u32 count)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	int i;
+
+#ifdef BNX2X_STOP_ON_ERROR
+	if (unlikely(bp->panic))
+		return -EIO;
+#endif
+
+	spin_lock_bh(&bp->spq_lock);
+
+	for (i = 0; i < count; i++) {
+		struct eth_spe *spe = (struct eth_spe *)kwqes[i];
+
+		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
+			break;
+
+		*bp->cnic_kwq_prod = *spe;
+
+		bp->cnic_kwq_pending++;
+
+		DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
+		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
+		   spe->data.mac_config_addr.hi,
+		   spe->data.mac_config_addr.lo,
+		   bp->cnic_kwq_pending);
+
+		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
+			bp->cnic_kwq_prod = bp->cnic_kwq;
+		else
+			bp->cnic_kwq_prod++;
+	}
+
+	spin_unlock_bh(&bp->spq_lock);
+
+	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
+		bnx2x_cnic_sp_post(bp, 0);
+
+	return i;
+}
+
+static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
+{
+	struct cnic_ops *c_ops;
+	int rc = 0;
+
+	mutex_lock(&bp->cnic_mutex);
+	c_ops = bp->cnic_ops;
+	if (c_ops)
+		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
+	mutex_unlock(&bp->cnic_mutex);
+
+	return rc;
+}
+
+static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
+{
+	struct cnic_ops *c_ops;
+	int rc = 0;
+
+	rcu_read_lock();
+	c_ops = rcu_dereference(bp->cnic_ops);
+	if (c_ops)
+		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
+	rcu_read_unlock();
+
+	return rc;
+}
+
+/*
+ * for commands that have no data
+ */
+int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
+{
+	struct cnic_ctl_info ctl = {0};
+
+	ctl.cmd = cmd;
+
+	return bnx2x_cnic_ctl_send(bp, &ctl);
+}
+
+static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
+{
+	struct cnic_ctl_info ctl;
+
+	/* first we tell CNIC and only then we count this as a completion */
+	ctl.cmd = CNIC_CTL_COMPLETION_CMD;
+	ctl.data.comp.cid = cid;
+
+	bnx2x_cnic_ctl_send_bh(bp, &ctl);
+	bnx2x_cnic_sp_post(bp, 1);
+}
+
+static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	int rc = 0;
+
+	switch (ctl->cmd) {
+	case DRV_CTL_CTXTBL_WR_CMD: {
+		u32 index = ctl->data.io.offset;
+		dma_addr_t addr = ctl->data.io.dma_addr;
+
+		bnx2x_ilt_wr(bp, index, addr);
+		break;
+	}
+
+	case DRV_CTL_COMPLETION_CMD: {
+		int count = ctl->data.comp.comp_count;
+
+		bnx2x_cnic_sp_post(bp, count);
+		break;
+	}
+
+	/* rtnl_lock is held.  */
+	case DRV_CTL_START_L2_CMD: {
+		u32 cli = ctl->data.ring.client_id;
+
+		bp->rx_mode_cl_mask |= (1 << cli);
+		bnx2x_set_storm_rx_mode(bp);
+		break;
+	}
+
+	/* rtnl_lock is held.  */
+	case DRV_CTL_STOP_L2_CMD: {
+		u32 cli = ctl->data.ring.client_id;
+
+		bp->rx_mode_cl_mask &= ~(1 << cli);
+		bnx2x_set_storm_rx_mode(bp);
+		break;
+	}
+
+	default:
+		BNX2X_ERR("unknown command %x\n", ctl->cmd);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
+{
+	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+	if (bp->flags & USING_MSIX_FLAG) {
+		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
+		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
+		cp->irq_arr[0].vector = bp->msix_table[1].vector;
+	} else {
+		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
+		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
+	}
+	cp->irq_arr[0].status_blk = bp->cnic_sb;
+	cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
+	cp->irq_arr[1].status_blk = bp->def_status_blk;
+	cp->irq_arr[1].status_blk_num = DEF_SB_ID;
+
+	cp->num_irq = 2;
+}
+
+static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
+			       void *data)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+	if (ops == NULL)
+		return -EINVAL;
+
+	if (atomic_read(&bp->intr_sem) != 0)
+		return -EBUSY;
+
+	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!bp->cnic_kwq)
+		return -ENOMEM;
+
+	bp->cnic_kwq_cons = bp->cnic_kwq;
+	bp->cnic_kwq_prod = bp->cnic_kwq;
+	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
+
+	bp->cnic_spq_pending = 0;
+	bp->cnic_kwq_pending = 0;
+
+	bp->cnic_data = data;
+
+	cp->num_irq = 0;
+	cp->drv_state = CNIC_DRV_STATE_REGD;
+
+	bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));
+
+	bnx2x_setup_cnic_irq_info(bp);
+	bnx2x_set_iscsi_eth_mac_addr(bp, 1);
+	bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
+	rcu_assign_pointer(bp->cnic_ops, ops);
+
+	return 0;
+}
+
+static int bnx2x_unregister_cnic(struct net_device *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+	mutex_lock(&bp->cnic_mutex);
+	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
+		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
+		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
+	}
+	cp->drv_state = 0;
+	rcu_assign_pointer(bp->cnic_ops, NULL);
+	mutex_unlock(&bp->cnic_mutex);
+	synchronize_rcu();
+	kfree(bp->cnic_kwq);
+	bp->cnic_kwq = NULL;
+
+	return 0;
+}
+
+struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+	cp->drv_owner = THIS_MODULE;
+	cp->chip_id = CHIP_ID(bp);
+	cp->pdev = bp->pdev;
+	cp->io_base = bp->regview;
+	cp->io_base2 = bp->doorbells;
+	cp->max_kwqe_pending = 8;
+	cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
+	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
+	cp->ctx_tbl_len = CNIC_ILT_LINES;
+	cp->starting_cid = BCM_CNIC_CID_START;
+	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
+	cp->drv_ctl = bnx2x_drv_ctl;
+	cp->drv_register_cnic = bnx2x_register_cnic;
+	cp->drv_unregister_cnic = bnx2x_unregister_cnic;
+
+	return cp;
+}
+EXPORT_SYMBOL(bnx2x_cnic_probe);
+
+#endif /* BCM_CNIC */
+
diff --git a/drivers/net/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h
similarity index 100%
rename from drivers/net/bnx2x_reg.h
rename to drivers/net/bnx2x/bnx2x_reg.h
diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c
new file mode 100644
index 0000000..c747244
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_stats.c
@@ -0,0 +1,1411 @@
+/* bnx2x_stats.c: Broadcom Everest network driver.
+ *
+ * Copyright (c) 2007-2010 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Eliezer Tamir
+ * Based on code from Michael Chan's bnx2 driver
+ * UDP CSUM errata workaround by Arik Gendelman
+ * Slowpath and fastpath rework by Vladislav Zolotarov
+ * Statistics and Link management by Yitchak Gertner
+ *
+ */
+ #include "bnx2x_cmn.h"
+ #include "bnx2x_stats.h"
+
+/* Statistics */
+
+/****************************************************************************
+* Macros
+****************************************************************************/
+
+/* sum[hi:lo] += add[hi:lo] */
+#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
+	do { \
+		s_lo += a_lo; \
+		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
+	} while (0)
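+
+/* e.g. adding a_hi:a_lo = 0x0:0x1 to s_hi:s_lo = 0x0:0xffffffff wraps
+ * s_lo to 0; the (s_lo < a_lo) test detects the wrap and carries 1
+ * into s_hi.
+ */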
+
+/* difference = minuend - subtrahend */
+#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
+	do { \
+		if (m_lo < s_lo) { \
+			/* underflow */ \
+			d_hi = m_hi - s_hi; \
+			if (d_hi > 0) { \
+				/* we can 'loan' 1 */ \
+				d_hi--; \
+				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
+			} else { \
+				/* m_hi <= s_hi */ \
+				d_hi = 0; \
+				d_lo = 0; \
+			} \
+		} else { \
+			/* m_lo >= s_lo */ \
+			if (m_hi < s_hi) { \
+				d_hi = 0; \
+				d_lo = 0; \
+			} else { \
+				/* m_hi >= s_hi */ \
+				d_hi = m_hi - s_hi; \
+				d_lo = m_lo - s_lo; \
+			} \
+		} \
+	} while (0)
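+
+/* e.g. m_hi:m_lo = 0x1:0x00000000 minus s_hi:s_lo = 0x0:0x00000001
+ * takes a borrow from m_hi, giving d = 0x0:0xffffffff; if the result
+ * would be negative the difference is clamped to 0:0.
+ */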
+
+#define UPDATE_STAT64(s, t) \
+	do { \
+		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
+			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
+		pstats->mac_stx[0].t##_hi = new->s##_hi; \
+		pstats->mac_stx[0].t##_lo = new->s##_lo; \
+		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
+		       pstats->mac_stx[1].t##_lo, diff.lo); \
+	} while (0)
+
+#define UPDATE_STAT64_NIG(s, t) \
+	do { \
+		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
+			diff.lo, new->s##_lo, old->s##_lo); \
+		ADD_64(estats->t##_hi, diff.hi, \
+		       estats->t##_lo, diff.lo); \
+	} while (0)
+
+/* sum[hi:lo] += add */
+#define ADD_EXTEND_64(s_hi, s_lo, a) \
+	do { \
+		s_lo += a; \
+		s_hi += (s_lo < a) ? 1 : 0; \
+	} while (0)
+
+#define UPDATE_EXTEND_STAT(s) \
+	do { \
+		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
+			      pstats->mac_stx[1].s##_lo, \
+			      new->s); \
+	} while (0)
+
+#define UPDATE_EXTEND_TSTAT(s, t) \
+	do { \
+		diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
+		old_tclient->s = tclient->s; \
+		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
+	} while (0)
+
+#define UPDATE_EXTEND_USTAT(s, t) \
+	do { \
+		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
+		old_uclient->s = uclient->s; \
+		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
+	} while (0)
+
+#define UPDATE_EXTEND_XSTAT(s, t) \
+	do { \
+		diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
+		old_xclient->s = xclient->s; \
+		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
+	} while (0)
+
+/* minuend -= subtrahend */
+#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
+	do { \
+		DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
+	} while (0)
+
+/* minuend[hi:lo] -= subtrahend */
+#define SUB_EXTEND_64(m_hi, m_lo, s) \
+	do { \
+		SUB_64(m_hi, 0, m_lo, s); \
+	} while (0)
+
+#define SUB_EXTEND_USTAT(s, t) \
+	do { \
+		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
+		SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
+	} while (0)
+
+/*
+ * General service functions
+ */
+
+static inline long bnx2x_hilo(u32 *hiref)
+{
+	u32 lo = *(hiref + 1);
+#if (BITS_PER_LONG == 64)
+	u32 hi = *hiref;
+
+	return HILO_U64(hi, lo);
+#else
+	return lo;
+#endif
+}
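+
+/* Note: on 32-bit builds only the low 32 bits of the 64-bit counter
+ * are returned, so the value seen by callers can wrap.
+ */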
+
+/*
+ * Init service functions
+ */
+
+static void bnx2x_storm_stats_post(struct bnx2x *bp)
+{
+	if (!bp->stats_pending) {
+		struct eth_query_ramrod_data ramrod_data = {0};
+		int i, rc;
+
+		spin_lock_bh(&bp->stats_lock);
+
+		ramrod_data.drv_counter = bp->stats_counter++;
+		ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
+		for_each_queue(bp, i)
+			ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
+
+		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
+				   ((u32 *)&ramrod_data)[1],
+				   ((u32 *)&ramrod_data)[0], 0);
+		if (rc == 0) {
+			/* stats ramrod has its own slot on the spq */
+			bp->spq_left++;
+			bp->stats_pending = 1;
+		}
+
+		spin_unlock_bh(&bp->stats_lock);
+	}
+}
+
+static void bnx2x_hw_stats_post(struct bnx2x *bp)
+{
+	struct dmae_command *dmae = &bp->stats_dmae;
+	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+
+	*stats_comp = DMAE_COMP_VAL;
+	if (CHIP_REV_IS_SLOW(bp))
+		return;
+
+	/* loader */
+	if (bp->executer_idx) {
+		int loader_idx = PMF_DMAE_C(bp);
+
+		memset(dmae, 0, sizeof(struct dmae_command));
+
+		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
+				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
+				DMAE_CMD_DST_RESET |
+#ifdef __BIG_ENDIAN
+				DMAE_CMD_ENDIANITY_B_DW_SWAP |
+#else
+				DMAE_CMD_ENDIANITY_DW_SWAP |
+#endif
+				(BP_PORT(bp) ? DMAE_CMD_PORT_1 :
+					       DMAE_CMD_PORT_0) |
+				(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
+		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
+		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
+		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
+				     sizeof(struct dmae_command) *
+				     (loader_idx + 1)) >> 2;
+		dmae->dst_addr_hi = 0;
+		dmae->len = sizeof(struct dmae_command) >> 2;
+		if (CHIP_IS_E1(bp))
+			dmae->len--;
+		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
+		dmae->comp_addr_hi = 0;
+		dmae->comp_val = 1;
+
+		*stats_comp = 0;
+		bnx2x_post_dmae(bp, dmae, loader_idx);
+
+	} else if (bp->func_stx) {
+		*stats_comp = 0;
+		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
+	}
+}
+
+static int bnx2x_stats_comp(struct bnx2x *bp)
+{
+	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+	int cnt = 10;
+
+	might_sleep();
+	while (*stats_comp != DMAE_COMP_VAL) {
+		if (!cnt) {
+			BNX2X_ERR("timeout waiting for stats finished\n");
+			break;
+		}
+		cnt--;
+		msleep(1);
+	}
+	return 1;
+}
+
+/*
+ * Statistics service functions
+ */
+
+static void bnx2x_stats_pmf_update(struct bnx2x *bp)
+{
+	struct dmae_command *dmae;
+	u32 opcode;
+	int loader_idx = PMF_DMAE_C(bp);
+	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+
+	/* sanity */
+	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
+		BNX2X_ERR("BUG!\n");
+		return;
+	}
+
+	bp->executer_idx = 0;
+
+	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
+		  DMAE_CMD_C_ENABLE |
+		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
+#ifdef __BIG_ENDIAN
+		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
+#else
+		  DMAE_CMD_ENDIANITY_DW_SWAP |
+#endif
+		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
+		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
+
+	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+	dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
+	dmae->src_addr_lo = bp->port.port_stx >> 2;
+	dmae->src_addr_hi = 0;
+	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
+	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
+	dmae->len = DMAE_LEN32_RD_MAX;
+	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+	dmae->comp_addr_hi = 0;
+	dmae->comp_val = 1;
+
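+	/* host_port_stats is wider than a single DMAE read allows
+	 * (DMAE_LEN32_RD_MAX dwords), so fetch the tail with a second
+	 * command that signals completion through stats_comp
+	 */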
+	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
+	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
+	dmae->src_addr_hi = 0;
+	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
+				   DMAE_LEN32_RD_MAX * 4);
+	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
+				   DMAE_LEN32_RD_MAX * 4);
+	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
+	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
+	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
+	dmae->comp_val = DMAE_COMP_VAL;
+
+	*stats_comp = 0;
+	bnx2x_hw_stats_post(bp);
+	bnx2x_stats_comp(bp);
+}
+
+static void bnx2x_port_stats_init(struct bnx2x *bp)
+{
+	struct dmae_command *dmae;
+	int port = BP_PORT(bp);
+	int vn = BP_E1HVN(bp);
+	u32 opcode;
+	int loader_idx = PMF_DMAE_C(bp);
+	u32 mac_addr;
+	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+
+	/* sanity */
+	if (!bp->link_vars.link_up || !bp->port.pmf) {
+		BNX2X_ERR("BUG!\n");
+		return;
+	}
+
+	bp->executer_idx = 0;
+
+	/* MCP */
+	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
+		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
+		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
+#ifdef __BIG_ENDIAN
+		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
+#else
+		  DMAE_CMD_ENDIANITY_DW_SWAP |
+#endif
+		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
+		  (vn << DMAE_CMD_E1HVN_SHIFT));
+
+	if (bp->port.port_stx) {
+
+		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+		dmae->opcode = opcode;
+		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
+		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
+		dmae->dst_addr_lo = bp->port.port_stx >> 2;
+		dmae->dst_addr_hi = 0;
+		dmae->len = sizeof(struct host_port_stats) >> 2;
+		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+		dmae->comp_addr_hi = 0;
+		dmae->comp_val = 1;
+	}
+
+	if (bp->func_stx) {
+
+		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+		dmae->opcode = opcode;
+		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
+		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
+		dmae->dst_addr_lo = bp->func_stx >> 2;
+		dmae->dst_addr_hi = 0;
+		dmae->len = sizeof(struct host_func_stats) >> 2;
+		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+		dmae->comp_addr_hi = 0;
+		dmae->comp_val = 1;
+	}
+
+	/* MAC */
+	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
+		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
+		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
+#ifdef __BIG_ENDIAN
+		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
+#else
+		  DMAE_CMD_ENDIANITY_DW_SWAP |
+#endif
+		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
+		  (vn << DMAE_CMD_E1HVN_SHIFT));
+
+	if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
+
+		mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
+				   NIG_REG_INGRESS_BMAC0_MEM);
+
+		/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
+		   BIGMAC_REGISTER_TX_STAT_GTBYT */
+		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+		dmae->opcode = opcode;
+		dmae->src_addr_lo = (mac_addr +
+				     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
+		dmae->src_addr_hi = 0;
+		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
+		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
+		dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
+			     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
+		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+		dmae->comp_addr_hi = 0;
+		dmae->comp_val = 1;
+
+		/* BIGMAC_REGISTER_RX_STAT_GR64 ..
+		   BIGMAC_REGISTER_RX_STAT_GRIPJ */
+		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+		dmae->opcode = opcode;
+		dmae->src_addr_lo = (mac_addr +
+				     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
+		dmae->src_addr_hi = 0;
+		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
+				offsetof(struct bmac_stats, rx_stat_gr64_lo));
+		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
+				offsetof(struct bmac_stats, rx_stat_gr64_lo));
+		dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
+			     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
+		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+		dmae->comp_addr_hi = 0;
+		dmae->comp_val = 1;
+
+	} else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
+
+		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
+
+		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
+		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+		dmae->opcode = opcode;
+		dmae->src_addr_lo = (mac_addr +
+				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
+		dmae->src_addr_hi = 0;
+		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
+		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
+		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
+		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+		dmae->comp_addr_hi = 0;
+		dmae->comp_val = 1;
+
+		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
+		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+		dmae->opcode = opcode;
+		dmae->src_addr_lo = (mac_addr +
+				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
+		dmae->src_addr_hi = 0;
+		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
+		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
+		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
+		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
+		dmae->len = 1;
+		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+		dmae->comp_addr_hi = 0;
+		dmae->comp_val = 1;
+
+		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
+		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+		dmae->opcode = opcode;
+		dmae->src_addr_lo = (mac_addr +
+				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
+		dmae->src_addr_hi = 0;
+		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
+			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
+		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
+			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
+		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
+		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+		dmae->comp_addr_hi = 0;
+		dmae->comp_val = 1;
+	}
+
+	/* NIG */
+	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+	dmae->opcode = opcode;
+	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
+				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
+	dmae->src_addr_hi = 0;
+	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
+	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
+	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
+	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+	dmae->comp_addr_hi = 0;
+	dmae->comp_val = 1;
+
+	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+	dmae->opcode = opcode;
+	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
+				    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
+	dmae->src_addr_hi = 0;
+	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
+			offsetof(struct nig_stats, egress_mac_pkt0_lo));
+	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
+			offsetof(struct nig_stats, egress_mac_pkt0_lo));
+	dmae->len = (2*sizeof(u32)) >> 2;
+	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+	dmae->comp_addr_hi = 0;
+	dmae->comp_val = 1;
+
+	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
+			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
+			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
+#ifdef __BIG_ENDIAN
+			DMAE_CMD_ENDIANITY_B_DW_SWAP |
+#else
+			DMAE_CMD_ENDIANITY_DW_SWAP |
+#endif
+			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
+			(vn << DMAE_CMD_E1HVN_SHIFT));
+	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
+				    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
+	dmae->src_addr_hi = 0;
+	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
+			offsetof(struct nig_stats, egress_mac_pkt1_lo));
+	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
+			offsetof(struct nig_stats, egress_mac_pkt1_lo));
+	dmae->len = (2*sizeof(u32)) >> 2;
+	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
+	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
+	dmae->comp_val = DMAE_COMP_VAL;
+
+	*stats_comp = 0;
+}
+
+static void bnx2x_func_stats_init(struct bnx2x *bp)
+{
+	struct dmae_command *dmae = &bp->stats_dmae;
+	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+
+	/* sanity */
+	if (!bp->func_stx) {
+		BNX2X_ERR("BUG!\n");
+		return;
+	}
+
+	bp->executer_idx = 0;
+	memset(dmae, 0, sizeof(struct dmae_command));
+
+	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
+			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
+			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
+#ifdef __BIG_ENDIAN
+			DMAE_CMD_ENDIANITY_B_DW_SWAP |
+#else
+			DMAE_CMD_ENDIANITY_DW_SWAP |
+#endif
+			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
+			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
+	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
+	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
+	dmae->dst_addr_lo = bp->func_stx >> 2;
+	dmae->dst_addr_hi = 0;
+	dmae->len = sizeof(struct host_func_stats) >> 2;
+	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
+	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
+	dmae->comp_val = DMAE_COMP_VAL;
+
+	*stats_comp = 0;
+}
+
+static void bnx2x_stats_start(struct bnx2x *bp)
+{
+	if (bp->port.pmf)
+		bnx2x_port_stats_init(bp);
+
+	else if (bp->func_stx)
+		bnx2x_func_stats_init(bp);
+
+	bnx2x_hw_stats_post(bp);
+	bnx2x_storm_stats_post(bp);
+}
+
+static void bnx2x_stats_pmf_start(struct bnx2x *bp)
+{
+	bnx2x_stats_comp(bp);
+	bnx2x_stats_pmf_update(bp);
+	bnx2x_stats_start(bp);
+}
+
+static void bnx2x_stats_restart(struct bnx2x *bp)
+{
+	bnx2x_stats_comp(bp);
+	bnx2x_stats_start(bp);
+}
+
+static void bnx2x_bmac_stats_update(struct bnx2x *bp)
+{
+	struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
+	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
+	struct bnx2x_eth_stats *estats = &bp->eth_stats;
+	struct {
+		u32 lo;
+		u32 hi;
+	} diff;
+
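+	/* as with the UPDATE_EXTEND_* macros above, UPDATE_STAT64 expands
+	 * in place against the local names (new, pstats, diff) rather than
+	 * taking them as arguments
+	 */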
+	UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
+	UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
+	UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
+	UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
+	UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
+	UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
+	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
+	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
+	UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
+	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
+	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
+	UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
+	UPDATE_STAT64(tx_stat_gt127,
+				tx_stat_etherstatspkts65octetsto127octets);
+	UPDATE_STAT64(tx_stat_gt255,
+				tx_stat_etherstatspkts128octetsto255octets);
+	UPDATE_STAT64(tx_stat_gt511,
+				tx_stat_etherstatspkts256octetsto511octets);
+	UPDATE_STAT64(tx_stat_gt1023,
+				tx_stat_etherstatspkts512octetsto1023octets);
+	UPDATE_STAT64(tx_stat_gt1518,
+				tx_stat_etherstatspkts1024octetsto1522octets);
+	UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
+	UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
+	UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
+	UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
+	UPDATE_STAT64(tx_stat_gterr,
+				tx_stat_dot3statsinternalmactransmiterrors);
+	UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
+
+	estats->pause_frames_received_hi =
+				pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
+	estats->pause_frames_received_lo =
+				pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
+
+	estats->pause_frames_sent_hi =
+				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
+	estats->pause_frames_sent_lo =
+				pstats->mac_stx[1].tx_stat_outxoffsent_lo;
+}
+
+static void bnx2x_emac_stats_update(struct bnx2x *bp)
+{
+	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
+	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
+	struct bnx2x_eth_stats *estats = &bp->eth_stats;
+
+	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
+	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
+	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
+	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
+	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
+	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
+	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
+	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
+	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
+	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
+	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
+	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
+	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
+	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
+	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
+	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
+	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
+	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
+	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
+	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
+	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
+	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
+	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
+	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
+	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
+	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
+	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
+	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
+	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
+	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
+	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
+
+	estats->pause_frames_received_hi =
+			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
+	estats->pause_frames_received_lo =
+			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
+	ADD_64(estats->pause_frames_received_hi,
+	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
+	       estats->pause_frames_received_lo,
+	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
+
+	estats->pause_frames_sent_hi =
+			pstats->mac_stx[1].tx_stat_outxonsent_hi;
+	estats->pause_frames_sent_lo =
+			pstats->mac_stx[1].tx_stat_outxonsent_lo;
+	ADD_64(estats->pause_frames_sent_hi,
+	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
+	       estats->pause_frames_sent_lo,
+	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
+}
+
+static int bnx2x_hw_stats_update(struct bnx2x *bp)
+{
+	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
+	struct nig_stats *old = &(bp->port.old_nig_stats);
+	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
+	struct bnx2x_eth_stats *estats = &bp->eth_stats;
+	struct {
+		u32 lo;
+		u32 hi;
+	} diff;
+
+	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
+		bnx2x_bmac_stats_update(bp);
+
+	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
+		bnx2x_emac_stats_update(bp);
+
+	else { /* unreached */
+		BNX2X_ERR("stats updated by DMAE but no MAC active\n");
+		return -1;
+	}
+
+	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
+		      new->brb_discard - old->brb_discard);
+	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
+		      new->brb_truncate - old->brb_truncate);
+
+	UPDATE_STAT64_NIG(egress_mac_pkt0,
+					etherstatspkts1024octetsto1522octets);
+	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
+
+	memcpy(old, new, sizeof(struct nig_stats));
+
+	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
+	       sizeof(struct mac_stx));
+	estats->brb_drop_hi = pstats->brb_drop_hi;
+	estats->brb_drop_lo = pstats->brb_drop_lo;
+
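+	/* bump the end marker and mirror it into start; a reader that sees
+	 * start != end can treat the snapshot as torn (inferred from the
+	 * start/end pairing)
+	 */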
+	pstats->host_port_stats_start = ++pstats->host_port_stats_end;
+
+	if (!BP_NOMCP(bp)) {
+		u32 nig_timer_max =
+			SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
+		if (nig_timer_max != estats->nig_timer_max) {
+			estats->nig_timer_max = nig_timer_max;
+			BNX2X_ERR("NIG timer max (%u)\n",
+				  estats->nig_timer_max);
+		}
+	}
+
+	return 0;
+}
+
+static int bnx2x_storm_stats_update(struct bnx2x *bp)
+{
+	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
+	struct tstorm_per_port_stats *tport =
+					&stats->tstorm_common.port_statistics;
+	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
+	struct bnx2x_eth_stats *estats = &bp->eth_stats;
+	int i;
+	u16 cur_stats_counter;
+
+	/* Make sure we use the value of the counter
+	 * used for sending the last stats ramrod.
+	 */
+	spin_lock_bh(&bp->stats_lock);
+	cur_stats_counter = bp->stats_counter - 1;
+	spin_unlock_bh(&bp->stats_lock);
+
+	memcpy(&(fstats->total_bytes_received_hi),
+	       &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
+	       sizeof(struct host_func_stats) - 2*sizeof(u32));
+	estats->error_bytes_received_hi = 0;
+	estats->error_bytes_received_lo = 0;
+	estats->etherstatsoverrsizepkts_hi = 0;
+	estats->etherstatsoverrsizepkts_lo = 0;
+	estats->no_buff_discard_hi = 0;
+	estats->no_buff_discard_lo = 0;
+
+	for_each_queue(bp, i) {
+		struct bnx2x_fastpath *fp = &bp->fp[i];
+		int cl_id = fp->cl_id;
+		struct tstorm_per_client_stats *tclient =
+				&stats->tstorm_common.client_statistics[cl_id];
+		struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
+		struct ustorm_per_client_stats *uclient =
+				&stats->ustorm_common.client_statistics[cl_id];
+		struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
+		struct xstorm_per_client_stats *xclient =
+				&stats->xstorm_common.client_statistics[cl_id];
+		struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
+		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
+		u32 diff;
+
+		/* are storm stats valid? */
+		if (le16_to_cpu(xclient->stats_counter) != cur_stats_counter) {
+			DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
+			   "  xstorm counter (0x%x) != stats_counter (0x%x)\n",
+			   i, xclient->stats_counter, cur_stats_counter + 1);
+			return -1;
+		}
+		if (le16_to_cpu(tclient->stats_counter) != cur_stats_counter) {
+			DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
+			   "  tstorm counter (0x%x) != stats_counter (0x%x)\n",
+			   i, tclient->stats_counter, cur_stats_counter + 1);
+			return -2;
+		}
+		if (le16_to_cpu(uclient->stats_counter) != cur_stats_counter) {
+			DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
+			   "  ustorm counter (0x%x) != stats_counter (0x%x)\n",
+			   i, uclient->stats_counter, cur_stats_counter + 1);
+			return -4;
+		}
+
+		qstats->total_bytes_received_hi =
+			le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
+		qstats->total_bytes_received_lo =
+			le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
+
+		ADD_64(qstats->total_bytes_received_hi,
+		       le32_to_cpu(tclient->rcv_multicast_bytes.hi),
+		       qstats->total_bytes_received_lo,
+		       le32_to_cpu(tclient->rcv_multicast_bytes.lo));
+
+		ADD_64(qstats->total_bytes_received_hi,
+		       le32_to_cpu(tclient->rcv_unicast_bytes.hi),
+		       qstats->total_bytes_received_lo,
+		       le32_to_cpu(tclient->rcv_unicast_bytes.lo));
+
+		SUB_64(qstats->total_bytes_received_hi,
+		       le32_to_cpu(uclient->bcast_no_buff_bytes.hi),
+		       qstats->total_bytes_received_lo,
+		       le32_to_cpu(uclient->bcast_no_buff_bytes.lo));
+
+		SUB_64(qstats->total_bytes_received_hi,
+		       le32_to_cpu(uclient->mcast_no_buff_bytes.hi),
+		       qstats->total_bytes_received_lo,
+		       le32_to_cpu(uclient->mcast_no_buff_bytes.lo));
+
+		SUB_64(qstats->total_bytes_received_hi,
+		       le32_to_cpu(uclient->ucast_no_buff_bytes.hi),
+		       qstats->total_bytes_received_lo,
+		       le32_to_cpu(uclient->ucast_no_buff_bytes.lo));
+
+		qstats->valid_bytes_received_hi =
+					qstats->total_bytes_received_hi;
+		qstats->valid_bytes_received_lo =
+					qstats->total_bytes_received_lo;
+
+		qstats->error_bytes_received_hi =
+				le32_to_cpu(tclient->rcv_error_bytes.hi);
+		qstats->error_bytes_received_lo =
+				le32_to_cpu(tclient->rcv_error_bytes.lo);
+
+		ADD_64(qstats->total_bytes_received_hi,
+		       qstats->error_bytes_received_hi,
+		       qstats->total_bytes_received_lo,
+		       qstats->error_bytes_received_lo);
+
+		UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
+					total_unicast_packets_received);
+		UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
+					total_multicast_packets_received);
+		UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
+					total_broadcast_packets_received);
+		UPDATE_EXTEND_TSTAT(packets_too_big_discard,
+					etherstatsoverrsizepkts);
+		UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
+
+		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
+					total_unicast_packets_received);
+		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
+					total_multicast_packets_received);
+		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
+					total_broadcast_packets_received);
+		UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
+		UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
+		UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
+
+		qstats->total_bytes_transmitted_hi =
+				le32_to_cpu(xclient->unicast_bytes_sent.hi);
+		qstats->total_bytes_transmitted_lo =
+				le32_to_cpu(xclient->unicast_bytes_sent.lo);
+
+		ADD_64(qstats->total_bytes_transmitted_hi,
+		       le32_to_cpu(xclient->multicast_bytes_sent.hi),
+		       qstats->total_bytes_transmitted_lo,
+		       le32_to_cpu(xclient->multicast_bytes_sent.lo));
+
+		ADD_64(qstats->total_bytes_transmitted_hi,
+		       le32_to_cpu(xclient->broadcast_bytes_sent.hi),
+		       qstats->total_bytes_transmitted_lo,
+		       le32_to_cpu(xclient->broadcast_bytes_sent.lo));
+
+		UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
+					total_unicast_packets_transmitted);
+		UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
+					total_multicast_packets_transmitted);
+		UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
+					total_broadcast_packets_transmitted);
+
+		old_tclient->checksum_discard = tclient->checksum_discard;
+		old_tclient->ttl0_discard = tclient->ttl0_discard;
+
+		ADD_64(fstats->total_bytes_received_hi,
+		       qstats->total_bytes_received_hi,
+		       fstats->total_bytes_received_lo,
+		       qstats->total_bytes_received_lo);
+		ADD_64(fstats->total_bytes_transmitted_hi,
+		       qstats->total_bytes_transmitted_hi,
+		       fstats->total_bytes_transmitted_lo,
+		       qstats->total_bytes_transmitted_lo);
+		ADD_64(fstats->total_unicast_packets_received_hi,
+		       qstats->total_unicast_packets_received_hi,
+		       fstats->total_unicast_packets_received_lo,
+		       qstats->total_unicast_packets_received_lo);
+		ADD_64(fstats->total_multicast_packets_received_hi,
+		       qstats->total_multicast_packets_received_hi,
+		       fstats->total_multicast_packets_received_lo,
+		       qstats->total_multicast_packets_received_lo);
+		ADD_64(fstats->total_broadcast_packets_received_hi,
+		       qstats->total_broadcast_packets_received_hi,
+		       fstats->total_broadcast_packets_received_lo,
+		       qstats->total_broadcast_packets_received_lo);
+		ADD_64(fstats->total_unicast_packets_transmitted_hi,
+		       qstats->total_unicast_packets_transmitted_hi,
+		       fstats->total_unicast_packets_transmitted_lo,
+		       qstats->total_unicast_packets_transmitted_lo);
+		ADD_64(fstats->total_multicast_packets_transmitted_hi,
+		       qstats->total_multicast_packets_transmitted_hi,
+		       fstats->total_multicast_packets_transmitted_lo,
+		       qstats->total_multicast_packets_transmitted_lo);
+		ADD_64(fstats->total_broadcast_packets_transmitted_hi,
+		       qstats->total_broadcast_packets_transmitted_hi,
+		       fstats->total_broadcast_packets_transmitted_lo,
+		       qstats->total_broadcast_packets_transmitted_lo);
+		ADD_64(fstats->valid_bytes_received_hi,
+		       qstats->valid_bytes_received_hi,
+		       fstats->valid_bytes_received_lo,
+		       qstats->valid_bytes_received_lo);
+
+		ADD_64(estats->error_bytes_received_hi,
+		       qstats->error_bytes_received_hi,
+		       estats->error_bytes_received_lo,
+		       qstats->error_bytes_received_lo);
+		ADD_64(estats->etherstatsoverrsizepkts_hi,
+		       qstats->etherstatsoverrsizepkts_hi,
+		       estats->etherstatsoverrsizepkts_lo,
+		       qstats->etherstatsoverrsizepkts_lo);
+		ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
+		       estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
+	}
+
+	ADD_64(fstats->total_bytes_received_hi,
+	       estats->rx_stat_ifhcinbadoctets_hi,
+	       fstats->total_bytes_received_lo,
+	       estats->rx_stat_ifhcinbadoctets_lo);
+
+	memcpy(estats, &(fstats->total_bytes_received_hi),
+	       sizeof(struct host_func_stats) - 2*sizeof(u32));
+
+	ADD_64(estats->etherstatsoverrsizepkts_hi,
+	       estats->rx_stat_dot3statsframestoolong_hi,
+	       estats->etherstatsoverrsizepkts_lo,
+	       estats->rx_stat_dot3statsframestoolong_lo);
+	ADD_64(estats->error_bytes_received_hi,
+	       estats->rx_stat_ifhcinbadoctets_hi,
+	       estats->error_bytes_received_lo,
+	       estats->rx_stat_ifhcinbadoctets_lo);
+
+	if (bp->port.pmf) {
+		estats->mac_filter_discard =
+				le32_to_cpu(tport->mac_filter_discard);
+		estats->xxoverflow_discard =
+				le32_to_cpu(tport->xxoverflow_discard);
+		estats->brb_truncate_discard =
+				le32_to_cpu(tport->brb_truncate_discard);
+		estats->mac_discard = le32_to_cpu(tport->mac_discard);
+	}
+
+	fstats->host_func_stats_start = ++fstats->host_func_stats_end;
+
+	bp->stats_pending = 0;
+
+	return 0;
+}
+
+static void bnx2x_net_stats_update(struct bnx2x *bp)
+{
+	struct bnx2x_eth_stats *estats = &bp->eth_stats;
+	struct net_device_stats *nstats = &bp->dev->stats;
+	int i;
+
+	nstats->rx_packets =
+		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
+		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
+		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
+
+	nstats->tx_packets =
+		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
+		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
+		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
+
+	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
+
+	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
+
+	nstats->rx_dropped = estats->mac_discard;
+	for_each_queue(bp, i)
+		nstats->rx_dropped +=
+			le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
+
+	nstats->tx_dropped = 0;
+
+	nstats->multicast =
+		bnx2x_hilo(&estats->total_multicast_packets_received_hi);
+
+	nstats->collisions =
+		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
+
+	nstats->rx_length_errors =
+		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
+		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
+	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
+				 bnx2x_hilo(&estats->brb_truncate_hi);
+	nstats->rx_crc_errors =
+		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
+	nstats->rx_frame_errors =
+		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
+	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
+	nstats->rx_missed_errors = estats->xxoverflow_discard;
+
+	nstats->rx_errors = nstats->rx_length_errors +
+			    nstats->rx_over_errors +
+			    nstats->rx_crc_errors +
+			    nstats->rx_frame_errors +
+			    nstats->rx_fifo_errors +
+			    nstats->rx_missed_errors;
+
+	nstats->tx_aborted_errors =
+		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
+		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
+	nstats->tx_carrier_errors =
+		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
+	nstats->tx_fifo_errors = 0;
+	nstats->tx_heartbeat_errors = 0;
+	nstats->tx_window_errors = 0;
+
+	nstats->tx_errors = nstats->tx_aborted_errors +
+			    nstats->tx_carrier_errors +
+	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
+}
+
+static void bnx2x_drv_stats_update(struct bnx2x *bp)
+{
+	struct bnx2x_eth_stats *estats = &bp->eth_stats;
+	int i;
+
+	estats->driver_xoff = 0;
+	estats->rx_err_discard_pkt = 0;
+	estats->rx_skb_alloc_failed = 0;
+	estats->hw_csum_err = 0;
+	for_each_queue(bp, i) {
+		struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
+
+		estats->driver_xoff += qstats->driver_xoff;
+		estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
+		estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
+		estats->hw_csum_err += qstats->hw_csum_err;
+	}
+}
+
+static void bnx2x_stats_update(struct bnx2x *bp)
+{
+	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+
+	if (*stats_comp != DMAE_COMP_VAL)
+		return;
+
+	if (bp->port.pmf)
+		bnx2x_hw_stats_update(bp);
+
+	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
+		BNX2X_ERR("storm stats not updated 3 times in a row\n");
+		bnx2x_panic();
+		return;
+	}
+
+	bnx2x_net_stats_update(bp);
+	bnx2x_drv_stats_update(bp);
+
+	if (netif_msg_timer(bp)) {
+		struct bnx2x_eth_stats *estats = &bp->eth_stats;
+		int i;
+
+		printk(KERN_DEBUG "%s: brb drops %u  brb truncate %u\n",
+		       bp->dev->name,
+		       estats->brb_drop_lo, estats->brb_truncate_lo);
+
+		for_each_queue(bp, i) {
+			struct bnx2x_fastpath *fp = &bp->fp[i];
+			struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
+
+			printk(KERN_DEBUG "%s: rx usage(%4u)  *rx_cons_sb(%u)"
+					  "  rx pkt(%lu)  rx calls(%lu %lu)\n",
+			       fp->name, (le16_to_cpu(*fp->rx_cons_sb) -
+			       fp->rx_comp_cons),
+			       le16_to_cpu(*fp->rx_cons_sb),
+			       bnx2x_hilo(&qstats->
+					  total_unicast_packets_received_hi),
+			       fp->rx_calls, fp->rx_pkt);
+		}
+
+		for_each_queue(bp, i) {
+			struct bnx2x_fastpath *fp = &bp->fp[i];
+			struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
+			struct netdev_queue *txq =
+				netdev_get_tx_queue(bp->dev, i);
+
+			printk(KERN_DEBUG "%s: tx avail(%4u)  *tx_cons_sb(%u)"
+					  "  tx pkt(%lu) tx calls (%lu)"
+					  "  %s (Xoff events %u)\n",
+			       fp->name, bnx2x_tx_avail(fp),
+			       le16_to_cpu(*fp->tx_cons_sb),
+			       bnx2x_hilo(&qstats->
+					  total_unicast_packets_transmitted_hi),
+			       fp->tx_pkt,
+			       (netif_tx_queue_stopped(txq) ? "Xoff" : "Xon"),
+			       qstats->driver_xoff);
+		}
+	}
+
+	bnx2x_hw_stats_post(bp);
+	bnx2x_storm_stats_post(bp);
+}
+
+static void bnx2x_port_stats_stop(struct bnx2x *bp)
+{
+	struct dmae_command *dmae;
+	u32 opcode;
+	int loader_idx = PMF_DMAE_C(bp);
+	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+
+	bp->executer_idx = 0;
+
+	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
+		  DMAE_CMD_C_ENABLE |
+		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
+#ifdef __BIG_ENDIAN
+		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
+#else
+		  DMAE_CMD_ENDIANITY_DW_SWAP |
+#endif
+		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
+		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
+
+	if (bp->port.port_stx) {
+
+		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+		if (bp->func_stx)
+			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
+		else
+			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
+		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
+		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
+		dmae->dst_addr_lo = bp->port.port_stx >> 2;
+		dmae->dst_addr_hi = 0;
+		dmae->len = sizeof(struct host_port_stats) >> 2;
+		if (bp->func_stx) {
+			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+			dmae->comp_addr_hi = 0;
+			dmae->comp_val = 1;
+		} else {
+			dmae->comp_addr_lo =
+				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
+			dmae->comp_addr_hi =
+				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
+			dmae->comp_val = DMAE_COMP_VAL;
+
+			*stats_comp = 0;
+		}
+	}
+
+	if (bp->func_stx) {
+
+		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
+		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
+		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
+		dmae->dst_addr_lo = bp->func_stx >> 2;
+		dmae->dst_addr_hi = 0;
+		dmae->len = sizeof(struct host_func_stats) >> 2;
+		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
+		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
+		dmae->comp_val = DMAE_COMP_VAL;
+
+		*stats_comp = 0;
+	}
+}
+
+static void bnx2x_stats_stop(struct bnx2x *bp)
+{
+	int update = 0;
+
+	bnx2x_stats_comp(bp);
+
+	if (bp->port.pmf)
+		update = (bnx2x_hw_stats_update(bp) == 0);
+
+	update |= (bnx2x_storm_stats_update(bp) == 0);
+
+	if (update) {
+		bnx2x_net_stats_update(bp);
+
+		if (bp->port.pmf)
+			bnx2x_port_stats_stop(bp);
+
+		bnx2x_hw_stats_post(bp);
+		bnx2x_stats_comp(bp);
+	}
+}
+
+static void bnx2x_stats_do_nothing(struct bnx2x *bp)
+{
+}
+
+static const struct {
+	void (*action)(struct bnx2x *bp);
+	enum bnx2x_stats_state next_state;
+} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
+/* state	event	*/
+{
+/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
+/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
+/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
+/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
+},
+{
+/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
+/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
+/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
+/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
+}
+};
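+
+/*
+ * Illustrative walk through the table: starting DISABLED,
+ * bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP) runs bnx2x_stats_start()
+ * and moves to ENABLED; a later STATS_EVENT_STOP runs bnx2x_stats_stop()
+ * and drops back to DISABLED.
+ */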
+
+void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
+{
+	enum bnx2x_stats_state state;
+
+	if (unlikely(bp->panic))
+		return;
+
+	/* Protect a state change flow */
+	spin_lock_bh(&bp->stats_lock);
+	state = bp->stats_state;
+	bp->stats_state = bnx2x_stats_stm[state][event].next_state;
+	spin_unlock_bh(&bp->stats_lock);
+
+	bnx2x_stats_stm[state][event].action(bp);
+
+	if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
+		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
+		   state, event, bp->stats_state);
+}
+
+static void bnx2x_port_stats_base_init(struct bnx2x *bp)
+{
+	struct dmae_command *dmae;
+	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+
+	/* sanity */
+	if (!bp->port.pmf || !bp->port.port_stx) {
+		BNX2X_ERR("BUG!\n");
+		return;
+	}
+
+	bp->executer_idx = 0;
+
+	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
+			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
+			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
+#ifdef __BIG_ENDIAN
+			DMAE_CMD_ENDIANITY_B_DW_SWAP |
+#else
+			DMAE_CMD_ENDIANITY_DW_SWAP |
+#endif
+			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
+			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
+	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
+	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
+	dmae->dst_addr_lo = bp->port.port_stx >> 2;
+	dmae->dst_addr_hi = 0;
+	dmae->len = sizeof(struct host_port_stats) >> 2;
+	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
+	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
+	dmae->comp_val = DMAE_COMP_VAL;
+
+	*stats_comp = 0;
+	bnx2x_hw_stats_post(bp);
+	bnx2x_stats_comp(bp);
+}
+
+static void bnx2x_func_stats_base_init(struct bnx2x *bp)
+{
+	int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
+	int port = BP_PORT(bp);
+	int func;
+	u32 func_stx;
+
+	/* sanity */
+	if (!bp->port.pmf || !bp->func_stx) {
+		BNX2X_ERR("BUG!\n");
+		return;
+	}
+
+	/* save our func_stx */
+	func_stx = bp->func_stx;
+
+	for (vn = VN_0; vn < vn_max; vn++) {
+		func = 2*vn + port;
+
+		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
+		bnx2x_func_stats_init(bp);
+		bnx2x_hw_stats_post(bp);
+		bnx2x_stats_comp(bp);
+	}
+
+	/* restore our func_stx */
+	bp->func_stx = func_stx;
+}
+
+static void bnx2x_func_stats_base_update(struct bnx2x *bp)
+{
+	struct dmae_command *dmae = &bp->stats_dmae;
+	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+
+	/* sanity */
+	if (!bp->func_stx) {
+		BNX2X_ERR("BUG!\n");
+		return;
+	}
+
+	bp->executer_idx = 0;
+	memset(dmae, 0, sizeof(struct dmae_command));
+
+	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
+			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
+			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
+#ifdef __BIG_ENDIAN
+			DMAE_CMD_ENDIANITY_B_DW_SWAP |
+#else
+			DMAE_CMD_ENDIANITY_DW_SWAP |
+#endif
+			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
+			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
+	dmae->src_addr_lo = bp->func_stx >> 2;
+	dmae->src_addr_hi = 0;
+	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
+	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
+	dmae->len = sizeof(struct host_func_stats) >> 2;
+	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
+	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
+	dmae->comp_val = DMAE_COMP_VAL;
+
+	*stats_comp = 0;
+	bnx2x_hw_stats_post(bp);
+	bnx2x_stats_comp(bp);
+}
+
+void bnx2x_stats_init(struct bnx2x *bp)
+{
+	int port = BP_PORT(bp);
+	int func = BP_FUNC(bp);
+	int i;
+
+	bp->stats_pending = 0;
+	bp->executer_idx = 0;
+	bp->stats_counter = 0;
+
+	/* port and func stats for management */
+	if (!BP_NOMCP(bp)) {
+		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
+		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
+
+	} else {
+		bp->port.port_stx = 0;
+		bp->func_stx = 0;
+	}
+	DP(BNX2X_MSG_STATS, "port_stx 0x%x  func_stx 0x%x\n",
+	   bp->port.port_stx, bp->func_stx);
+
+	/* port stats */
+	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
+	bp->port.old_nig_stats.brb_discard =
+			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
+	bp->port.old_nig_stats.brb_truncate =
+			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
+	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
+		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
+	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
+		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
+
+	/* function stats */
+	for_each_queue(bp, i) {
+		struct bnx2x_fastpath *fp = &bp->fp[i];
+
+		memset(&fp->old_tclient, 0,
+		       sizeof(struct tstorm_per_client_stats));
+		memset(&fp->old_uclient, 0,
+		       sizeof(struct ustorm_per_client_stats));
+		memset(&fp->old_xclient, 0,
+		       sizeof(struct xstorm_per_client_stats));
+		memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
+	}
+
+	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
+	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
+
+	bp->stats_state = STATS_STATE_DISABLED;
+
+	if (bp->port.pmf) {
+		if (bp->port.port_stx)
+			bnx2x_port_stats_base_init(bp);
+
+		if (bp->func_stx)
+			bnx2x_func_stats_base_init(bp);
+
+	} else if (bp->func_stx)
+		bnx2x_func_stats_base_update(bp);
+}
diff --git a/drivers/net/bnx2x/bnx2x_stats.h b/drivers/net/bnx2x/bnx2x_stats.h
new file mode 100644
index 0000000..38a4e90
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_stats.h
@@ -0,0 +1,239 @@
+/* bnx2x_stats.h: Broadcom Everest network driver.
+ *
+ * Copyright (c) 2007-2010 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Eliezer Tamir
+ * Based on code from Michael Chan's bnx2 driver
+ */
+
+#ifndef BNX2X_STATS_H
+#define BNX2X_STATS_H
+
+#include <linux/types.h>
+
+struct bnx2x_eth_q_stats {
+	u32 total_bytes_received_hi;
+	u32 total_bytes_received_lo;
+	u32 total_bytes_transmitted_hi;
+	u32 total_bytes_transmitted_lo;
+	u32 total_unicast_packets_received_hi;
+	u32 total_unicast_packets_received_lo;
+	u32 total_multicast_packets_received_hi;
+	u32 total_multicast_packets_received_lo;
+	u32 total_broadcast_packets_received_hi;
+	u32 total_broadcast_packets_received_lo;
+	u32 total_unicast_packets_transmitted_hi;
+	u32 total_unicast_packets_transmitted_lo;
+	u32 total_multicast_packets_transmitted_hi;
+	u32 total_multicast_packets_transmitted_lo;
+	u32 total_broadcast_packets_transmitted_hi;
+	u32 total_broadcast_packets_transmitted_lo;
+	u32 valid_bytes_received_hi;
+	u32 valid_bytes_received_lo;
+
+	u32 error_bytes_received_hi;
+	u32 error_bytes_received_lo;
+	u32 etherstatsoverrsizepkts_hi;
+	u32 etherstatsoverrsizepkts_lo;
+	u32 no_buff_discard_hi;
+	u32 no_buff_discard_lo;
+
+	u32 driver_xoff;
+	u32 rx_err_discard_pkt;
+	u32 rx_skb_alloc_failed;
+	u32 hw_csum_err;
+};
+
+#define BNX2X_NUM_Q_STATS		13
+#define Q_STATS_OFFSET32(stat_name) \
+			(offsetof(struct bnx2x_eth_q_stats, stat_name) / 4)
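+/* e.g. Q_STATS_OFFSET32(total_bytes_received_hi) == 0: field offsets are
+ * counted in 32-bit words
+ */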
+
+struct nig_stats {
+	u32 brb_discard;
+	u32 brb_packet;
+	u32 brb_truncate;
+	u32 flow_ctrl_discard;
+	u32 flow_ctrl_octets;
+	u32 flow_ctrl_packet;
+	u32 mng_discard;
+	u32 mng_octet_inp;
+	u32 mng_octet_out;
+	u32 mng_packet_inp;
+	u32 mng_packet_out;
+	u32 pbf_octets;
+	u32 pbf_packet;
+	u32 safc_inp;
+	u32 egress_mac_pkt0_lo;
+	u32 egress_mac_pkt0_hi;
+	u32 egress_mac_pkt1_lo;
+	u32 egress_mac_pkt1_hi;
+};
+
+
+enum bnx2x_stats_event {
+	STATS_EVENT_PMF = 0,
+	STATS_EVENT_LINK_UP,
+	STATS_EVENT_UPDATE,
+	STATS_EVENT_STOP,
+	STATS_EVENT_MAX
+};
+
+enum bnx2x_stats_state {
+	STATS_STATE_DISABLED = 0,
+	STATS_STATE_ENABLED,
+	STATS_STATE_MAX
+};
+
+struct bnx2x_eth_stats {
+	u32 total_bytes_received_hi;
+	u32 total_bytes_received_lo;
+	u32 total_bytes_transmitted_hi;
+	u32 total_bytes_transmitted_lo;
+	u32 total_unicast_packets_received_hi;
+	u32 total_unicast_packets_received_lo;
+	u32 total_multicast_packets_received_hi;
+	u32 total_multicast_packets_received_lo;
+	u32 total_broadcast_packets_received_hi;
+	u32 total_broadcast_packets_received_lo;
+	u32 total_unicast_packets_transmitted_hi;
+	u32 total_unicast_packets_transmitted_lo;
+	u32 total_multicast_packets_transmitted_hi;
+	u32 total_multicast_packets_transmitted_lo;
+	u32 total_broadcast_packets_transmitted_hi;
+	u32 total_broadcast_packets_transmitted_lo;
+	u32 valid_bytes_received_hi;
+	u32 valid_bytes_received_lo;
+
+	u32 error_bytes_received_hi;
+	u32 error_bytes_received_lo;
+	u32 etherstatsoverrsizepkts_hi;
+	u32 etherstatsoverrsizepkts_lo;
+	u32 no_buff_discard_hi;
+	u32 no_buff_discard_lo;
+
+	u32 rx_stat_ifhcinbadoctets_hi;
+	u32 rx_stat_ifhcinbadoctets_lo;
+	u32 tx_stat_ifhcoutbadoctets_hi;
+	u32 tx_stat_ifhcoutbadoctets_lo;
+	u32 rx_stat_dot3statsfcserrors_hi;
+	u32 rx_stat_dot3statsfcserrors_lo;
+	u32 rx_stat_dot3statsalignmenterrors_hi;
+	u32 rx_stat_dot3statsalignmenterrors_lo;
+	u32 rx_stat_dot3statscarriersenseerrors_hi;
+	u32 rx_stat_dot3statscarriersenseerrors_lo;
+	u32 rx_stat_falsecarriererrors_hi;
+	u32 rx_stat_falsecarriererrors_lo;
+	u32 rx_stat_etherstatsundersizepkts_hi;
+	u32 rx_stat_etherstatsundersizepkts_lo;
+	u32 rx_stat_dot3statsframestoolong_hi;
+	u32 rx_stat_dot3statsframestoolong_lo;
+	u32 rx_stat_etherstatsfragments_hi;
+	u32 rx_stat_etherstatsfragments_lo;
+	u32 rx_stat_etherstatsjabbers_hi;
+	u32 rx_stat_etherstatsjabbers_lo;
+	u32 rx_stat_maccontrolframesreceived_hi;
+	u32 rx_stat_maccontrolframesreceived_lo;
+	u32 rx_stat_bmac_xpf_hi;
+	u32 rx_stat_bmac_xpf_lo;
+	u32 rx_stat_bmac_xcf_hi;
+	u32 rx_stat_bmac_xcf_lo;
+	u32 rx_stat_xoffstateentered_hi;
+	u32 rx_stat_xoffstateentered_lo;
+	u32 rx_stat_xonpauseframesreceived_hi;
+	u32 rx_stat_xonpauseframesreceived_lo;
+	u32 rx_stat_xoffpauseframesreceived_hi;
+	u32 rx_stat_xoffpauseframesreceived_lo;
+	u32 tx_stat_outxonsent_hi;
+	u32 tx_stat_outxonsent_lo;
+	u32 tx_stat_outxoffsent_hi;
+	u32 tx_stat_outxoffsent_lo;
+	u32 tx_stat_flowcontroldone_hi;
+	u32 tx_stat_flowcontroldone_lo;
+	u32 tx_stat_etherstatscollisions_hi;
+	u32 tx_stat_etherstatscollisions_lo;
+	u32 tx_stat_dot3statssinglecollisionframes_hi;
+	u32 tx_stat_dot3statssinglecollisionframes_lo;
+	u32 tx_stat_dot3statsmultiplecollisionframes_hi;
+	u32 tx_stat_dot3statsmultiplecollisionframes_lo;
+	u32 tx_stat_dot3statsdeferredtransmissions_hi;
+	u32 tx_stat_dot3statsdeferredtransmissions_lo;
+	u32 tx_stat_dot3statsexcessivecollisions_hi;
+	u32 tx_stat_dot3statsexcessivecollisions_lo;
+	u32 tx_stat_dot3statslatecollisions_hi;
+	u32 tx_stat_dot3statslatecollisions_lo;
+	u32 tx_stat_etherstatspkts64octets_hi;
+	u32 tx_stat_etherstatspkts64octets_lo;
+	u32 tx_stat_etherstatspkts65octetsto127octets_hi;
+	u32 tx_stat_etherstatspkts65octetsto127octets_lo;
+	u32 tx_stat_etherstatspkts128octetsto255octets_hi;
+	u32 tx_stat_etherstatspkts128octetsto255octets_lo;
+	u32 tx_stat_etherstatspkts256octetsto511octets_hi;
+	u32 tx_stat_etherstatspkts256octetsto511octets_lo;
+	u32 tx_stat_etherstatspkts512octetsto1023octets_hi;
+	u32 tx_stat_etherstatspkts512octetsto1023octets_lo;
+	u32 tx_stat_etherstatspkts1024octetsto1522octets_hi;
+	u32 tx_stat_etherstatspkts1024octetsto1522octets_lo;
+	u32 tx_stat_etherstatspktsover1522octets_hi;
+	u32 tx_stat_etherstatspktsover1522octets_lo;
+	u32 tx_stat_bmac_2047_hi;
+	u32 tx_stat_bmac_2047_lo;
+	u32 tx_stat_bmac_4095_hi;
+	u32 tx_stat_bmac_4095_lo;
+	u32 tx_stat_bmac_9216_hi;
+	u32 tx_stat_bmac_9216_lo;
+	u32 tx_stat_bmac_16383_hi;
+	u32 tx_stat_bmac_16383_lo;
+	u32 tx_stat_dot3statsinternalmactransmiterrors_hi;
+	u32 tx_stat_dot3statsinternalmactransmiterrors_lo;
+	u32 tx_stat_bmac_ufl_hi;
+	u32 tx_stat_bmac_ufl_lo;
+
+	u32 pause_frames_received_hi;
+	u32 pause_frames_received_lo;
+	u32 pause_frames_sent_hi;
+	u32 pause_frames_sent_lo;
+
+	u32 etherstatspkts1024octetsto1522octets_hi;
+	u32 etherstatspkts1024octetsto1522octets_lo;
+	u32 etherstatspktsover1522octets_hi;
+	u32 etherstatspktsover1522octets_lo;
+
+	u32 brb_drop_hi;
+	u32 brb_drop_lo;
+	u32 brb_truncate_hi;
+	u32 brb_truncate_lo;
+
+	u32 mac_filter_discard;
+	u32 xxoverflow_discard;
+	u32 brb_truncate_discard;
+	u32 mac_discard;
+
+	u32 driver_xoff;
+	u32 rx_err_discard_pkt;
+	u32 rx_skb_alloc_failed;
+	u32 hw_csum_err;
+
+	u32 nig_timer_max;
+};
+
+#define BNX2X_NUM_STATS			43
+#define STATS_OFFSET32(stat_name) \
+			(offsetof(struct bnx2x_eth_stats, stat_name) / 4)
+
+/* Forward declaration */
+struct bnx2x;
+
+
+void bnx2x_stats_init(struct bnx2x *bp);
+
+extern const u32 dmae_reg_go_c[];
+extern int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
+			 u32 data_hi, u32 data_lo, int common);
+
+
+#endif /* BNX2X_STATS_H */
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
deleted file mode 100644
index 51b7883..0000000
--- a/drivers/net/bnx2x_main.c
+++ /dev/null
@@ -1,13933 +0,0 @@
-/* bnx2x_main.c: Broadcom Everest network driver.
- *
- * Copyright (c) 2007-2010 Broadcom Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation.
- *
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
- * Written by: Eliezer Tamir
- * Based on code from Michael Chan's bnx2 driver
- * UDP CSUM errata workaround by Arik Gendelman
- * Slowpath and fastpath rework by Vladislav Zolotarov
- * Statistics and Link management by Yitchak Gertner
- *
- */
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/kernel.h>
-#include <linux/device.h>  /* for dev_info() */
-#include <linux/timer.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/dma-mapping.h>
-#include <linux/bitops.h>
-#include <linux/irq.h>
-#include <linux/delay.h>
-#include <asm/byteorder.h>
-#include <linux/time.h>
-#include <linux/ethtool.h>
-#include <linux/mii.h>
-#include <linux/if_vlan.h>
-#include <net/ip.h>
-#include <net/tcp.h>
-#include <net/checksum.h>
-#include <net/ip6_checksum.h>
-#include <linux/workqueue.h>
-#include <linux/crc32.h>
-#include <linux/crc32c.h>
-#include <linux/prefetch.h>
-#include <linux/zlib.h>
-#include <linux/io.h>
-#include <linux/stringify.h>
-
-
-#include "bnx2x.h"
-#include "bnx2x_init.h"
-#include "bnx2x_init_ops.h"
-#include "bnx2x_dump.h"
-
-#define DRV_MODULE_VERSION	"1.52.53-1"
-#define DRV_MODULE_RELDATE	"2010/18/04"
-#define BNX2X_BC_VER		0x040200
-
-#include <linux/firmware.h>
-#include "bnx2x_fw_file_hdr.h"
-/* FW files */
-#define FW_FILE_VERSION					\
-	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
-	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
-	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
-	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
-#define FW_FILE_NAME_E1		"bnx2x-e1-" FW_FILE_VERSION ".fw"
-#define FW_FILE_NAME_E1H	"bnx2x-e1h-" FW_FILE_VERSION ".fw"
-
-/* Time in jiffies before concluding the transmitter is hung */
-#define TX_TIMEOUT		(5*HZ)
-
-static char version[] __devinitdata =
-	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
-	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
-
-MODULE_AUTHOR("Eliezer Tamir");
-MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_MODULE_VERSION);
-MODULE_FIRMWARE(FW_FILE_NAME_E1);
-MODULE_FIRMWARE(FW_FILE_NAME_E1H);
-
-static int multi_mode = 1;
-module_param(multi_mode, int, 0);
-MODULE_PARM_DESC(multi_mode, " Multi queue mode "
-			     "(0 Disable; 1 Enable (default))");
-
-static int num_queues;
-module_param(num_queues, int, 0);
-MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
-				" (default is as a number of CPUs)");
-
-static int disable_tpa;
-module_param(disable_tpa, int, 0);
-MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
-
-static int int_mode;
-module_param(int_mode, int, 0);
-MODULE_PARM_DESC(int_mode, " Force interrupt mode other then MSI-X "
-				"(1 INT#x; 2 MSI)");
-
-static int dropless_fc;
-module_param(dropless_fc, int, 0);
-MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
-
-static int poll;
-module_param(poll, int, 0);
-MODULE_PARM_DESC(poll, " Use polling (for debug)");
-
-static int mrrs = -1;
-module_param(mrrs, int, 0);
-MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
-
-static int debug;
-module_param(debug, int, 0);
-MODULE_PARM_DESC(debug, " Default debug msglevel");
-
-static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
-
-static struct workqueue_struct *bnx2x_wq;
-
-enum bnx2x_board_type {
-	BCM57710 = 0,
-	BCM57711 = 1,
-	BCM57711E = 2,
-};
-
-/* indexed by board_type, above */
-static struct {
-	char *name;
-} board_info[] __devinitdata = {
-	{ "Broadcom NetXtreme II BCM57710 XGb" },
-	{ "Broadcom NetXtreme II BCM57711 XGb" },
-	{ "Broadcom NetXtreme II BCM57711E XGb" }
-};
-
-
-static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
-	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
-	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
-	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
-	{ 0 }
-};
-
-MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
-
-/****************************************************************************
-* General service functions
-****************************************************************************/
-
-/* used only at init
- * locking is done by mcp
- */
-void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
-{
-	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
-	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
-	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
-			       PCICFG_VENDOR_ID_OFFSET);
-}
-
-static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
-{
-	u32 val;
-
-	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
-	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
-	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
-			       PCICFG_VENDOR_ID_OFFSET);
-
-	return val;
-}
-
-static const u32 dmae_reg_go_c[] = {
-	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
-	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
-	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
-	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
-};
-
-/* copy command into DMAE command memory and set DMAE command go */
-static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
-			    int idx)
-{
-	u32 cmd_offset;
-	int i;
-
-	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
-	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
-		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
-
-		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
-		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
-	}
-	REG_WR(bp, dmae_reg_go_c[idx], 1);
-}
-
-void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
-		      u32 len32)
-{
-	struct dmae_command dmae;
-	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
-	int cnt = 200;
-
-	if (!bp->dmae_ready) {
-		u32 *data = bnx2x_sp(bp, wb_data[0]);
-
-		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
-		   "  using indirect\n", dst_addr, len32);
-		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
-		return;
-	}
-
-	memset(&dmae, 0, sizeof(struct dmae_command));
-
-	dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
-		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
-		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
-#ifdef __BIG_ENDIAN
-		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
-#else
-		       DMAE_CMD_ENDIANITY_DW_SWAP |
-#endif
-		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
-		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
-	dmae.src_addr_lo = U64_LO(dma_addr);
-	dmae.src_addr_hi = U64_HI(dma_addr);
-	dmae.dst_addr_lo = dst_addr >> 2;
-	dmae.dst_addr_hi = 0;
-	dmae.len = len32;
-	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
-	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
-	dmae.comp_val = DMAE_COMP_VAL;
-
-	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
-	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
-		    "dst_addr [%x:%08x (%08x)]\n"
-	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
-	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
-	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
-	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
-	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
-	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
-	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
-
-	mutex_lock(&bp->dmae_mutex);
-
-	*wb_comp = 0;
-
-	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
-
-	udelay(5);
-
-	while (*wb_comp != DMAE_COMP_VAL) {
-		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
-
-		if (!cnt) {
-			BNX2X_ERR("DMAE timeout!\n");
-			break;
-		}
-		cnt--;
-		/* adjust delay for emulation/FPGA */
-		if (CHIP_REV_IS_SLOW(bp))
-			msleep(100);
-		else
-			udelay(5);
-	}
-
-	mutex_unlock(&bp->dmae_mutex);
-}
-
-void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
-{
-	struct dmae_command dmae;
-	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
-	int cnt = 200;
-
-	if (!bp->dmae_ready) {
-		u32 *data = bnx2x_sp(bp, wb_data[0]);
-		int i;
-
-		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
-		   "  using indirect\n", src_addr, len32);
-		for (i = 0; i < len32; i++)
-			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
-		return;
-	}
-
-	memset(&dmae, 0, sizeof(struct dmae_command));
-
-	dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
-		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
-		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
-#ifdef __BIG_ENDIAN
-		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
-#else
-		       DMAE_CMD_ENDIANITY_DW_SWAP |
-#endif
-		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
-		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
-	dmae.src_addr_lo = src_addr >> 2;
-	dmae.src_addr_hi = 0;
-	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
-	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
-	dmae.len = len32;
-	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
-	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
-	dmae.comp_val = DMAE_COMP_VAL;
-
-	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
-	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
-		    "dst_addr [%x:%08x (%08x)]\n"
-	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
-	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
-	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
-	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
-
-	mutex_lock(&bp->dmae_mutex);
-
-	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
-	*wb_comp = 0;
-
-	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
-
-	udelay(5);
-
-	while (*wb_comp != DMAE_COMP_VAL) {
-
-		if (!cnt) {
-			BNX2X_ERR("DMAE timeout!\n");
-			break;
-		}
-		cnt--;
-		/* adjust delay for emulation/FPGA */
-		if (CHIP_REV_IS_SLOW(bp))
-			msleep(100);
-		else
-			udelay(5);
-	}
-	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
-	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
-	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
-
-	mutex_unlock(&bp->dmae_mutex);
-}
-
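-/* Write a buffer longer than the per-command DMAE limit by splitting it
- * into chunks of at most DMAE_LEN32_WR_MAX dwords.  Note that len counts
- * dwords while offset advances in bytes, hence the '* 4'.
- */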
-void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
-			       u32 addr, u32 len)
-{
-	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
-	int offset = 0;
-
-	while (len > dmae_wr_max) {
-		bnx2x_write_dmae(bp, phys_addr + offset,
-				 addr + offset, dmae_wr_max);
-		offset += dmae_wr_max * 4;
-		len -= dmae_wr_max;
-	}
-
-	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
-}
-
-/* used only for slowpath so not inlined */
-static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
-{
-	u32 wb_write[2];
-
-	wb_write[0] = val_hi;
-	wb_write[1] = val_lo;
-	REG_WR_DMAE(bp, reg, wb_write, 2);
-}
-
-#ifdef USE_WB_RD
-static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
-{
-	u32 wb_data[2];
-
-	REG_RD_DMAE(bp, reg, wb_data, 2);
-
-	return HILO_U64(wb_data[0], wb_data[1]);
-}
-#endif
-
-static int bnx2x_mc_assert(struct bnx2x *bp)
-{
-	char last_idx;
-	int i, rc = 0;
-	u32 row0, row1, row2, row3;
-
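-	/* Each STORM keeps an assert list in its internal memory; judging by
-	 * the offsets, every entry is four dwords and the list is terminated
-	 * by COMMON_ASM_INVALID_ASSERT_OPCODE in the first dword.  Walk the
-	 * four lists (X/T/C/U) and print whatever is found.
-	 */
-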
-	/* XSTORM */
-	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
-			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
-	if (last_idx)
-		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
-
-	/* print the asserts */
-	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
-
-		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
-			      XSTORM_ASSERT_LIST_OFFSET(i));
-		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
-			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
-		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
-			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
-		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
-			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);
-
-		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
-			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
-				  " 0x%08x 0x%08x 0x%08x\n",
-				  i, row3, row2, row1, row0);
-			rc++;
-		} else {
-			break;
-		}
-	}
-
-	/* TSTORM */
-	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
-			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
-	if (last_idx)
-		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
-
-	/* print the asserts */
-	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
-
-		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
-			      TSTORM_ASSERT_LIST_OFFSET(i));
-		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
-			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
-		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
-			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
-		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
-			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);
-
-		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
-			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
-				  " 0x%08x 0x%08x 0x%08x\n",
-				  i, row3, row2, row1, row0);
-			rc++;
-		} else {
-			break;
-		}
-	}
-
-	/* CSTORM */
-	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
-			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
-	if (last_idx)
-		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
-
-	/* print the asserts */
-	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
-
-		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
-			      CSTORM_ASSERT_LIST_OFFSET(i));
-		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
-			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
-		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
-			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
-		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
-			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);
-
-		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
-			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
-				  " 0x%08x 0x%08x 0x%08x\n",
-				  i, row3, row2, row1, row0);
-			rc++;
-		} else {
-			break;
-		}
-	}
-
-	/* USTORM */
-	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
-			   USTORM_ASSERT_LIST_INDEX_OFFSET);
-	if (last_idx)
-		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
-
-	/* print the asserts */
-	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
-
-		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
-			      USTORM_ASSERT_LIST_OFFSET(i));
-		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
-			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
-		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
-			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
-		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
-			      USTORM_ASSERT_LIST_OFFSET(i) + 12);
-
-		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
-			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
-				  " 0x%08x 0x%08x 0x%08x\n",
-				  i, row3, row2, row1, row0);
-			rc++;
-		} else {
-			break;
-		}
-	}
-
-	return rc;
-}
-
-static void bnx2x_fw_dump(struct bnx2x *bp)
-{
-	u32 addr;
-	u32 mark, offset;
-	__be32 data[9];
-	int word;
-
-	if (BP_NOMCP(bp)) {
-		BNX2X_ERR("NO MCP - can not dump\n");
-		return;
-	}
-
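-	/* The dword just below the shared memory base presumably holds the
-	 * MCP scratchpad mark; align it up to 4 bytes and rebase it into the
-	 * MCPR scratch window, then dump the ring: first from mark up to
-	 * shmem_base, then from the start of the buffer up to mark.
-	 */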
-	addr = bp->common.shmem_base - 0x0800 + 4;
-	mark = REG_RD(bp, addr);
-	mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
-	pr_err("begin fw dump (mark 0x%x)\n", mark);
-
-	pr_err("");
-	for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) {
-		for (word = 0; word < 8; word++)
-			data[word] = htonl(REG_RD(bp, offset + 4*word));
-		data[8] = 0x0;
-		pr_cont("%s", (char *)data);
-	}
-	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
-		for (word = 0; word < 8; word++)
-			data[word] = htonl(REG_RD(bp, offset + 4*word));
-		data[8] = 0x0;
-		pr_cont("%s", (char *)data);
-	}
-	pr_err("end of fw dump\n");
-}
-
-static void bnx2x_panic_dump(struct bnx2x *bp)
-{
-	int i;
-	u16 j, start, end;
-
-	bp->stats_state = STATS_STATE_DISABLED;
-	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
-
-	BNX2X_ERR("begin crash dump -----------------\n");
-
-	/* Indices */
-	/* Common */
-	BNX2X_ERR("def_c_idx(0x%x)  def_u_idx(0x%x)  def_x_idx(0x%x)"
-		  "  def_t_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)"
-		  "  spq_prod_idx(0x%x)\n",
-		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
-		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
-
-	/* Rx */
-	for_each_queue(bp, i) {
-		struct bnx2x_fastpath *fp = &bp->fp[i];
-
-		BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)"
-			  "  *rx_bd_cons_sb(0x%x)  rx_comp_prod(0x%x)"
-			  "  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
-			  i, fp->rx_bd_prod, fp->rx_bd_cons,
-			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
-			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
-		BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)"
-			  "  fp_u_idx(0x%x) *sb_u_idx(0x%x)\n",
-			  fp->rx_sge_prod, fp->last_max_sge,
-			  le16_to_cpu(fp->fp_u_idx),
-			  fp->status_blk->u_status_block.status_block_index);
-	}
-
-	/* Tx */
-	for_each_queue(bp, i) {
-		struct bnx2x_fastpath *fp = &bp->fp[i];
-
-		BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)"
-			  "  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)"
-			  "  *tx_cons_sb(0x%x)\n",
-			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
-			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
-		BNX2X_ERR("     fp_c_idx(0x%x)  *sb_c_idx(0x%x)"
-			  "  tx_db_prod(0x%x)\n", le16_to_cpu(fp->fp_c_idx),
-			  fp->status_blk->c_status_block.status_block_index,
-			  fp->tx_db.data.prod);
-	}
-
-	/* Rings */
-	/* Rx */
-	for_each_queue(bp, i) {
-		struct bnx2x_fastpath *fp = &bp->fp[i];
-
-		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
-		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
-		for (j = start; j != end; j = RX_BD(j + 1)) {
-			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
-			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
-
-			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
-				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
-		}
-
-		start = RX_SGE(fp->rx_sge_prod);
-		end = RX_SGE(fp->last_max_sge);
-		for (j = start; j != end; j = RX_SGE(j + 1)) {
-			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
-			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
-
-			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
-				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
-		}
-
-		start = RCQ_BD(fp->rx_comp_cons - 10);
-		end = RCQ_BD(fp->rx_comp_cons + 503);
-		for (j = start; j != end; j = RCQ_BD(j + 1)) {
-			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
-
-			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
-				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
-		}
-	}
-
-	/* Tx */
-	for_each_queue(bp, i) {
-		struct bnx2x_fastpath *fp = &bp->fp[i];
-
-		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
-		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
-		for (j = start; j != end; j = TX_BD(j + 1)) {
-			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
-
-			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
-				  i, j, sw_bd->skb, sw_bd->first_bd);
-		}
-
-		start = TX_BD(fp->tx_bd_cons - 10);
-		end = TX_BD(fp->tx_bd_cons + 254);
-		for (j = start; j != end; j = TX_BD(j + 1)) {
-			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
-
-			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
-				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
-		}
-	}
-
-	bnx2x_fw_dump(bp);
-	bnx2x_mc_assert(bp);
-	BNX2X_ERR("end crash dump -----------------\n");
-}
-
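-/* Enable HC interrupts according to the current interrupt mode (MSI-X,
- * MSI or INTx).  Note that in the INTx case the config is written twice:
- * first with the MSI/MSI-X enable bit set, then with it cleared.
- */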
-static void bnx2x_int_enable(struct bnx2x *bp)
-{
-	int port = BP_PORT(bp);
-	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
-	u32 val = REG_RD(bp, addr);
-	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
-	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
-
-	if (msix) {
-		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
-			 HC_CONFIG_0_REG_INT_LINE_EN_0);
-		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
-			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
-	} else if (msi) {
-		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
-		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
-			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
-			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
-	} else {
-		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
-			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
-			HC_CONFIG_0_REG_INT_LINE_EN_0 |
-			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
-
-		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
-		   val, port, addr);
-
-		REG_WR(bp, addr, val);
-
-		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
-	}
-
-	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
-	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
-
-	REG_WR(bp, addr, val);
-	/*
-	 * Ensure that HC_CONFIG is written before leading/trailing edge config
-	 */
-	mmiowb();
-	barrier();
-
-	if (CHIP_IS_E1H(bp)) {
-		/* init leading/trailing edge */
-		if (IS_E1HMF(bp)) {
-			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
-			if (bp->port.pmf)
-				/* enable nig and gpio3 attention */
-				val |= 0x1100;
-		} else
-			val = 0xffff;
-
-		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
-		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
-	}
-
-	/* Make sure that interrupts are indeed enabled from here on */
-	mmiowb();
-}
-
-static void bnx2x_int_disable(struct bnx2x *bp)
-{
-	int port = BP_PORT(bp);
-	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
-	u32 val = REG_RD(bp, addr);
-
-	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
-		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
-		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
-		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
-
-	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
-	   val, port, addr);
-
-	/* flush all outstanding writes */
-	mmiowb();
-
-	REG_WR(bp, addr, val);
-	if (REG_RD(bp, addr) != val)
-		BNX2X_ERR("BUG! proper val not read from IGU!\n");
-}
-
-static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
-{
-	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
-	int i, offset;
-
-	/* disable interrupt handling */
-	atomic_inc(&bp->intr_sem);
-	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
-
-	if (disable_hw)
-		/* prevent the HW from sending interrupts */
-		bnx2x_int_disable(bp);
-
-	/* make sure all ISRs are done */
-	if (msix) {
-		synchronize_irq(bp->msix_table[0].vector);
-		offset = 1;
-#ifdef BCM_CNIC
-		offset++;
-#endif
-		for_each_queue(bp, i)
-			synchronize_irq(bp->msix_table[i + offset].vector);
-	} else
-		synchronize_irq(bp->pdev->irq);
-
-	/* make sure sp_task is not running */
-	cancel_delayed_work(&bp->sp_task);
-	flush_workqueue(bnx2x_wq);
-}
-
-/* fast path */
-
-/*
- * General service functions
- */
-
-/* Return true if the lock was acquired successfully */
-static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
-{
-	u32 lock_status;
-	u32 resource_bit = (1 << resource);
-	int func = BP_FUNC(bp);
-	u32 hw_lock_control_reg;
-
-	DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
-
-	/* Validating that the resource is within range */
-	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
-		DP(NETIF_MSG_HW,
-		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
-		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
-		return false;
-	}
-
-	if (func <= 5)
-		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
-	else
-		hw_lock_control_reg =
-				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
-
-	/* Try to acquire the lock */
-	REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
-	lock_status = REG_RD(bp, hw_lock_control_reg);
-	if (lock_status & resource_bit)
-		return true;
-
-	DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
-	return false;
-}
-
-static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
-				u8 storm, u16 index, u8 op, u8 update)
-{
-	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
-		       COMMAND_REG_INT_ACK);
-	struct igu_ack_register igu_ack;
-
-	igu_ack.status_block_index = index;
-	igu_ack.sb_id_and_flags =
-			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
-			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
-			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
-			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
-
-	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
-	   (*(u32 *)&igu_ack), hc_addr);
-	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
-
-	/* Make sure that ACK is written */
-	mmiowb();
-	barrier();
-}
-
-static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
-{
-	struct host_status_block *fpsb = fp->status_blk;
-
-	barrier(); /* status block is written to by the chip */
-	fp->fp_c_idx = fpsb->c_status_block.status_block_index;
-	fp->fp_u_idx = fpsb->u_status_block.status_block_index;
-}
-
-static u16 bnx2x_ack_int(struct bnx2x *bp)
-{
-	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
-		       COMMAND_REG_SIMD_MASK);
-	u32 result = REG_RD(bp, hc_addr);
-
-	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
-	   result, hc_addr);
-
-	return result;
-}
-
-
-/*
- * fast path service functions
- */
-
-static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
-{
-	/* Tell compiler that consumer and producer can change */
-	barrier();
-	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
-}
-
-/* free skb in the packet ring at pos idx
- * return idx of last bd freed
- */
-static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
-			     u16 idx)
-{
-	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
-	struct eth_tx_start_bd *tx_start_bd;
-	struct eth_tx_bd *tx_data_bd;
-	struct sk_buff *skb = tx_buf->skb;
-	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
-	int nbd;
-
-	/* prefetch skb end pointer to speed up dev_kfree_skb() */
-	prefetch(&skb->end);
-
-	DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
-	   idx, tx_buf, skb);
-
-	/* unmap first bd */
-	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
-	tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
-	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
-			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
-
-	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
-#ifdef BNX2X_STOP_ON_ERROR
-	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
-		BNX2X_ERR("BAD nbd!\n");
-		bnx2x_panic();
-	}
-#endif
-	new_cons = nbd + tx_buf->first_bd;
-
-	/* Get the next bd */
-	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
-
-	/* Skip a parse bd... */
-	--nbd;
-	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
-
-	/* ...and the TSO split header bd since they have no mapping */
-	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
-		--nbd;
-		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
-	}
-
-	/* now free frags */
-	while (nbd > 0) {
-
-		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
-		tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
-		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
-			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
-		if (--nbd)
-			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
-	}
-
-	/* release skb */
-	WARN_ON(!skb);
-	dev_kfree_skb(skb);
-	tx_buf->first_bd = 0;
-	tx_buf->skb = NULL;
-
-	return new_cons;
-}
-
-static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
-{
-	s16 used;
-	u16 prod;
-	u16 cons;
-
-	prod = fp->tx_bd_prod;
-	cons = fp->tx_bd_cons;
-
-	/* NUM_TX_RINGS = the number of "next-page" entries.
-	   It will be used as a threshold */
-	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
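-	/* e.g. with prod == cons (an empty ring) used == NUM_TX_RINGS, so
-	   the "next-page" BDs are never counted as available */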
-
-#ifdef BNX2X_STOP_ON_ERROR
-	WARN_ON(used < 0);
-	WARN_ON(used > fp->bp->tx_ring_size);
-	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
-#endif
-
-	return (s16)(fp->bp->tx_ring_size) - used;
-}
-
-static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
-{
-	u16 hw_cons;
-
-	/* Tell compiler that status block fields can change */
-	barrier();
-	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
-	return hw_cons != fp->tx_pkt_cons;
-}
-
-static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
-{
-	struct bnx2x *bp = fp->bp;
-	struct netdev_queue *txq;
-	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
-
-#ifdef BNX2X_STOP_ON_ERROR
-	if (unlikely(bp->panic))
-		return -1;
-#endif
-
-	txq = netdev_get_tx_queue(bp->dev, fp->index);
-	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
-	sw_cons = fp->tx_pkt_cons;
-
-	while (sw_cons != hw_cons) {
-		u16 pkt_cons;
-
-		pkt_cons = TX_BD(sw_cons);
-
-		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
-
-		DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
-		   hw_cons, sw_cons, pkt_cons);
-
-/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
-			rmb();
-			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
-		}
-*/
-		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
-		sw_cons++;
-	}
-
-	fp->tx_pkt_cons = sw_cons;
-	fp->tx_bd_cons = bd_cons;
-
-	/* Need to make the tx_bd_cons update visible to start_xmit()
-	 * before checking for netif_tx_queue_stopped().  Without the
-	 * memory barrier, there is a small possibility that
-	 * start_xmit() will miss it and cause the queue to be stopped
-	 * forever.
-	 */
-	smp_mb();
-
-	/* TBD need a thresh? */
-	if (unlikely(netif_tx_queue_stopped(txq))) {
-		/* Taking tx_lock() is needed to prevent re-enabling the queue
-		 * while it's empty. This could happen if rx_action() is
-		 * suspended in bnx2x_tx_int() after checking the condition
-		 * but before netif_tx_wake_queue(), while tx_action
-		 * (bnx2x_start_xmit()):
-		 *
-		 * stops the queue -> sees fresh tx_bd_cons -> releases the
-		 * queue -> sends some packets consuming the whole queue
-		 * again -> stops the queue
-		 */
-
-		__netif_tx_lock(txq, smp_processor_id());
-
-		if ((netif_tx_queue_stopped(txq)) &&
-		    (bp->state == BNX2X_STATE_OPEN) &&
-		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
-			netif_tx_wake_queue(txq);
-
-		__netif_tx_unlock(txq);
-	}
-	return 0;
-}
-
-#ifdef BCM_CNIC
-static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
-#endif
-
-static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
-			   union eth_rx_cqe *rr_cqe)
-{
-	struct bnx2x *bp = fp->bp;
-	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
-	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
-
-	DP(BNX2X_MSG_SP,
-	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
-	   fp->index, cid, command, bp->state,
-	   rr_cqe->ramrod_cqe.ramrod_type);
-
-	bp->spq_left++;
-
-	if (fp->index) {
-		switch (command | fp->state) {
-		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
-						BNX2X_FP_STATE_OPENING):
-			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
-			   cid);
-			fp->state = BNX2X_FP_STATE_OPEN;
-			break;
-
-		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
-			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
-			   cid);
-			fp->state = BNX2X_FP_STATE_HALTED;
-			break;
-
-		default:
-			BNX2X_ERR("unexpected MC reply (%d)  "
-				  "fp[%d] state is %x\n",
-				  command, fp->index, fp->state);
-			break;
-		}
-		mb(); /* force bnx2x_wait_ramrod() to see the change */
-		return;
-	}
-
-	switch (command | bp->state) {
-	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
-		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
-		bp->state = BNX2X_STATE_OPEN;
-		break;
-
-	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
-		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
-		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
-		fp->state = BNX2X_FP_STATE_HALTED;
-		break;
-
-	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
-		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
-		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
-		break;
-
-#ifdef BCM_CNIC
-	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
-		DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
-		bnx2x_cnic_cfc_comp(bp, cid);
-		break;
-#endif
-
-	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
-	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
-		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
-		bp->set_mac_pending--;
-		smp_wmb();
-		break;
-
-	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
-		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
-		bp->set_mac_pending--;
-		smp_wmb();
-		break;
-
-	default:
-		BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
-			  command, bp->state);
-		break;
-	}
-	mb(); /* force bnx2x_wait_ramrod() to see the change */
-}
-
-static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
-				     struct bnx2x_fastpath *fp, u16 index)
-{
-	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
-	struct page *page = sw_buf->page;
-	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
-
-	/* Skip "next page" elements */
-	if (!page)
-		return;
-
-	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
-		       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
-	__free_pages(page, PAGES_PER_SGE_SHIFT);
-
-	sw_buf->page = NULL;
-	sge->addr_hi = 0;
-	sge->addr_lo = 0;
-}
-
-static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
-					   struct bnx2x_fastpath *fp, int last)
-{
-	int i;
-
-	for (i = 0; i < last; i++)
-		bnx2x_free_rx_sge(bp, fp, i);
-}
-
-static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
-				     struct bnx2x_fastpath *fp, u16 index)
-{
-	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
-	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
-	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
-	dma_addr_t mapping;
-
-	if (unlikely(page == NULL))
-		return -ENOMEM;
-
-	mapping = dma_map_page(&bp->pdev->dev, page, 0,
-			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
-	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
-		__free_pages(page, PAGES_PER_SGE_SHIFT);
-		return -ENOMEM;
-	}
-
-	sw_buf->page = page;
-	dma_unmap_addr_set(sw_buf, mapping, mapping);
-
-	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
-	sge->addr_lo = cpu_to_le32(U64_LO(mapping));
-
-	return 0;
-}
-
-static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
-				     struct bnx2x_fastpath *fp, u16 index)
-{
-	struct sk_buff *skb;
-	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
-	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
-	dma_addr_t mapping;
-
-	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
-	if (unlikely(skb == NULL))
-		return -ENOMEM;
-
-	mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
-				 DMA_FROM_DEVICE);
-	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
-		dev_kfree_skb(skb);
-		return -ENOMEM;
-	}
-
-	rx_buf->skb = skb;
-	dma_unmap_addr_set(rx_buf, mapping, mapping);
-
-	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
-	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
-
-	return 0;
-}
-
-/* Note that we are not allocating a new skb here,
- * just moving one from cons to prod.
- * No new mapping is created,
- * so there is no need to check for dma_mapping_error().
- */
-static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
-			       struct sk_buff *skb, u16 cons, u16 prod)
-{
-	struct bnx2x *bp = fp->bp;
-	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
-	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
-	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
-	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
-
-	dma_sync_single_for_device(&bp->pdev->dev,
-				   dma_unmap_addr(cons_rx_buf, mapping),
-				   RX_COPY_THRESH, DMA_FROM_DEVICE);
-
-	prod_rx_buf->skb = cons_rx_buf->skb;
-	dma_unmap_addr_set(prod_rx_buf, mapping,
-			   dma_unmap_addr(cons_rx_buf, mapping));
-	*prod_bd = *cons_bd;
-}
-
-static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
-					     u16 idx)
-{
-	u16 last_max = fp->last_max_sge;
-
-	if (SUB_S16(idx, last_max) > 0)
-		fp->last_max_sge = idx;
-}
-
-static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
-{
-	int i, j;
-
-	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
-		int idx = RX_SGE_CNT * i - 1;
-
-		for (j = 0; j < 2; j++) {
-			SGE_MASK_CLEAR_BIT(fp, idx);
-			idx--;
-		}
-	}
-}
-
-static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
-				  struct eth_fast_path_rx_cqe *fp_cqe)
-{
-	struct bnx2x *bp = fp->bp;
-	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
-				     le16_to_cpu(fp_cqe->len_on_bd)) >>
-		      SGE_PAGE_SHIFT;
-	u16 last_max, last_elem, first_elem;
-	u16 delta = 0;
-	u16 i;
-
-	if (!sge_len)
-		return;
-
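-	/* Each bit in fp->sge_mask tracks one SGE: bits are cleared as the
-	 * FW reports pages consumed, and the producer is advanced only over
-	 * mask elements that are fully consumed (all-zero), which are then
-	 * re-armed to RX_SGE_MASK_ELEM_ONE_MASK.
-	 */
-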
-	/* First mark all used pages */
-	for (i = 0; i < sge_len; i++)
-		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
-
-	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
-	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
-
-	/* Here we assume that the last SGE index is the biggest */
-	prefetch((void *)(fp->sge_mask));
-	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
-
-	last_max = RX_SGE(fp->last_max_sge);
-	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
-	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
-
-	/* If ring is not full */
-	if (last_elem + 1 != first_elem)
-		last_elem++;
-
-	/* Now update the prod */
-	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
-		if (likely(fp->sge_mask[i]))
-			break;
-
-		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
-		delta += RX_SGE_MASK_ELEM_SZ;
-	}
-
-	if (delta > 0) {
-		fp->rx_sge_prod += delta;
-		/* clear page-end entries */
-		bnx2x_clear_sge_mask_next_elems(fp);
-	}
-
-	DP(NETIF_MSG_RX_STATUS,
-	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
-	   fp->last_max_sge, fp->rx_sge_prod);
-}
-
-static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
-{
-	/* Set the mask to all ones: it's faster to compare to 0 than to 0xf's */
-	memset(fp->sge_mask, 0xff,
-	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
-
-	/* Clear the last two indices in each page:
-	   these are the indices that correspond to the "next" element,
-	   hence they will never be indicated and should be removed from
-	   the calculations. */
-	bnx2x_clear_sge_mask_next_elems(fp);
-}
-
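-/* TPA (LRO) bin lifecycle, in short: on TPA_START the skb posted at the
- * consumer is parked in the per-queue tpa_pool and the spare skb from the
- * pool is mapped at the producer; on TPA_END (bnx2x_tpa_stop() below) the
- * parked skb is completed with SGE pages and passed up the stack.
- */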
-static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
-			    struct sk_buff *skb, u16 cons, u16 prod)
-{
-	struct bnx2x *bp = fp->bp;
-	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
-	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
-	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
-	dma_addr_t mapping;
-
-	/* move empty skb from pool to prod and map it */
-	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
-	mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
-				 bp->rx_buf_size, DMA_FROM_DEVICE);
-	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
-
-	/* move partial skb from cons to pool (don't unmap yet) */
-	fp->tpa_pool[queue] = *cons_rx_buf;
-
-	/* mark bin state as start - print error if current state != stop */
-	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
-		BNX2X_ERR("start of bin not in stop [%d]\n", queue);
-
-	fp->tpa_state[queue] = BNX2X_TPA_START;
-
-	/* point prod_bd to new skb */
-	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
-	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
-
-#ifdef BNX2X_STOP_ON_ERROR
-	fp->tpa_queue_used |= (1 << queue);
-#ifdef _ASM_GENERIC_INT_L64_H
-	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
-#else
-	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
-#endif
-	   fp->tpa_queue_used);
-#endif
-}
-
-static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
-			       struct sk_buff *skb,
-			       struct eth_fast_path_rx_cqe *fp_cqe,
-			       u16 cqe_idx)
-{
-	struct sw_rx_page *rx_pg, old_rx_pg;
-	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
-	u32 i, frag_len, frag_size, pages;
-	int err;
-	int j;
-
-	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
-	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
-
-	/* This is needed in order to enable forwarding support */
-	if (frag_size)
-		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
-					       max(frag_size, (u32)len_on_bd));
-
-#ifdef BNX2X_STOP_ON_ERROR
-	if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
-		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
-			  pages, cqe_idx);
-		BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
-			  fp_cqe->pkt_len, len_on_bd);
-		bnx2x_panic();
-		return -EINVAL;
-	}
-#endif
-
-	/* Run through the SGL and compose the fragmented skb */
-	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
-		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
-
-		/* FW gives the indices of the SGE as if the ring is an array
-		   (meaning that "next" element will consume 2 indices) */
-		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
-		rx_pg = &fp->rx_page_ring[sge_idx];
-		old_rx_pg = *rx_pg;
-
-		/* If we fail to allocate a substitute page, we simply stop
-		   where we are and drop the whole packet */
-		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
-		if (unlikely(err)) {
-			fp->eth_q_stats.rx_skb_alloc_failed++;
-			return err;
-		}
-
-		/* Unmap the page as we are going to pass it to the stack */
-		dma_unmap_page(&bp->pdev->dev,
-			       dma_unmap_addr(&old_rx_pg, mapping),
-			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
-
-		/* Add one frag and update the appropriate fields in the skb */
-		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
-
-		skb->data_len += frag_len;
-		skb->truesize += frag_len;
-		skb->len += frag_len;
-
-		frag_size -= frag_len;
-	}
-
-	return 0;
-}
-
-static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
-			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
-			   u16 cqe_idx)
-{
-	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
-	struct sk_buff *skb = rx_buf->skb;
-	/* alloc new skb */
-	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
-
-	/* Unmap skb in the pool anyway, as we are going to change
-	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
-	   fails. */
-	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
-			 bp->rx_buf_size, DMA_FROM_DEVICE);
-
-	if (likely(new_skb)) {
-		/* fix ip xsum and give it to the stack */
-		/* (no need to map the new skb) */
-#ifdef BCM_VLAN
-		int is_vlan_cqe =
-			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
-			 PARSING_FLAGS_VLAN);
-		int is_not_hwaccel_vlan_cqe =
-			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
-#endif
-
-		prefetch(skb);
-		prefetch(((char *)(skb)) + 128);
-
-#ifdef BNX2X_STOP_ON_ERROR
-		if (pad + len > bp->rx_buf_size) {
-			BNX2X_ERR("skb_put is about to fail...  "
-				  "pad %d  len %d  rx_buf_size %d\n",
-				  pad, len, bp->rx_buf_size);
-			bnx2x_panic();
-			return;
-		}
-#endif
-
-		skb_reserve(skb, pad);
-		skb_put(skb, len);
-
-		skb->protocol = eth_type_trans(skb, bp->dev);
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
-
-		{
-			struct iphdr *iph;
-
-			iph = (struct iphdr *)skb->data;
-#ifdef BCM_VLAN
-			/* If there is no Rx VLAN offloading -
-			   take the VLAN tag into account */
-			if (unlikely(is_not_hwaccel_vlan_cqe))
-				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
-#endif
-			iph->check = 0;
-			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
-		}
-
-		if (!bnx2x_fill_frag_skb(bp, fp, skb,
-					 &cqe->fast_path_cqe, cqe_idx)) {
-#ifdef BCM_VLAN
-			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
-			    (!is_not_hwaccel_vlan_cqe))
-				vlan_gro_receive(&fp->napi, bp->vlgrp,
-						 le16_to_cpu(cqe->fast_path_cqe.
-							     vlan_tag), skb);
-			else
-#endif
-				napi_gro_receive(&fp->napi, skb);
-		} else {
-			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
-			   " - dropping packet!\n");
-			dev_kfree_skb(skb);
-		}
-
-
-		/* put new skb in bin */
-		fp->tpa_pool[queue].skb = new_skb;
-
-	} else {
-		/* else drop the packet and keep the buffer in the bin */
-		DP(NETIF_MSG_RX_STATUS,
-		   "Failed to allocate new skb - dropping packet!\n");
-		fp->eth_q_stats.rx_skb_alloc_failed++;
-	}
-
-	fp->tpa_state[queue] = BNX2X_TPA_STOP;
-}
-
-static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
-					struct bnx2x_fastpath *fp,
-					u16 bd_prod, u16 rx_comp_prod,
-					u16 rx_sge_prod)
-{
-	struct ustorm_eth_rx_producers rx_prods = {0};
-	int i;
-
-	/* Update producers */
-	rx_prods.bd_prod = bd_prod;
-	rx_prods.cqe_prod = rx_comp_prod;
-	rx_prods.sge_prod = rx_sge_prod;
-
-	/*
-	 * Make sure that the BD and SGE data is updated before updating the
-	 * producers since FW might read the BD/SGE right after the producer
-	 * is updated.
-	 * This is only applicable for weak-ordered memory model archs such
-	 * as IA-64. The following barrier is also mandatory since the FW
-	 * assumes BDs must have buffers.
-	 */
-	wmb();
-
-	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
-		REG_WR(bp, BAR_USTRORM_INTMEM +
-		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
-		       ((u32 *)&rx_prods)[i]);
-
-	mmiowb(); /* keep prod updates ordered */
-
-	DP(NETIF_MSG_RX_STATUS,
-	   "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
-	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
-}
-
-/* Set Toeplitz hash value in the skb using the value from the
- * CQE (calculated by HW).
- */
-static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
-					struct sk_buff *skb)
-{
-	/* Set Toeplitz hash from CQE */
-	if ((bp->dev->features & NETIF_F_RXHASH) &&
-	    (cqe->fast_path_cqe.status_flags &
-	     ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
-		skb->rxhash =
-		le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
-}
-
-static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
-{
-	struct bnx2x *bp = fp->bp;
-	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
-	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
-	int rx_pkt = 0;
-
-#ifdef BNX2X_STOP_ON_ERROR
-	if (unlikely(bp->panic))
-		return 0;
-#endif
-
-	/* CQ "next element" is of the size of the regular element,
-	   that's why it's ok here */
-	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
-	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
-		hw_comp_cons++;
-
-	bd_cons = fp->rx_bd_cons;
-	bd_prod = fp->rx_bd_prod;
-	bd_prod_fw = bd_prod;
-	sw_comp_cons = fp->rx_comp_cons;
-	sw_comp_prod = fp->rx_comp_prod;
-
-	/* Memory barrier necessary as speculative reads of the rx
-	 * buffer can be ahead of the index in the status block
-	 */
-	rmb();
-
-	DP(NETIF_MSG_RX_STATUS,
-	   "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
-	   fp->index, hw_comp_cons, sw_comp_cons);
-
-	while (sw_comp_cons != hw_comp_cons) {
-		struct sw_rx_bd *rx_buf = NULL;
-		struct sk_buff *skb;
-		union eth_rx_cqe *cqe;
-		u8 cqe_fp_flags;
-		u16 len, pad;
-
-		comp_ring_cons = RCQ_BD(sw_comp_cons);
-		bd_prod = RX_BD(bd_prod);
-		bd_cons = RX_BD(bd_cons);
-
-		/* Prefetch the page containing the BD descriptor
-		   at the producer's index; it will be needed when a new
-		   skb is allocated */
-		prefetch((void *)(PAGE_ALIGN((unsigned long)
-					     (&fp->rx_desc_ring[bd_prod])) -
-				  PAGE_SIZE + 1));
-
-		cqe = &fp->rx_comp_ring[comp_ring_cons];
-		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
-
-		DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
-		   "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
-		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
-		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
-		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
-		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));
-
-		/* is this a slowpath msg? */
-		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
-			bnx2x_sp_event(fp, cqe);
-			goto next_cqe;
-
-		/* this is an rx packet */
-		} else {
-			rx_buf = &fp->rx_buf_ring[bd_cons];
-			skb = rx_buf->skb;
-			prefetch(skb);
-			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
-			pad = cqe->fast_path_cqe.placement_offset;
-
-			/* If CQE is marked both TPA_START and TPA_END
-			   it is a non-TPA CQE */
-			if ((!fp->disable_tpa) &&
-			    (TPA_TYPE(cqe_fp_flags) !=
-					(TPA_TYPE_START | TPA_TYPE_END))) {
-				u16 queue = cqe->fast_path_cqe.queue_index;
-
-				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
-					DP(NETIF_MSG_RX_STATUS,
-					   "calling tpa_start on queue %d\n",
-					   queue);
-
-					bnx2x_tpa_start(fp, queue, skb,
-							bd_cons, bd_prod);
-
-					/* Set Toeplitz hash for an LRO skb */
-					bnx2x_set_skb_rxhash(bp, cqe, skb);
-
-					goto next_rx;
-				}
-
-				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
-					DP(NETIF_MSG_RX_STATUS,
-					   "calling tpa_stop on queue %d\n",
-					   queue);
-
-					if (!BNX2X_RX_SUM_FIX(cqe))
-						BNX2X_ERR("STOP on non-TCP "
-							  "data\n");
-
-					/* This is the size of the linear data
-					   on this skb */
-					len = le16_to_cpu(cqe->fast_path_cqe.
-								len_on_bd);
-					bnx2x_tpa_stop(bp, fp, queue, pad,
-						    len, cqe, comp_ring_cons);
-#ifdef BNX2X_STOP_ON_ERROR
-					if (bp->panic)
-						return 0;
-#endif
-
-					bnx2x_update_sge_prod(fp,
-							&cqe->fast_path_cqe);
-					goto next_cqe;
-				}
-			}
-
-			dma_sync_single_for_device(&bp->pdev->dev,
-					dma_unmap_addr(rx_buf, mapping),
-						   pad + RX_COPY_THRESH,
-						   DMA_FROM_DEVICE);
-			prefetch(((char *)(skb)) + 128);
-
-			/* is this an error packet? */
-			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
-				DP(NETIF_MSG_RX_ERR,
-				   "ERROR  flags %x  rx packet %u\n",
-				   cqe_fp_flags, sw_comp_cons);
-				fp->eth_q_stats.rx_err_discard_pkt++;
-				goto reuse_rx;
-			}
-
-			/* Since we don't have a jumbo ring,
-			 * copy small packets if mtu > 1500
-			 */
-			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
-			    (len <= RX_COPY_THRESH)) {
-				struct sk_buff *new_skb;
-
-				new_skb = netdev_alloc_skb(bp->dev,
-							   len + pad);
-				if (new_skb == NULL) {
-					DP(NETIF_MSG_RX_ERR,
-					   "ERROR  packet dropped "
-					   "because of alloc failure\n");
-					fp->eth_q_stats.rx_skb_alloc_failed++;
-					goto reuse_rx;
-				}
-
-				/* aligned copy */
-				skb_copy_from_linear_data_offset(skb, pad,
-						    new_skb->data + pad, len);
-				skb_reserve(new_skb, pad);
-				skb_put(new_skb, len);
-
-				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
-
-				skb = new_skb;
-
-			} else
-			if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
-				dma_unmap_single(&bp->pdev->dev,
-					dma_unmap_addr(rx_buf, mapping),
-						 bp->rx_buf_size,
-						 DMA_FROM_DEVICE);
-				skb_reserve(skb, pad);
-				skb_put(skb, len);
-
-			} else {
-				DP(NETIF_MSG_RX_ERR,
-				   "ERROR  packet dropped because "
-				   "of alloc failure\n");
-				fp->eth_q_stats.rx_skb_alloc_failed++;
-reuse_rx:
-				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
-				goto next_rx;
-			}
-
-			skb->protocol = eth_type_trans(skb, bp->dev);
-
-			/* Set Toeplitz hash for a non-LRO skb */
-			bnx2x_set_skb_rxhash(bp, cqe, skb);
-
-			skb->ip_summed = CHECKSUM_NONE;
-			if (bp->rx_csum) {
-				if (likely(BNX2X_RX_CSUM_OK(cqe)))
-					skb->ip_summed = CHECKSUM_UNNECESSARY;
-				else
-					fp->eth_q_stats.hw_csum_err++;
-			}
-		}
-
-		skb_record_rx_queue(skb, fp->index);
-
-#ifdef BCM_VLAN
-		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
-		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
-		     PARSING_FLAGS_VLAN))
-			vlan_gro_receive(&fp->napi, bp->vlgrp,
-				le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
-		else
-#endif
-			napi_gro_receive(&fp->napi, skb);
-
-
-next_rx:
-		rx_buf->skb = NULL;
-
-		bd_cons = NEXT_RX_IDX(bd_cons);
-		bd_prod = NEXT_RX_IDX(bd_prod);
-		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
-		rx_pkt++;
-next_cqe:
-		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
-		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
-
-		if (rx_pkt == budget)
-			break;
-	} /* while */
-
-	fp->rx_bd_cons = bd_cons;
-	fp->rx_bd_prod = bd_prod_fw;
-	fp->rx_comp_cons = sw_comp_cons;
-	fp->rx_comp_prod = sw_comp_prod;
-
-	/* Update producers */
-	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
-			     fp->rx_sge_prod);
-
-	fp->rx_pkt += rx_pkt;
-	fp->rx_calls++;
-
-	return rx_pkt;
-}
-
-static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
-{
-	struct bnx2x_fastpath *fp = fp_cookie;
-	struct bnx2x *bp = fp->bp;
-
-	/* Return here if interrupt is disabled */
-	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
-		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
-		return IRQ_HANDLED;
-	}
-
-	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
-	   fp->index, fp->sb_id);
-	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
-
-#ifdef BNX2X_STOP_ON_ERROR
-	if (unlikely(bp->panic))
-		return IRQ_HANDLED;
-#endif
-
-	/* Handle Rx and Tx according to MSI-X vector */
-	prefetch(fp->rx_cons_sb);
-	prefetch(fp->tx_cons_sb);
-	prefetch(&fp->status_blk->u_status_block.status_block_index);
-	prefetch(&fp->status_blk->c_status_block.status_block_index);
-	napi_schedule(&bnx2x_fp(bp, fp->index, napi));
-
-	return IRQ_HANDLED;
-}
-
-static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
-{
-	struct bnx2x *bp = netdev_priv(dev_instance);
-	u16 status = bnx2x_ack_int(bp);
-	u16 mask;
-	int i;
-
-	/* Return here if interrupt is shared and it's not for us */
-	if (unlikely(status == 0)) {
-		DP(NETIF_MSG_INTR, "not our interrupt!\n");
-		return IRQ_NONE;
-	}
-	DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
-
-	/* Return here if interrupt is disabled */
-	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
-		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
-		return IRQ_HANDLED;
-	}
-
-#ifdef BNX2X_STOP_ON_ERROR
-	if (unlikely(bp->panic))
-		return IRQ_HANDLED;
-#endif
-
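-	/* Each status block owns one bit in the IGU status word, starting
-	 * at bit 1 (hence the 0x2 << sb_id masks below); bit 0 indicates a
-	 * slowpath event and is handed off to sp_task further down.
-	 */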
-	for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
-		struct bnx2x_fastpath *fp = &bp->fp[i];
-
-		mask = 0x2 << fp->sb_id;
-		if (status & mask) {
-			/* Handle Rx and Tx according to SB id */
-			prefetch(fp->rx_cons_sb);
-			prefetch(&fp->status_blk->u_status_block.
-						status_block_index);
-			prefetch(fp->tx_cons_sb);
-			prefetch(&fp->status_blk->c_status_block.
-						status_block_index);
-			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
-			status &= ~mask;
-		}
-	}
-
-#ifdef BCM_CNIC
-	mask = 0x2 << CNIC_SB_ID(bp);
-	if (status & (mask | 0x1)) {
-		struct cnic_ops *c_ops = NULL;
-
-		rcu_read_lock();
-		c_ops = rcu_dereference(bp->cnic_ops);
-		if (c_ops)
-			c_ops->cnic_handler(bp->cnic_data, NULL);
-		rcu_read_unlock();
-
-		status &= ~mask;
-	}
-#endif
-
-	if (unlikely(status & 0x1)) {
-		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
-
-		status &= ~0x1;
-		if (!status)
-			return IRQ_HANDLED;
-	}
-
-	if (unlikely(status))
-		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
-		   status);
-
-	return IRQ_HANDLED;
-}
-
-/* end of fast path */
-
-static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
-
-/* Link */
-
-/*
- * General service functions
- */
-
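-/* The HW resource locks live in the MISC driver control registers (one
- * register per function, spaced 8 bytes apart): a lock is requested by
- * writing the resource bit at offset +4, confirmed by reading it back
- * from the base register, and released by writing the bit to the base
- * register itself.
- */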
-static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
-{
-	u32 lock_status;
-	u32 resource_bit = (1 << resource);
-	int func = BP_FUNC(bp);
-	u32 hw_lock_control_reg;
-	int cnt;
-
-	/* Validating that the resource is within range */
-	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
-		DP(NETIF_MSG_HW,
-		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
-		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
-		return -EINVAL;
-	}
-
-	if (func <= 5) {
-		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
-	} else {
-		hw_lock_control_reg =
-				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
-	}
-
-	/* Validating that the resource is not already taken */
-	lock_status = REG_RD(bp, hw_lock_control_reg);
-	if (lock_status & resource_bit) {
-		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
-		   lock_status, resource_bit);
-		return -EEXIST;
-	}
-
-	/* Try for 5 seconds, polling every 5ms */
-	for (cnt = 0; cnt < 1000; cnt++) {
-		/* Try to acquire the lock */
-		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
-		lock_status = REG_RD(bp, hw_lock_control_reg);
-		if (lock_status & resource_bit)
-			return 0;
-
-		msleep(5);
-	}
-	DP(NETIF_MSG_HW, "Timeout\n");
-	return -EAGAIN;
-}
-
-static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
-{
-	u32 lock_status;
-	u32 resource_bit = (1 << resource);
-	int func = BP_FUNC(bp);
-	u32 hw_lock_control_reg;
-
-	DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
-
-	/* Validating that the resource is within range */
-	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
-		DP(NETIF_MSG_HW,
-		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
-		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
-		return -EINVAL;
-	}
-
-	if (func <= 5) {
-		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
-	} else {
-		hw_lock_control_reg =
-				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
-	}
-
-	/* Validating that the resource is currently taken */
-	lock_status = REG_RD(bp, hw_lock_control_reg);
-	if (!(lock_status & resource_bit)) {
-		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
-		   lock_status, resource_bit);
-		return -EFAULT;
-	}
-
-	REG_WR(bp, hw_lock_control_reg, resource_bit);
-	return 0;
-}
-
-/* HW Lock for shared dual port PHYs */
-static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
-{
-	mutex_lock(&bp->port.phy_mutex);
-
-	if (bp->port.need_hw_lock)
-		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
-}
-
-static void bnx2x_release_phy_lock(struct bnx2x *bp)
-{
-	if (bp->port.need_hw_lock)
-		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
-
-	mutex_unlock(&bp->port.phy_mutex);
-}
-
-int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
-{
-	/* The GPIO should be swapped if swap register is set and active */
-	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
-			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
-	int gpio_shift = gpio_num +
-			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
-	u32 gpio_mask = (1 << gpio_shift);
-	u32 gpio_reg;
-	int value;
-
-	if (gpio_num > MISC_REGISTERS_GPIO_3) {
-		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
-		return -EINVAL;
-	}
-
-	/* read GPIO value */
-	gpio_reg = REG_RD(bp, MISC_REG_GPIO);
-
-	/* get the requested pin value */
-	if ((gpio_reg & gpio_mask) == gpio_mask)
-		value = 1;
-	else
-		value = 0;
-
-	DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
-
-	return value;
-}
-
-int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
-{
-	/* The GPIO should be swapped if swap register is set and active */
-	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
-			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
-	int gpio_shift = gpio_num +
-			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
-	u32 gpio_mask = (1 << gpio_shift);
-	u32 gpio_reg;
-
-	if (gpio_num > MISC_REGISTERS_GPIO_3) {
-		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
-		return -EINVAL;
-	}
-
-	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
-	/* read GPIO and mask except the float bits */
-	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
-
-	switch (mode) {
-	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
-		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
-		   gpio_num, gpio_shift);
-		/* clear FLOAT and set CLR */
-		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
-		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
-		break;
-
-	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
-		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
-		   gpio_num, gpio_shift);
-		/* clear FLOAT and set SET */
-		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
-		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
-		break;
-
-	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
-		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
-		   gpio_num, gpio_shift);
-		/* set FLOAT */
-		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
-		break;
-
-	default:
-		break;
-	}
-
-	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
-	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
-
-	return 0;
-}
-
-int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
-{
-	/* The GPIO should be swapped if swap register is set and active */
-	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
-			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
-	int gpio_shift = gpio_num +
-			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
-	u32 gpio_mask = (1 << gpio_shift);
-	u32 gpio_reg;
-
-	if (gpio_num > MISC_REGISTERS_GPIO_3) {
-		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
-		return -EINVAL;
-	}
-
-	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
-	/* read GPIO int */
-	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
-
-	switch (mode) {
-	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
-		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
-				   "output low\n", gpio_num, gpio_shift);
-		/* clear SET and set CLR */
-		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
-		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
-		break;
-
-	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
-		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
-				   "output high\n", gpio_num, gpio_shift);
-		/* clear CLR and set SET */
-		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
-		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
-		break;
-
-	default:
-		break;
-	}
-
-	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
-	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
-
-	return 0;
-}
-
-static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
-{
-	u32 spio_mask = (1 << spio_num);
-	u32 spio_reg;
-
-	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
-	    (spio_num > MISC_REGISTERS_SPIO_7)) {
-		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
-		return -EINVAL;
-	}
-
-	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
-	/* read SPIO and mask except the float bits */
-	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
-
-	switch (mode) {
-	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
-		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
-		/* clear FLOAT and set CLR */
-		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
-		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
-		break;
-
-	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
-		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
-		/* clear FLOAT and set SET */
-		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
-		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
-		break;
-
-	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
-		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
-		/* set FLOAT */
-		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
-		break;
-
-	default:
-		break;
-	}
-
-	REG_WR(bp, MISC_REG_SPIO, spio_reg);
-	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
-
-	return 0;
-}
-
-static void bnx2x_calc_fc_adv(struct bnx2x *bp)
-{
-	switch (bp->link_vars.ieee_fc &
-		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
-	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
-		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
-					  ADVERTISED_Pause);
-		break;
-
-	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
-		bp->port.advertising |= (ADVERTISED_Asym_Pause |
-					 ADVERTISED_Pause);
-		break;
-
-	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
-		bp->port.advertising |= ADVERTISED_Asym_Pause;
-		break;
-
-	default:
-		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
-					  ADVERTISED_Pause);
-		break;
-	}
-}
-
-static void bnx2x_link_report(struct bnx2x *bp)
-{
-	if (bp->flags & MF_FUNC_DIS) {
-		netif_carrier_off(bp->dev);
-		netdev_err(bp->dev, "NIC Link is Down\n");
-		return;
-	}
-
-	if (bp->link_vars.link_up) {
-		u16 line_speed;
-
-		if (bp->state == BNX2X_STATE_OPEN)
-			netif_carrier_on(bp->dev);
-		netdev_info(bp->dev, "NIC Link is Up, ");
-
-		line_speed = bp->link_vars.line_speed;
-		if (IS_E1HMF(bp)) {
-			u16 vn_max_rate;
-
-			vn_max_rate =
-				((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
-				 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
-			if (vn_max_rate < line_speed)
-				line_speed = vn_max_rate;
-		}
-		pr_cont("%d Mbps ", line_speed);
-
-		if (bp->link_vars.duplex == DUPLEX_FULL)
-			pr_cont("full duplex");
-		else
-			pr_cont("half duplex");
-
-		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
-			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
-				pr_cont(", receive ");
-				if (bp->link_vars.flow_ctrl &
-				    BNX2X_FLOW_CTRL_TX)
-					pr_cont("& transmit ");
-			} else {
-				pr_cont(", transmit ");
-			}
-			pr_cont("flow control ON");
-		}
-		pr_cont("\n");
-
-	} else { /* link_down */
-		netif_carrier_off(bp->dev);
-		netdev_err(bp->dev, "NIC Link is Down\n");
-	}
-}
-
-static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
-{
-	if (!BP_NOMCP(bp)) {
-		u8 rc;
-
-		/* Initialize link parameters structure variables */
-		/* It is recommended to turn off RX FC for jumbo frames
-		   for better performance */
-		if (bp->dev->mtu > 5000)
-			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
-		else
-			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
-
-		bnx2x_acquire_phy_lock(bp);
-
-		if (load_mode == LOAD_DIAG)
-			bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
-
-		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
-
-		bnx2x_release_phy_lock(bp);
-
-		bnx2x_calc_fc_adv(bp);
-
-		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
-			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
-			bnx2x_link_report(bp);
-		}
-
-		return rc;
-	}
-	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
-	return -EINVAL;
-}
-
-static void bnx2x_link_set(struct bnx2x *bp)
-{
-	if (!BP_NOMCP(bp)) {
-		bnx2x_acquire_phy_lock(bp);
-		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
-		bnx2x_release_phy_lock(bp);
-
-		bnx2x_calc_fc_adv(bp);
-	} else
-		BNX2X_ERR("Bootcode is missing - can not set link\n");
-}
-
-static void bnx2x__link_reset(struct bnx2x *bp)
-{
-	if (!BP_NOMCP(bp)) {
-		bnx2x_acquire_phy_lock(bp);
-		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
-		bnx2x_release_phy_lock(bp);
-	} else
-		BNX2X_ERR("Bootcode is missing - can not reset link\n");
-}
-
-static u8 bnx2x_link_test(struct bnx2x *bp)
-{
-	u8 rc = 0;
-
-	if (!BP_NOMCP(bp)) {
-		bnx2x_acquire_phy_lock(bp);
-		rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
-		bnx2x_release_phy_lock(bp);
-	} else
-		BNX2X_ERR("Bootcode is missing - can not test link\n");
-
-	return rc;
-}
-
-static void bnx2x_init_port_minmax(struct bnx2x *bp)
-{
-	u32 r_param = bp->link_vars.line_speed / 8;
-	u32 fair_periodic_timeout_usec;
-	u32 t_fair;
-
-	memset(&(bp->cmng.rs_vars), 0,
-	       sizeof(struct rate_shaping_vars_per_port));
-	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
-
-	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
-	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
-
-	/* this is the threshold below which no timer arming will occur.
-	   The 1.25 coefficient makes the threshold a little bigger than
-	   the real time, to compensate for timer inaccuracy */
-	bp->cmng.rs_vars.rs_threshold =
-				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
-
-	/* resolution of fairness timer */
-	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
-	/* for 10G it is 1000usec. for 1G it is 10000usec. */
-	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
-
-	/* this is the threshold below which we won't arm the timer anymore */
-	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
-
-	/* we multiply by 1e3/8 to get bytes/msec.
-	   We don't want the credits to exceed t_fair*FAIR_MEM
-	   (the algorithm resolution) */
-	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
-	/* since each tick is 4 usec */
-	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
-}
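For illustration, a minimal user-space sketch of the threshold arithmetic above, assuming RS_PERIODIC_TIMEOUT_USEC is 100 and a 10G link (line_speed in Mbps divided by 8 gives roughly bytes per usec); the *5/4 form is the 1.25 coefficient kept in integer math:

#include <stdio.h>

int main(void)
{
	const unsigned int timeout_usec = 100; /* assumed RS_PERIODIC_TIMEOUT_USEC */
	unsigned int line_speed = 10000;       /* Mbps, 10G link */
	unsigned int r_param = line_speed / 8; /* ~bytes per usec */

	/* 100 usec expressed in 4-usec SDM ticks -> 25 */
	printf("rs_periodic_timeout = %u ticks\n", timeout_usec / 4);
	/* 1.25 coefficient computed as *5/4 in integer math -> 156250 */
	printf("rs_threshold = %u bytes\n", (timeout_usec * r_param * 5) / 4);
	return 0;
}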
-
-/* Calculates the sum of vn_min_rates.
-   It's needed for further normalizing of the min_rates.
-   Returns:
-     sum of vn_min_rates.
-       or
-     0 - if all the min_rates are 0.
-     In the latter case the fairness algorithm should be deactivated.
-     If not all min_rates are zero, then those that are zero will be set to 1.
- */
-static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
-{
-	int all_zero = 1;
-	int port = BP_PORT(bp);
-	int vn;
-
-	bp->vn_weight_sum = 0;
-	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
-		int func = 2*vn + port;
-		u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
-		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
-				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
-
-		/* Skip hidden vns */
-		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
-			continue;
-
-		/* If min rate is zero - set it to 1 */
-		if (!vn_min_rate)
-			vn_min_rate = DEF_MIN_RATE;
-		else
-			all_zero = 0;
-
-		bp->vn_weight_sum += vn_min_rate;
-	}
-
-	/* ... only if all min rates are zeros - disable fairness */
-	if (all_zero) {
-		bp->cmng.flags.cmng_enables &=
-					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
-		DP(NETIF_MSG_IFUP, "All MIN values are zeroes;"
-		   " fairness will be disabled\n");
-	} else
-		bp->cmng.flags.cmng_enables |=
-					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
-}
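A sketch of the weighting rule just implemented, assuming DEF_MIN_RATE is 100 and four hypothetical VNs: zero entries are bumped to the default so one configured minimum cannot starve the rest, and fairness is disabled only when every entry is zero.

static unsigned int vn_weight_sum(void)
{
	/* hypothetical per-VN configured minimums, in Mbps */
	unsigned int min_rate[4] = { 0, 2500, 0, 5000 };
	unsigned int sum = 0;
	int vn;

	for (vn = 0; vn < 4; vn++)
		sum += min_rate[vn] ? min_rate[vn] : 100; /* DEF_MIN_RATE assumed 100 */

	return sum; /* 7700 here; only an all-zero config disables fairness */
}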
-
-static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
-{
-	struct rate_shaping_vars_per_vn m_rs_vn;
-	struct fairness_vars_per_vn m_fair_vn;
-	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
-	u16 vn_min_rate, vn_max_rate;
-	int i;
-
-	/* If function is hidden - set min and max to zeroes */
-	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
-		vn_min_rate = 0;
-		vn_max_rate = 0;
-
-	} else {
-		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
-				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
-		/* If min rate is zero - set it to 1 */
-		if (!vn_min_rate)
-			vn_min_rate = DEF_MIN_RATE;
-		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
-				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
-	}
-	DP(NETIF_MSG_IFUP,
-	   "func %d: vn_min_rate %d  vn_max_rate %d  vn_weight_sum %d\n",
-	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
-
-	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
-	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
-
-	/* global vn counter - maximal Mbps for this vn */
-	m_rs_vn.vn_counter.rate = vn_max_rate;
-
-	/* quota - number of bytes transmitted in this period */
-	m_rs_vn.vn_counter.quota =
-				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
-
-	if (bp->vn_weight_sum) {
-		/* credit for each period of the fairness algorithm:
-		   number of bytes in T_FAIR (the VNs share the port rate).
-		   vn_weight_sum should not be larger than 10000, thus
-		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
-		   than zero */
-		m_fair_vn.vn_credit_delta =
-			max_t(u32, (vn_min_rate * (T_FAIR_COEF /
-						   (8 * bp->vn_weight_sum))),
-			      (bp->cmng.fair_vars.fair_threshold * 2));
-		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
-		   m_fair_vn.vn_credit_delta);
-	}
-
-	/* Store it to internal memory */
-	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
-		REG_WR(bp, BAR_XSTRORM_INTMEM +
-		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
-		       ((u32 *)(&m_rs_vn))[i]);
-
-	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
-		REG_WR(bp, BAR_XSTRORM_INTMEM +
-		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
-		       ((u32 *)(&m_fair_vn))[i]);
-}
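The two copy loops above push each structure into STORM internal memory one 32-bit word at a time. The same pattern in isolation, with a hypothetical reg_write() standing in for REG_WR:

void reg_write(unsigned long addr, unsigned int val); /* stand-in for REG_WR */

static void copy_to_intmem(unsigned long base, const void *src, unsigned int len)
{
	const unsigned int *words = src;
	unsigned int i;

	for (i = 0; i < len / 4; i++)          /* dword-granular writes only */
		reg_write(base + i * 4, words[i]);
}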
-
-
-/* This function is called upon link interrupt */
-static void bnx2x_link_attn(struct bnx2x *bp)
-{
-	u32 prev_link_status = bp->link_vars.link_status;
-	/* Make sure that we are synced with the current statistics */
-	bnx2x_stats_handle(bp, STATS_EVENT_STOP);
-
-	bnx2x_link_update(&bp->link_params, &bp->link_vars);
-
-	if (bp->link_vars.link_up) {
-
-		/* dropless flow control */
-		if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
-			int port = BP_PORT(bp);
-			u32 pause_enabled = 0;
-
-			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
-				pause_enabled = 1;
-
-			REG_WR(bp, BAR_USTRORM_INTMEM +
-			       USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
-			       pause_enabled);
-		}
-
-		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
-			struct host_port_stats *pstats;
-
-			pstats = bnx2x_sp(bp, port_stats);
-			/* reset old bmac stats */
-			memset(&(pstats->mac_stx[0]), 0,
-			       sizeof(struct mac_stx));
-		}
-		if (bp->state == BNX2X_STATE_OPEN)
-			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
-	}
-
-	/* indicate link status only if link status actually changed */
-	if (prev_link_status != bp->link_vars.link_status)
-		bnx2x_link_report(bp);
-
-	if (IS_E1HMF(bp)) {
-		int port = BP_PORT(bp);
-		int func;
-		int vn;
-
-		/* Set the attention towards other drivers on the same port */
-		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
-			if (vn == BP_E1HVN(bp))
-				continue;
-
-			func = ((vn << 1) | port);
-			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
-			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
-		}
-
-		if (bp->link_vars.link_up) {
-			int i;
-
-			/* Init rate shaping and fairness contexts */
-			bnx2x_init_port_minmax(bp);
-
-			for (vn = VN_0; vn < E1HVN_MAX; vn++)
-				bnx2x_init_vn_minmax(bp, 2*vn + port);
-
-			/* Store it to internal memory */
-			for (i = 0;
-			     i < sizeof(struct cmng_struct_per_port) / 4; i++)
-				REG_WR(bp, BAR_XSTRORM_INTMEM +
-				  XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
-				       ((u32 *)(&bp->cmng))[i]);
-		}
-	}
-}
-
-static void bnx2x__link_status_update(struct bnx2x *bp)
-{
-	if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
-		return;
-
-	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
-
-	if (bp->link_vars.link_up)
-		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
-	else
-		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
-
-	bnx2x_calc_vn_weight_sum(bp);
-
-	/* indicate link status */
-	bnx2x_link_report(bp);
-}
-
-static void bnx2x_pmf_update(struct bnx2x *bp)
-{
-	int port = BP_PORT(bp);
-	u32 val;
-
-	bp->port.pmf = 1;
-	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
-
-	/* enable nig attention */
-	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
-	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
-	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
-
-	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
-}
-
-/* end of Link */
-
-/* slow path */
-
-/*
- * General service functions
- */
-
-/* send the MCP a request, block until there is a reply */
-u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
-{
-	int func = BP_FUNC(bp);
-	u32 seq = ++bp->fw_seq;
-	u32 rc = 0;
-	u32 cnt = 1;
-	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
-
-	mutex_lock(&bp->fw_mb_mutex);
-	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
-	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
-
-	do {
-		/* let the FW do its magic ... */
-		msleep(delay);
-
-		rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
-
-		/* Give the FW up to 5 seconds (500*10ms) */
-	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
-
-	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
-	   cnt*delay, rc, seq);
-
-	/* is this a reply to our command? */
-	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
-		rc &= FW_MSG_CODE_MASK;
-	else {
-		/* FW BUG! */
-		BNX2X_ERR("FW failed to respond!\n");
-		bnx2x_fw_dump(bp);
-		rc = 0;
-	}
-	mutex_unlock(&bp->fw_mb_mutex);
-
-	return rc;
-}
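The handshake above is a sequence-matched mailbox poll: write command|seq, then poll until the firmware echoes the sequence back or the retry budget runs out. A generic sketch of the pattern, assuming the sequence number lives in the low 16 bits, with hypothetical mb_read()/mb_write()/sleep_ms() helpers:

unsigned int mb_read(void);
void mb_write(unsigned int v);
void sleep_ms(unsigned int ms);

unsigned int fw_cmd(unsigned int cmd, unsigned int seq)
{
	unsigned int rc, tries = 0;

	mb_write(cmd | seq);
	do {
		sleep_ms(10);
		rc = mb_read();
	} while ((rc & 0xffff) != seq && ++tries < 500); /* ~5 s budget */

	/* a reply counts only if it carries our sequence number */
	return ((rc & 0xffff) == seq) ? (rc & ~0xffffu) : 0;
}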
-
-static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
-static void bnx2x_set_rx_mode(struct net_device *dev);
-
-static void bnx2x_e1h_disable(struct bnx2x *bp)
-{
-	int port = BP_PORT(bp);
-
-	netif_tx_disable(bp->dev);
-
-	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
-
-	netif_carrier_off(bp->dev);
-}
-
-static void bnx2x_e1h_enable(struct bnx2x *bp)
-{
-	int port = BP_PORT(bp);
-
-	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
-
-	/* Only the Tx queues should be re-enabled */
-	netif_tx_wake_all_queues(bp->dev);
-
-	/*
-	 * Should not call netif_carrier_on since it will be called if the link
-	 * is up when checking for link state
-	 */
-}
-
-static void bnx2x_update_min_max(struct bnx2x *bp)
-{
-	int port = BP_PORT(bp);
-	int vn, i;
-
-	/* Init rate shaping and fairness contexts */
-	bnx2x_init_port_minmax(bp);
-
-	bnx2x_calc_vn_weight_sum(bp);
-
-	for (vn = VN_0; vn < E1HVN_MAX; vn++)
-		bnx2x_init_vn_minmax(bp, 2*vn + port);
-
-	if (bp->port.pmf) {
-		int func;
-
-		/* Set the attention towards other drivers on the same port */
-		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
-			if (vn == BP_E1HVN(bp))
-				continue;
-
-			func = ((vn << 1) | port);
-			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
-			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
-		}
-
-		/* Store it to internal memory */
-		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
-			REG_WR(bp, BAR_XSTRORM_INTMEM +
-			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
-			       ((u32 *)(&bp->cmng))[i]);
-	}
-}
-
-static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
-{
-	DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
-
-	if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
-
-		/*
-		 * This is the only place besides the function initialization
-		 * where bp->flags can change, so it is done without any
-		 * locks
-		 */
-		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
-			DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
-			bp->flags |= MF_FUNC_DIS;
-
-			bnx2x_e1h_disable(bp);
-		} else {
-			DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
-			bp->flags &= ~MF_FUNC_DIS;
-
-			bnx2x_e1h_enable(bp);
-		}
-		dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
-	}
-	if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
-
-		bnx2x_update_min_max(bp);
-		dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
-	}
-
-	/* Report results to MCP */
-	if (dcc_event)
-		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
-	else
-		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
-}
-
-/* must be called under the spq lock */
-static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
-{
-	struct eth_spe *next_spe = bp->spq_prod_bd;
-
-	if (bp->spq_prod_bd == bp->spq_last_bd) {
-		bp->spq_prod_bd = bp->spq;
-		bp->spq_prod_idx = 0;
-		DP(NETIF_MSG_TIMER, "end of spq\n");
-	} else {
-		bp->spq_prod_bd++;
-		bp->spq_prod_idx++;
-	}
-	return next_spe;
-}
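The helper above hands out the current producer slot and only then advances, wrapping the pointer and index together at the last BD. The same ring-producer pattern in isolation:

struct slot { unsigned int data[4]; };

struct ring {
	struct slot *base, *last, *prod;
	unsigned int prod_idx;
};

static struct slot *ring_get_next(struct ring *r)
{
	struct slot *cur = r->prod;      /* hand out the current slot */

	if (r->prod == r->last) {        /* wrap back to the start */
		r->prod = r->base;
		r->prod_idx = 0;
	} else {
		r->prod++;
		r->prod_idx++;
	}
	return cur;
}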
-
-/* must be called under the spq lock */
-static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
-{
-	int func = BP_FUNC(bp);
-
-	/* Make sure that BD data is updated before writing the producer */
-	wmb();
-
-	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
-	       bp->spq_prod_idx);
-	mmiowb();
-}
-
-/* the slow path queue is odd since completions arrive on the fastpath ring */
-static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
-			 u32 data_hi, u32 data_lo, int common)
-{
-	struct eth_spe *spe;
-
-#ifdef BNX2X_STOP_ON_ERROR
-	if (unlikely(bp->panic))
-		return -EIO;
-#endif
-
-	spin_lock_bh(&bp->spq_lock);
-
-	if (!bp->spq_left) {
-		BNX2X_ERR("BUG! SPQ ring full!\n");
-		spin_unlock_bh(&bp->spq_lock);
-		bnx2x_panic();
-		return -EBUSY;
-	}
-
-	spe = bnx2x_sp_get_next(bp);
-
-	/* CID needs the port number to be encoded in it */
-	spe->hdr.conn_and_cmd_data =
-			cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
-				    HW_CID(bp, cid));
-	spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
-	if (common)
-		spe->hdr.type |=
-			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
-
-	spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
-	spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
-
-	bp->spq_left--;
-
-	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
-	   "SPQE[%x] (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
-	   bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
-	   (u32)(U64_LO(bp->spq_mapping) +
-	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
-	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
-
-	bnx2x_sp_prod_update(bp);
-	spin_unlock_bh(&bp->spq_lock);
-	return 0;
-}
-
-/* acquire split MCP access lock register */
-static int bnx2x_acquire_alr(struct bnx2x *bp)
-{
-	u32 j, val;
-	int rc = 0;
-
-	might_sleep();
-	for (j = 0; j < 1000; j++) {
-		val = (1UL << 31);
-		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
-		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
-		if (val & (1L << 31))
-			break;
-
-		msleep(5);
-	}
-	if (!(val & (1L << 31))) {
-		BNX2X_ERR("Cannot acquire MCP access lock register\n");
-		rc = -EBUSY;
-	}
-
-	return rc;
-}
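The ALR acquire is a write-then-read-back handshake on bit 31: the driver requests the lock by writing the bit and owns it only if the read-back shows the bit stuck. The core of the pattern, with hypothetical GRC accessors:

unsigned int grc_read(unsigned long addr);
void grc_write(unsigned long addr, unsigned int val);

static int alr_try_lock(unsigned long addr)
{
	grc_write(addr, 1u << 31);          /* request the lock */
	return (grc_read(addr) >> 31) & 1;  /* granted only if it sticks */
}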
-
-/* release split MCP access lock register */
-static void bnx2x_release_alr(struct bnx2x *bp)
-{
-	REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
-}
-
-static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
-{
-	struct host_def_status_block *def_sb = bp->def_status_blk;
-	u16 rc = 0;
-
-	barrier(); /* status block is written to by the chip */
-	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
-		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
-		rc |= 1;
-	}
-	if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
-		bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
-		rc |= 2;
-	}
-	if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
-		bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
-		rc |= 4;
-	}
-	if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
-		bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
-		rc |= 8;
-	}
-	if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
-		bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
-		rc |= 16;
-	}
-	return rc;
-}
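Each branch above is the same latch-and-compare step: cache the last seen index and report whether the hardware moved it. Factored out as a sketch:

static int idx_changed(unsigned short *cached, unsigned short hw_idx)
{
	if (*cached != hw_idx) {
		*cached = hw_idx;   /* latch the new producer index */
		return 1;
	}
	return 0;
}
/* rc would then collect one bit per status block, e.g.
 * rc |= idx_changed(&def_c_idx, sb_c_idx) << 1; */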
-
-/*
- * slow path service functions
- */
-
-static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
-{
-	int port = BP_PORT(bp);
-	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
-		       COMMAND_REG_ATTN_BITS_SET);
-	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
-			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
-	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
-				       NIG_REG_MASK_INTERRUPT_PORT0;
-	u32 aeu_mask;
-	u32 nig_mask = 0;
-
-	if (bp->attn_state & asserted)
-		BNX2X_ERR("IGU ERROR\n");
-
-	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
-	aeu_mask = REG_RD(bp, aeu_addr);
-
-	DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
-	   aeu_mask, asserted);
-	aeu_mask &= ~(asserted & 0x3ff);
-	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
-
-	REG_WR(bp, aeu_addr, aeu_mask);
-	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
-
-	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
-	bp->attn_state |= asserted;
-	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
-
-	if (asserted & ATTN_HARD_WIRED_MASK) {
-		if (asserted & ATTN_NIG_FOR_FUNC) {
-
-			bnx2x_acquire_phy_lock(bp);
-
-			/* save nig interrupt mask */
-			nig_mask = REG_RD(bp, nig_int_mask_addr);
-			REG_WR(bp, nig_int_mask_addr, 0);
-
-			bnx2x_link_attn(bp);
-
-			/* handle unicore attn? */
-		}
-		if (asserted & ATTN_SW_TIMER_4_FUNC)
-			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
-
-		if (asserted & GPIO_2_FUNC)
-			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
-
-		if (asserted & GPIO_3_FUNC)
-			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
-
-		if (asserted & GPIO_4_FUNC)
-			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
-
-		if (port == 0) {
-			if (asserted & ATTN_GENERAL_ATTN_1) {
-				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
-				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
-			}
-			if (asserted & ATTN_GENERAL_ATTN_2) {
-				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
-				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
-			}
-			if (asserted & ATTN_GENERAL_ATTN_3) {
-				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
-				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
-			}
-		} else {
-			if (asserted & ATTN_GENERAL_ATTN_4) {
-				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
-				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
-			}
-			if (asserted & ATTN_GENERAL_ATTN_5) {
-				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
-				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
-			}
-			if (asserted & ATTN_GENERAL_ATTN_6) {
-				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
-				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
-			}
-		}
-
-	} /* if hardwired */
-
-	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
-	   asserted, hc_addr);
-	REG_WR(bp, hc_addr, asserted);
-
-	/* now set back the mask */
-	if (asserted & ATTN_NIG_FOR_FUNC) {
-		REG_WR(bp, nig_int_mask_addr, nig_mask);
-		bnx2x_release_phy_lock(bp);
-	}
-}
-
-static inline void bnx2x_fan_failure(struct bnx2x *bp)
-{
-	int port = BP_PORT(bp);
-
-	/* mark the failure */
-	bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
-	bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
-	SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
-		 bp->link_params.ext_phy_config);
-
-	/* log the failure */
-	netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
-	       " the driver to shutdown the card to prevent permanent"
-	       " damage.  Please contact OEM Support for assistance\n");
-}
-
-static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
-{
-	int port = BP_PORT(bp);
-	int reg_offset;
-	u32 val, swap_val, swap_override;
-
-	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
-			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
-
-	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
-
-		val = REG_RD(bp, reg_offset);
-		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
-		REG_WR(bp, reg_offset, val);
-
-		BNX2X_ERR("SPIO5 hw attention\n");
-
-		/* Fan failure attention */
-		switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
-		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
-			/* Low power mode is controlled by GPIO 2 */
-			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
-				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
-			/* The PHY reset is controlled by GPIO 1 */
-			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
-				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
-			break;
-
-		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
-			/* The PHY reset is controlled by GPIO 1 */
-			/* fake the port number to cancel the swap done in
-			   set_gpio() */
-			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
-			swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
-			port = (swap_val && swap_override) ^ 1;
-			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
-				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
-			break;
-
-		default:
-			break;
-		}
-		bnx2x_fan_failure(bp);
-	}
-
-	if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
-		    AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
-		bnx2x_acquire_phy_lock(bp);
-		bnx2x_handle_module_detect_int(&bp->link_params);
-		bnx2x_release_phy_lock(bp);
-	}
-
-	if (attn & HW_INTERRUT_ASSERT_SET_0) {
-
-		val = REG_RD(bp, reg_offset);
-		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
-		REG_WR(bp, reg_offset, val);
-
-		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
-			  (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
-		bnx2x_panic();
-	}
-}
-
-static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
-{
-	u32 val;
-
-	if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
-
-		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
-		BNX2X_ERR("DB hw attention 0x%x\n", val);
-		/* DORQ discard attention */
-		if (val & 0x2)
-			BNX2X_ERR("FATAL error from DORQ\n");
-	}
-
-	if (attn & HW_INTERRUT_ASSERT_SET_1) {
-
-		int port = BP_PORT(bp);
-		int reg_offset;
-
-		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
-				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
-
-		val = REG_RD(bp, reg_offset);
-		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
-		REG_WR(bp, reg_offset, val);
-
-		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
-			  (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
-		bnx2x_panic();
-	}
-}
-
-static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
-{
-	u32 val;
-
-	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
-
-		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
-		BNX2X_ERR("CFC hw attention 0x%x\n", val);
-		/* CFC error attention */
-		if (val & 0x2)
-			BNX2X_ERR("FATAL error from CFC\n");
-	}
-
-	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
-
-		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
-		BNX2X_ERR("PXP hw attention 0x%x\n", val);
-		/* RQ_USDMDP_FIFO_OVERFLOW */
-		if (val & 0x18000)
-			BNX2X_ERR("FATAL error from PXP\n");
-	}
-
-	if (attn & HW_INTERRUT_ASSERT_SET_2) {
-
-		int port = BP_PORT(bp);
-		int reg_offset;
-
-		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
-				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
-
-		val = REG_RD(bp, reg_offset);
-		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
-		REG_WR(bp, reg_offset, val);
-
-		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
-			  (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
-		bnx2x_panic();
-	}
-}
-
-static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
-{
-	u32 val;
-
-	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
-
-		if (attn & BNX2X_PMF_LINK_ASSERT) {
-			int func = BP_FUNC(bp);
-
-			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
-			bp->mf_config = SHMEM_RD(bp,
-					   mf_cfg.func_mf_config[func].config);
-			val = SHMEM_RD(bp, func_mb[func].drv_status);
-			if (val & DRV_STATUS_DCC_EVENT_MASK)
-				bnx2x_dcc_event(bp,
-					    (val & DRV_STATUS_DCC_EVENT_MASK));
-			bnx2x__link_status_update(bp);
-			if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
-				bnx2x_pmf_update(bp);
-
-		} else if (attn & BNX2X_MC_ASSERT_BITS) {
-
-			BNX2X_ERR("MC assert!\n");
-			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
-			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
-			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
-			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
-			bnx2x_panic();
-
-		} else if (attn & BNX2X_MCP_ASSERT) {
-
-			BNX2X_ERR("MCP assert!\n");
-			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
-			bnx2x_fw_dump(bp);
-
-		} else
-			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
-	}
-
-	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
-		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
-		if (attn & BNX2X_GRC_TIMEOUT) {
-			val = CHIP_IS_E1H(bp) ?
-				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
-			BNX2X_ERR("GRC time-out 0x%08x\n", val);
-		}
-		if (attn & BNX2X_GRC_RSV) {
-			val = CHIP_IS_E1H(bp) ?
-				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
-			BNX2X_ERR("GRC reserved 0x%08x\n", val);
-		}
-		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
-	}
-}
-
-static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
-static int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
-
-
-#define BNX2X_MISC_GEN_REG      MISC_REG_GENERIC_POR_1
-#define LOAD_COUNTER_BITS	16 /* Number of bits for load counter */
-#define LOAD_COUNTER_MASK	(((u32)0x1 << LOAD_COUNTER_BITS) - 1)
-#define RESET_DONE_FLAG_MASK	(~LOAD_COUNTER_MASK)
-#define RESET_DONE_FLAG_SHIFT	LOAD_COUNTER_BITS
-#define CHIP_PARITY_SUPPORTED(bp)   (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
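A sketch of the layout these masks carve out of the shared register (bits 31:16 form RESET_DONE_FLAG_MASK, though only bit 16 is ever toggled), plus the masked increment used by the load counter helpers below:

/*
 *  31               17 16 15                0
 * +-------------------+--+------------------+
 * |     (unused)      |R |   load counter   |
 * +-------------------+--+------------------+
 * R = reset-in-progress flag at RESET_DONE_FLAG_SHIFT
 */
unsigned int bump_load_cnt(unsigned int reg)
{
	unsigned int cnt = (reg + 1) & 0xffff;  /* LOAD_COUNTER_MASK */
	return (reg & ~0xffffu) | cnt;          /* preserve the flag bits */
}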
-/*
- * should be run under rtnl lock
- */
-static inline void bnx2x_set_reset_done(struct bnx2x *bp)
-{
-	u32 val	= REG_RD(bp, BNX2X_MISC_GEN_REG);
-	val &= ~(1 << RESET_DONE_FLAG_SHIFT);
-	REG_WR(bp, BNX2X_MISC_GEN_REG, val);
-	barrier();
-	mmiowb();
-}
-
-/*
- * should be run under rtnl lock
- */
-static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
-{
-	u32 val	= REG_RD(bp, BNX2X_MISC_GEN_REG);
-	val |= (1 << RESET_DONE_FLAG_SHIFT);
-	REG_WR(bp, BNX2X_MISC_GEN_REG, val);
-	barrier();
-	mmiowb();
-}
-
-/*
- * should be run under rtnl lock
- */
-static inline bool bnx2x_reset_is_done(struct bnx2x *bp)
-{
-	u32 val	= REG_RD(bp, BNX2X_MISC_GEN_REG);
-	DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
-	return (val & RESET_DONE_FLAG_MASK) ? false : true;
-}
-
-/*
- * should be run under rtnl lock
- */
-static inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
-{
-	u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
-
-	DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
-
-	val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
-	REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
-	barrier();
-	mmiowb();
-}
-
-/*
- * should be run under rtnl lock
- */
-static inline u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
-{
-	u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
-
-	DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
-
-	val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
-	REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
-	barrier();
-	mmiowb();
-
-	return val1;
-}
-
-/*
- * should be run under rtnl lock
- */
-static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
-{
-	return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
-}
-
-static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
-{
-	u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
-	REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
-}
-
-static inline void _print_next_block(int idx, const char *blk)
-{
-	if (idx)
-		pr_cont(", ");
-	pr_cont("%s", blk);
-}
-
-static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
-{
-	int i = 0;
-	u32 cur_bit = 0;
-	for (i = 0; sig; i++) {
-		cur_bit = ((u32)0x1 << i);
-		if (sig & cur_bit) {
-			switch (cur_bit) {
-			case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
-				_print_next_block(par_num++, "BRB");
-				break;
-			case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
-				_print_next_block(par_num++, "PARSER");
-				break;
-			case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
-				_print_next_block(par_num++, "TSDM");
-				break;
-			case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
-				_print_next_block(par_num++, "SEARCHER");
-				break;
-			case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
-				_print_next_block(par_num++, "TSEMI");
-				break;
-			}
-
-			/* Clear the bit */
-			sig &= ~cur_bit;
-		}
-	}
-
-	return par_num;
-}
-
-static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
-{
-	int i = 0;
-	u32 cur_bit = 0;
-	for (i = 0; sig; i++) {
-		cur_bit = ((u32)0x1 << i);
-		if (sig & cur_bit) {
-			switch (cur_bit) {
-			case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
-				_print_next_block(par_num++, "PBCLIENT");
-				break;
-			case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
-				_print_next_block(par_num++, "QM");
-				break;
-			case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
-				_print_next_block(par_num++, "XSDM");
-				break;
-			case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
-				_print_next_block(par_num++, "XSEMI");
-				break;
-			case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
-				_print_next_block(par_num++, "DOORBELLQ");
-				break;
-			case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
-				_print_next_block(par_num++, "VAUX PCI CORE");
-				break;
-			case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
-				_print_next_block(par_num++, "DEBUG");
-				break;
-			case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
-				_print_next_block(par_num++, "USDM");
-				break;
-			case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
-				_print_next_block(par_num++, "USEMI");
-				break;
-			case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
-				_print_next_block(par_num++, "UPB");
-				break;
-			case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
-				_print_next_block(par_num++, "CSDM");
-				break;
-			}
-
-			/* Clear the bit */
-			sig &= ~cur_bit;
-		}
-	}
-
-	return par_num;
-}
-
-static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
-{
-	int i = 0;
-	u32 cur_bit = 0;
-	for (i = 0; sig; i++) {
-		cur_bit = ((u32)0x1 << i);
-		if (sig & cur_bit) {
-			switch (cur_bit) {
-			case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
-				_print_next_block(par_num++, "CSEMI");
-				break;
-			case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
-				_print_next_block(par_num++, "PXP");
-				break;
-			case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
-				_print_next_block(par_num++,
-					"PXPPCICLOCKCLIENT");
-				break;
-			case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
-				_print_next_block(par_num++, "CFC");
-				break;
-			case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
-				_print_next_block(par_num++, "CDU");
-				break;
-			case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
-				_print_next_block(par_num++, "IGU");
-				break;
-			case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
-				_print_next_block(par_num++, "MISC");
-				break;
-			}
-
-			/* Clear the bit */
-			sig &= ~cur_bit;
-		}
-	}
-
-	return par_num;
-}
-
-static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
-{
-	int i = 0;
-	u32 cur_bit = 0;
-	for (i = 0; sig; i++) {
-		cur_bit = ((u32)0x1 << i);
-		if (sig & cur_bit) {
-			switch (cur_bit) {
-			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
-				_print_next_block(par_num++, "MCP ROM");
-				break;
-			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
-				_print_next_block(par_num++, "MCP UMP RX");
-				break;
-			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
-				_print_next_block(par_num++, "MCP UMP TX");
-				break;
-			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
-				_print_next_block(par_num++, "MCP SCPAD");
-				break;
-			}
-
-			/* Clear the bit */
-			sig &= ~cur_bit;
-		}
-	}
-
-	return par_num;
-}
-
-static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
-				     u32 sig2, u32 sig3)
-{
-	if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
-	    (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
-		int par_num = 0;
-		DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
-			"[0]:0x%08x [1]:0x%08x "
-			"[2]:0x%08x [3]:0x%08x\n",
-			  sig0 & HW_PRTY_ASSERT_SET_0,
-			  sig1 & HW_PRTY_ASSERT_SET_1,
-			  sig2 & HW_PRTY_ASSERT_SET_2,
-			  sig3 & HW_PRTY_ASSERT_SET_3);
-		printk(KERN_ERR"%s: Parity errors detected in blocks: ",
-		       bp->dev->name);
-		par_num = bnx2x_print_blocks_with_parity0(
-			sig0 & HW_PRTY_ASSERT_SET_0, par_num);
-		par_num = bnx2x_print_blocks_with_parity1(
-			sig1 & HW_PRTY_ASSERT_SET_1, par_num);
-		par_num = bnx2x_print_blocks_with_parity2(
-			sig2 & HW_PRTY_ASSERT_SET_2, par_num);
-		par_num = bnx2x_print_blocks_with_parity3(
-			sig3 & HW_PRTY_ASSERT_SET_3, par_num);
-		printk("\n");
-		return true;
-	} else
-		return false;
-}
-
-static bool bnx2x_chk_parity_attn(struct bnx2x *bp)
-{
-	struct attn_route attn;
-	int port = BP_PORT(bp);
-
-	attn.sig[0] = REG_RD(bp,
-		MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
-			     port*4);
-	attn.sig[1] = REG_RD(bp,
-		MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
-			     port*4);
-	attn.sig[2] = REG_RD(bp,
-		MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
-			     port*4);
-	attn.sig[3] = REG_RD(bp,
-		MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
-			     port*4);
-
-	return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
-					attn.sig[3]);
-}
-
-static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
-{
-	struct attn_route attn, *group_mask;
-	int port = BP_PORT(bp);
-	int index;
-	u32 reg_addr;
-	u32 val;
-	u32 aeu_mask;
-
-	/* need to take HW lock because MCP or other port might also
-	   try to handle this event */
-	bnx2x_acquire_alr(bp);
-
-	if (bnx2x_chk_parity_attn(bp)) {
-		bp->recovery_state = BNX2X_RECOVERY_INIT;
-		bnx2x_set_reset_in_progress(bp);
-		schedule_delayed_work(&bp->reset_task, 0);
-		/* Disable HW interrupts */
-		bnx2x_int_disable(bp);
-		bnx2x_release_alr(bp);
-		/* In case of parity errors don't handle attentions so that
-		 * the other function can also "see" the parity errors.
-		 */
-		return;
-	}
-
-	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
-	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
-	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
-	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
-	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
-	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
-
-	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
-		if (deasserted & (1 << index)) {
-			group_mask = &bp->attn_group[index];
-
-			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
-			   index, group_mask->sig[0], group_mask->sig[1],
-			   group_mask->sig[2], group_mask->sig[3]);
-
-			bnx2x_attn_int_deasserted3(bp,
-					attn.sig[3] & group_mask->sig[3]);
-			bnx2x_attn_int_deasserted1(bp,
-					attn.sig[1] & group_mask->sig[1]);
-			bnx2x_attn_int_deasserted2(bp,
-					attn.sig[2] & group_mask->sig[2]);
-			bnx2x_attn_int_deasserted0(bp,
-					attn.sig[0] & group_mask->sig[0]);
-		}
-	}
-
-	bnx2x_release_alr(bp);
-
-	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
-
-	val = ~deasserted;
-	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
-	   val, reg_addr);
-	REG_WR(bp, reg_addr, val);
-
-	if (~bp->attn_state & deasserted)
-		BNX2X_ERR("IGU ERROR\n");
-
-	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
-			  MISC_REG_AEU_MASK_ATTN_FUNC_0;
-
-	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
-	aeu_mask = REG_RD(bp, reg_addr);
-
-	DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
-	   aeu_mask, deasserted);
-	aeu_mask |= (deasserted & 0x3ff);
-	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
-
-	REG_WR(bp, reg_addr, aeu_mask);
-	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
-
-	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
-	bp->attn_state &= ~deasserted;
-	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
-}
-
-static void bnx2x_attn_int(struct bnx2x *bp)
-{
-	/* read local copy of bits */
-	u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
-								attn_bits);
-	u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
-								attn_bits_ack);
-	u32 attn_state = bp->attn_state;
-
-	/* look for changed bits */
-	u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
-	u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
-
-	DP(NETIF_MSG_HW,
-	   "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
-	   attn_bits, attn_ack, asserted, deasserted);
-
-	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
-		BNX2X_ERR("BAD attention state\n");
-
-	/* handle bits that were raised */
-	if (asserted)
-		bnx2x_attn_int_asserted(bp, asserted);
-
-	if (deasserted)
-		bnx2x_attn_int_deasserted(bp, deasserted);
-}
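The asserted/deasserted algebra is easiest to see with concrete bits: a bit is newly asserted when the hardware raised it but it is neither acked nor already tracked; deasserted is the mirror image. For example (a fragment meant inside a function):

unsigned int attn_bits  = 0x5;  /* hw reports bits 0 and 2 up */
unsigned int attn_ack   = 0x4;  /* bit 2 already acknowledged */
unsigned int attn_state = 0x4;  /* driver already tracks bit 2 */

unsigned int asserted   =  attn_bits & ~attn_ack & ~attn_state; /* 0x1 */
unsigned int deasserted = ~attn_bits &  attn_ack &  attn_state; /* 0x0 */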
-
-static void bnx2x_sp_task(struct work_struct *work)
-{
-	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
-	u16 status;
-
-	/* Return here if interrupt is disabled */
-	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
-		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
-		return;
-	}
-
-	status = bnx2x_update_dsb_idx(bp);
-/*	if (status == 0)				     */
-/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */
-
-	DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
-
-	/* HW attentions */
-	if (status & 0x1) {
-		bnx2x_attn_int(bp);
-		status &= ~0x1;
-	}
-
-	/* CStorm events: STAT_QUERY */
-	if (status & 0x2) {
-		DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n");
-		status &= ~0x2;
-	}
-
-	if (unlikely(status))
-		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
-		   status);
-
-	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
-		     IGU_INT_NOP, 1);
-	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
-		     IGU_INT_NOP, 1);
-	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
-		     IGU_INT_NOP, 1);
-	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
-		     IGU_INT_NOP, 1);
-	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
-		     IGU_INT_ENABLE, 1);
-}
-
-static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
-{
-	struct net_device *dev = dev_instance;
-	struct bnx2x *bp = netdev_priv(dev);
-
-	/* Return here if interrupt is disabled */
-	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
-		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
-		return IRQ_HANDLED;
-	}
-
-	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
-
-#ifdef BNX2X_STOP_ON_ERROR
-	if (unlikely(bp->panic))
-		return IRQ_HANDLED;
-#endif
-
-#ifdef BCM_CNIC
-	{
-		struct cnic_ops *c_ops;
-
-		rcu_read_lock();
-		c_ops = rcu_dereference(bp->cnic_ops);
-		if (c_ops)
-			c_ops->cnic_handler(bp->cnic_data, NULL);
-		rcu_read_unlock();
-	}
-#endif
-	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
-
-	return IRQ_HANDLED;
-}
-
-/* end of slow path */
-
-/* Statistics */
-
-/****************************************************************************
-* Macros
-****************************************************************************/
-
-/* sum[hi:lo] += add[hi:lo] */
-#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
-	do { \
-		s_lo += a_lo; \
-		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
-	} while (0)
-
-/* difference = minuend - subtrahend */
-#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
-	do { \
-		if (m_lo < s_lo) { \
-			/* underflow */ \
-			d_hi = m_hi - s_hi; \
-			if (d_hi > 0) { \
-				/* we can borrow 1 */ \
-				d_hi--; \
-				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
-			} else { \
-				/* m_hi <= s_hi */ \
-				d_hi = 0; \
-				d_lo = 0; \
-			} \
-		} else { \
-			/* m_lo >= s_lo */ \
-			if (m_hi < s_hi) { \
-				d_hi = 0; \
-				d_lo = 0; \
-			} else { \
-				/* m_hi >= s_hi */ \
-				d_hi = m_hi - s_hi; \
-				d_lo = m_lo - s_lo; \
-			} \
-		} \
-	} while (0)
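ADD_64 detects the carry out of the low word by the wrap-around test (s_lo < a_lo), and DIFF_64 borrows one from the high word when the low subtraction would underflow. A worked addition with 32-bit halves (again a fragment meant inside a function):

unsigned int s_hi = 0, s_lo = 0xfffffffe;
unsigned int a_hi = 0, a_lo = 3;

s_lo += a_lo;                            /* wraps around to 0x1 */
s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0);  /* wrap implies carry: s_hi = 1 */
/* result is 0x1_00000001, i.e. 0xfffffffe + 3 */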
-
-#define UPDATE_STAT64(s, t) \
-	do { \
-		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
-			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
-		pstats->mac_stx[0].t##_hi = new->s##_hi; \
-		pstats->mac_stx[0].t##_lo = new->s##_lo; \
-		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
-		       pstats->mac_stx[1].t##_lo, diff.lo); \
-	} while (0)
-
-#define UPDATE_STAT64_NIG(s, t) \
-	do { \
-		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
-			diff.lo, new->s##_lo, old->s##_lo); \
-		ADD_64(estats->t##_hi, diff.hi, \
-		       estats->t##_lo, diff.lo); \
-	} while (0)
-
-/* sum[hi:lo] += add */
-#define ADD_EXTEND_64(s_hi, s_lo, a) \
-	do { \
-		s_lo += a; \
-		s_hi += (s_lo < a) ? 1 : 0; \
-	} while (0)
-
-#define UPDATE_EXTEND_STAT(s) \
-	do { \
-		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
-			      pstats->mac_stx[1].s##_lo, \
-			      new->s); \
-	} while (0)
-
-#define UPDATE_EXTEND_TSTAT(s, t) \
-	do { \
-		diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
-		old_tclient->s = tclient->s; \
-		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
-	} while (0)
-
-#define UPDATE_EXTEND_USTAT(s, t) \
-	do { \
-		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
-		old_uclient->s = uclient->s; \
-		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
-	} while (0)
-
-#define UPDATE_EXTEND_XSTAT(s, t) \
-	do { \
-		diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
-		old_xclient->s = xclient->s; \
-		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
-	} while (0)
-
-/* minuend -= subtrahend */
-#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
-	do { \
-		DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
-	} while (0)
-
-/* minuend[hi:lo] -= subtrahend */
-#define SUB_EXTEND_64(m_hi, m_lo, s) \
-	do { \
-		SUB_64(m_hi, 0, m_lo, s); \
-	} while (0)
-
-#define SUB_EXTEND_USTAT(s, t) \
-	do { \
-		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
-		SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
-	} while (0)
-
-/*
- * General service functions
- */
-
-static inline long bnx2x_hilo(u32 *hiref)
-{
-	u32 lo = *(hiref + 1);
-#if (BITS_PER_LONG == 64)
-	u32 hi = *hiref;
-
-	return HILO_U64(hi, lo);
-#else
-	return lo;
-#endif
-}
-
-/*
- * Init service functions
- */
-
-static void bnx2x_storm_stats_post(struct bnx2x *bp)
-{
-	if (!bp->stats_pending) {
-		struct eth_query_ramrod_data ramrod_data = {0};
-		int i, rc;
-
-		ramrod_data.drv_counter = bp->stats_counter++;
-		ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
-		for_each_queue(bp, i)
-			ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
-
-		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
-				   ((u32 *)&ramrod_data)[1],
-				   ((u32 *)&ramrod_data)[0], 0);
-		if (rc == 0) {
-			/* stats ramrod has its own slot on the spq */
-			bp->spq_left++;
-			bp->stats_pending = 1;
-		}
-	}
-}
-
-static void bnx2x_hw_stats_post(struct bnx2x *bp)
-{
-	struct dmae_command *dmae = &bp->stats_dmae;
-	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
-
-	*stats_comp = DMAE_COMP_VAL;
-	if (CHIP_REV_IS_SLOW(bp))
-		return;
-
-	/* loader */
-	if (bp->executer_idx) {
-		int loader_idx = PMF_DMAE_C(bp);
-
-		memset(dmae, 0, sizeof(struct dmae_command));
-
-		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
-				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
-				DMAE_CMD_DST_RESET |
-#ifdef __BIG_ENDIAN
-				DMAE_CMD_ENDIANITY_B_DW_SWAP |
-#else
-				DMAE_CMD_ENDIANITY_DW_SWAP |
-#endif
-				(BP_PORT(bp) ? DMAE_CMD_PORT_1 :
-					       DMAE_CMD_PORT_0) |
-				(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
-		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
-		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
-		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
-				     sizeof(struct dmae_command) *
-				     (loader_idx + 1)) >> 2;
-		dmae->dst_addr_hi = 0;
-		dmae->len = sizeof(struct dmae_command) >> 2;
-		if (CHIP_IS_E1(bp))
-			dmae->len--;
-		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
-		dmae->comp_addr_hi = 0;
-		dmae->comp_val = 1;
-
-		*stats_comp = 0;
-		bnx2x_post_dmae(bp, dmae, loader_idx);
-
-	} else if (bp->func_stx) {
-		*stats_comp = 0;
-		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
-	}
-}
-
-static int bnx2x_stats_comp(struct bnx2x *bp)
-{
-	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
-	int cnt = 10;
-
-	might_sleep();
-	while (*stats_comp != DMAE_COMP_VAL) {
-		if (!cnt) {
-			BNX2X_ERR("timeout waiting for stats finished\n");
-			break;
-		}
-		cnt--;
-		msleep(1);
-	}
-	return 1;
-}
-
-/*
- * Statistics service functions
- */
-
-static void bnx2x_stats_pmf_update(struct bnx2x *bp)
-{
-	struct dmae_command *dmae;
-	u32 opcode;
-	int loader_idx = PMF_DMAE_C(bp);
-	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
-
-	/* sanity */
-	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
-		BNX2X_ERR("BUG!\n");
-		return;
-	}
-
-	bp->executer_idx = 0;
-
-	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
-		  DMAE_CMD_C_ENABLE |
-		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
-#ifdef __BIG_ENDIAN
-		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
-#else
-		  DMAE_CMD_ENDIANITY_DW_SWAP |
-#endif
-		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
-		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
-
-	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
-	dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
-	dmae->src_addr_lo = bp->port.port_stx >> 2;
-	dmae->src_addr_hi = 0;
-	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
-	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
-	dmae->len = DMAE_LEN32_RD_MAX;
-	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
-	dmae->comp_addr_hi = 0;
-	dmae->comp_val = 1;
-
-	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
-	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
-	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
-	dmae->src_addr_hi = 0;
-	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
-				   DMAE_LEN32_RD_MAX * 4);
-	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
-				   DMAE_LEN32_RD_MAX * 4);
-	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
-	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
-	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
-	dmae->comp_val = DMAE_COMP_VAL;
-
-	*stats_comp = 0;
-	bnx2x_hw_stats_post(bp);
-	bnx2x_stats_comp(bp);
-}
-
-static void bnx2x_port_stats_init(struct bnx2x *bp)
-{
-	struct dmae_command *dmae;
-	int port = BP_PORT(bp);
-	int vn = BP_E1HVN(bp);
-	u32 opcode;
-	int loader_idx = PMF_DMAE_C(bp);
-	u32 mac_addr;
-	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
-
-	/* sanity */
-	if (!bp->link_vars.link_up || !bp->port.pmf) {
-		BNX2X_ERR("BUG!\n");
-		return;
-	}
-
-	bp->executer_idx = 0;
-
-	/* MCP */
-	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
-		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
-		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
-#ifdef __BIG_ENDIAN
-		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
-#else
-		  DMAE_CMD_ENDIANITY_DW_SWAP |
-#endif
-		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
-		  (vn << DMAE_CMD_E1HVN_SHIFT));
-
-	if (bp->port.port_stx) {
-
-		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
-		dmae->opcode = opcode;
-		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
-		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
-		dmae->dst_addr_lo = bp->port.port_stx >> 2;
-		dmae->dst_addr_hi = 0;
-		dmae->len = sizeof(struct host_port_stats) >> 2;
-		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
-		dmae->comp_addr_hi = 0;
-		dmae->comp_val = 1;
-	}
-
-	if (bp->func_stx) {
-
-		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
-		dmae->opcode = opcode;
-		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
-		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
-		dmae->dst_addr_lo = bp->func_stx >> 2;
-		dmae->dst_addr_hi = 0;
-		dmae->len = sizeof(struct host_func_stats) >> 2;
-		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
-		dmae->comp_addr_hi = 0;
-		dmae->comp_val = 1;
-	}
-
-	/* MAC */
-	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
-		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
-		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
-#ifdef __BIG_ENDIAN
-		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
-#else
-		  DMAE_CMD_ENDIANITY_DW_SWAP |
-#endif
-		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
-		  (vn << DMAE_CMD_E1HVN_SHIFT));
-
-	if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
-
-		mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
-				   NIG_REG_INGRESS_BMAC0_MEM);
-
-		/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
-		   BIGMAC_REGISTER_TX_STAT_GTBYT */
-		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
-		dmae->opcode = opcode;
-		dmae->src_addr_lo = (mac_addr +
-				     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
-		dmae->src_addr_hi = 0;
-		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
-		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
-		dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
-			     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
-		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
-		dmae->comp_addr_hi = 0;
-		dmae->comp_val = 1;
-
-		/* BIGMAC_REGISTER_RX_STAT_GR64 ..
-		   BIGMAC_REGISTER_RX_STAT_GRIPJ */
-		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
-		dmae->opcode = opcode;
-		dmae->src_addr_lo = (mac_addr +
-				     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
-		dmae->src_addr_hi = 0;
-		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
-				offsetof(struct bmac_stats, rx_stat_gr64_lo));
-		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
-				offsetof(struct bmac_stats, rx_stat_gr64_lo));
-		dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
-			     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
-		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
-		dmae->comp_addr_hi = 0;
-		dmae->comp_val = 1;
-
-	} else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
-
-		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
-
-		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT) */
-		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
-		dmae->opcode = opcode;
-		dmae->src_addr_lo = (mac_addr +
-				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
-		dmae->src_addr_hi = 0;
-		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
-		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
-		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
-		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
-		dmae->comp_addr_hi = 0;
-		dmae->comp_val = 1;
-
-		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
-		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
-		dmae->opcode = opcode;
-		dmae->src_addr_lo = (mac_addr +
-				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
-		dmae->src_addr_hi = 0;
-		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
-		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
-		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
-		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
-		dmae->len = 1;
-		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
-		dmae->comp_addr_hi = 0;
-		dmae->comp_val = 1;
-
-		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT) */
-		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
-		dmae->opcode = opcode;
-		dmae->src_addr_lo = (mac_addr +
-				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
-		dmae->src_addr_hi = 0;
-		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
-			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
-		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
-			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
-		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
-		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
-		dmae->comp_addr_hi = 0;
-		dmae->comp_val = 1;
-	}
-
-	/* NIG */
-	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
-	dmae->opcode = opcode;
-	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
-				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
-	dmae->src_addr_hi = 0;
-	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
-	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
-	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
-	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
-	dmae->comp_addr_hi = 0;
-	dmae->comp_val = 1;
-
-	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
-	dmae->opcode = opcode;
-	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
-				    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
-	dmae->src_addr_hi = 0;
-	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
-			offsetof(struct nig_stats, egress_mac_pkt0_lo));
-	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
-			offsetof(struct nig_stats, egress_mac_pkt0_lo));
-	dmae->len = (2*sizeof(u32)) >> 2;
-	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
-	dmae->comp_addr_hi = 0;
-	dmae->comp_val = 1;
-
-	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
-	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
-			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
-			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
-#ifdef __BIG_ENDIAN
-			DMAE_CMD_ENDIANITY_B_DW_SWAP |
-#else
-			DMAE_CMD_ENDIANITY_DW_SWAP |
-#endif
-			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
-			(vn << DMAE_CMD_E1HVN_SHIFT));
-	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
-				    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
-	dmae->src_addr_hi = 0;
-	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
-			offsetof(struct nig_stats, egress_mac_pkt1_lo));
-	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
-			offsetof(struct nig_stats, egress_mac_pkt1_lo));
-	dmae->len = (2*sizeof(u32)) >> 2;
-	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
-	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
-	dmae->comp_val = DMAE_COMP_VAL;
-
-	*stats_comp = 0;
-}
-
-static void bnx2x_func_stats_init(struct bnx2x *bp)
-{
-	struct dmae_command *dmae = &bp->stats_dmae;
-	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
-
-	/* sanity */
-	if (!bp->func_stx) {
-		BNX2X_ERR("BUG!\n");
-		return;
-	}
-
-	bp->executer_idx = 0;
-	memset(dmae, 0, sizeof(struct dmae_command));
-
-	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
-			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
-			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
-#ifdef __BIG_ENDIAN
-			DMAE_CMD_ENDIANITY_B_DW_SWAP |
-#else
-			DMAE_CMD_ENDIANITY_DW_SWAP |
-#endif
-			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
-			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
-	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
-	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
-	dmae->dst_addr_lo = bp->func_stx >> 2;
-	dmae->dst_addr_hi = 0;
-	dmae->len = sizeof(struct host_func_stats) >> 2;
-	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
-	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
-	dmae->comp_val = DMAE_COMP_VAL;
-
-	*stats_comp = 0;
-}
-
-static void bnx2x_stats_start(struct bnx2x *bp)
-{
-	if (bp->port.pmf)
-		bnx2x_port_stats_init(bp);
-
-	else if (bp->func_stx)
-		bnx2x_func_stats_init(bp);
-
-	bnx2x_hw_stats_post(bp);
-	bnx2x_storm_stats_post(bp);
-}
-
-static void bnx2x_stats_pmf_start(struct bnx2x *bp)
-{
-	bnx2x_stats_comp(bp);
-	bnx2x_stats_pmf_update(bp);
-	bnx2x_stats_start(bp);
-}
-
-static void bnx2x_stats_restart(struct bnx2x *bp)
-{
-	bnx2x_stats_comp(bp);
-	bnx2x_stats_start(bp);
-}
-
-static void bnx2x_bmac_stats_update(struct bnx2x *bp)
-{
-	struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
-	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
-	struct bnx2x_eth_stats *estats = &bp->eth_stats;
-	struct {
-		u32 lo;
-		u32 hi;
-	} diff;
-
-	UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
-	UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
-	UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
-	UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
-	UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
-	UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
-	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
-	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
-	UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
-	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
-	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
-	UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
-	UPDATE_STAT64(tx_stat_gt127,
-				tx_stat_etherstatspkts65octetsto127octets);
-	UPDATE_STAT64(tx_stat_gt255,
-				tx_stat_etherstatspkts128octetsto255octets);
-	UPDATE_STAT64(tx_stat_gt511,
-				tx_stat_etherstatspkts256octetsto511octets);
-	UPDATE_STAT64(tx_stat_gt1023,
-				tx_stat_etherstatspkts512octetsto1023octets);
-	UPDATE_STAT64(tx_stat_gt1518,
-				tx_stat_etherstatspkts1024octetsto1522octets);
-	UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
-	UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
-	UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
-	UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
-	UPDATE_STAT64(tx_stat_gterr,
-				tx_stat_dot3statsinternalmactransmiterrors);
-	UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
-
-	estats->pause_frames_received_hi =
-				pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
-	estats->pause_frames_received_lo =
-				pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
-
-	estats->pause_frames_sent_hi =
-				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
-	estats->pause_frames_sent_lo =
-				pstats->mac_stx[1].tx_stat_outxoffsent_lo;
-}
-
-static void bnx2x_emac_stats_update(struct bnx2x *bp)
-{
-	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
-	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
-	struct bnx2x_eth_stats *estats = &bp->eth_stats;
-
-	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
-	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
-	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
-	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
-	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
-	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
-	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
-	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
-	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
-	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
-	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
-	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
-	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
-	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
-	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
-	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
-	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
-	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
-	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
-	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
-	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
-	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
-	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
-	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
-	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
-	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
-	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
-	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
-	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
-	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
-	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
-
-	estats->pause_frames_received_hi =
-			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
-	estats->pause_frames_received_lo =
-			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
-	ADD_64(estats->pause_frames_received_hi,
-	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
-	       estats->pause_frames_received_lo,
-	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
-
-	estats->pause_frames_sent_hi =
-			pstats->mac_stx[1].tx_stat_outxonsent_hi;
-	estats->pause_frames_sent_lo =
-			pstats->mac_stx[1].tx_stat_outxonsent_lo;
-	ADD_64(estats->pause_frames_sent_hi,
-	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
-	       estats->pause_frames_sent_lo,
-	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
-}
-
-static int bnx2x_hw_stats_update(struct bnx2x *bp)
-{
-	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
-	struct nig_stats *old = &(bp->port.old_nig_stats);
-	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
-	struct bnx2x_eth_stats *estats = &bp->eth_stats;
-	struct {
-		u32 lo;
-		u32 hi;
-	} diff;
-
-	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
-		bnx2x_bmac_stats_update(bp);
-
-	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
-		bnx2x_emac_stats_update(bp);
-
-	else { /* unreached */
-		BNX2X_ERR("stats updated by DMAE but no MAC active\n");
-		return -1;
-	}
-
-	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
-		      new->brb_discard - old->brb_discard);
-	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
-		      new->brb_truncate - old->brb_truncate);
-
-	UPDATE_STAT64_NIG(egress_mac_pkt0,
-					etherstatspkts1024octetsto1522octets);
-	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
-
-	memcpy(old, new, sizeof(struct nig_stats));
-
-	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
-	       sizeof(struct mac_stx));
-	estats->brb_drop_hi = pstats->brb_drop_hi;
-	estats->brb_drop_lo = pstats->brb_drop_lo;
-
-	pstats->host_port_stats_start = ++pstats->host_port_stats_end;
-
-	if (!BP_NOMCP(bp)) {
-		u32 nig_timer_max =
-			SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
-		if (nig_timer_max != estats->nig_timer_max) {
-			estats->nig_timer_max = nig_timer_max;
-			BNX2X_ERR("NIG timer max (%u)\n",
-				  estats->nig_timer_max);
-		}
-	}
-
-	return 0;
-}
-
-static int bnx2x_storm_stats_update(struct bnx2x *bp)
-{
-	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
-	struct tstorm_per_port_stats *tport =
-					&stats->tstorm_common.port_statistics;
-	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
-	struct bnx2x_eth_stats *estats = &bp->eth_stats;
-	int i;
-
-	memcpy(&(fstats->total_bytes_received_hi),
-	       &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
-	       sizeof(struct host_func_stats) - 2*sizeof(u32));
-	estats->error_bytes_received_hi = 0;
-	estats->error_bytes_received_lo = 0;
-	estats->etherstatsoverrsizepkts_hi = 0;
-	estats->etherstatsoverrsizepkts_lo = 0;
-	estats->no_buff_discard_hi = 0;
-	estats->no_buff_discard_lo = 0;
-
-	for_each_queue(bp, i) {
-		struct bnx2x_fastpath *fp = &bp->fp[i];
-		int cl_id = fp->cl_id;
-		struct tstorm_per_client_stats *tclient =
-				&stats->tstorm_common.client_statistics[cl_id];
-		struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
-		struct ustorm_per_client_stats *uclient =
-				&stats->ustorm_common.client_statistics[cl_id];
-		struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
-		struct xstorm_per_client_stats *xclient =
-				&stats->xstorm_common.client_statistics[cl_id];
-		struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
-		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
-		u32 diff;
-
-		/* are storm stats valid? */
-		if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
-							bp->stats_counter) {
-			DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
-			   "  xstorm counter (0x%x) != stats_counter (0x%x)\n",
-			   i, xclient->stats_counter, bp->stats_counter);
-			return -1;
-		}
-		if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
-							bp->stats_counter) {
-			DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
-			   "  tstorm counter (0x%x) != stats_counter (0x%x)\n",
-			   i, tclient->stats_counter, bp->stats_counter);
-			return -2;
-		}
-		if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
-							bp->stats_counter) {
-			DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
-			   "  ustorm counter (0x%x) != stats_counter (0x%x)\n",
-			   i, uclient->stats_counter, bp->stats_counter);
-			return -4;
-		}
-
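-		/* Total Rx bytes: sum the bcast/mcast/ucast byte counters
-		 * reported by the TSTORM, then subtract the bytes the USTORM
-		 * dropped for lack of buffers. */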
-		qstats->total_bytes_received_hi =
-			le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
-		qstats->total_bytes_received_lo =
-			le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
-
-		ADD_64(qstats->total_bytes_received_hi,
-		       le32_to_cpu(tclient->rcv_multicast_bytes.hi),
-		       qstats->total_bytes_received_lo,
-		       le32_to_cpu(tclient->rcv_multicast_bytes.lo));
-
-		ADD_64(qstats->total_bytes_received_hi,
-		       le32_to_cpu(tclient->rcv_unicast_bytes.hi),
-		       qstats->total_bytes_received_lo,
-		       le32_to_cpu(tclient->rcv_unicast_bytes.lo));
-
-		SUB_64(qstats->total_bytes_received_hi,
-		       le32_to_cpu(uclient->bcast_no_buff_bytes.hi),
-		       qstats->total_bytes_received_lo,
-		       le32_to_cpu(uclient->bcast_no_buff_bytes.lo));
-
-		SUB_64(qstats->total_bytes_received_hi,
-		       le32_to_cpu(uclient->mcast_no_buff_bytes.hi),
-		       qstats->total_bytes_received_lo,
-		       le32_to_cpu(uclient->mcast_no_buff_bytes.lo));
-
-		SUB_64(qstats->total_bytes_received_hi,
-		       le32_to_cpu(uclient->ucast_no_buff_bytes.hi),
-		       qstats->total_bytes_received_lo,
-		       le32_to_cpu(uclient->ucast_no_buff_bytes.lo));
-
-		qstats->valid_bytes_received_hi =
-					qstats->total_bytes_received_hi;
-		qstats->valid_bytes_received_lo =
-					qstats->total_bytes_received_lo;
-
-		qstats->error_bytes_received_hi =
-				le32_to_cpu(tclient->rcv_error_bytes.hi);
-		qstats->error_bytes_received_lo =
-				le32_to_cpu(tclient->rcv_error_bytes.lo);
-
-		ADD_64(qstats->total_bytes_received_hi,
-		       qstats->error_bytes_received_hi,
-		       qstats->total_bytes_received_lo,
-		       qstats->error_bytes_received_lo);
-
-		UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
-					total_unicast_packets_received);
-		UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
-					total_multicast_packets_received);
-		UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
-					total_broadcast_packets_received);
-		UPDATE_EXTEND_TSTAT(packets_too_big_discard,
-					etherstatsoverrsizepkts);
-		UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
-
-		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
-					total_unicast_packets_received);
-		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
-					total_multicast_packets_received);
-		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
-					total_broadcast_packets_received);
-		UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
-		UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
-		UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
-
-		qstats->total_bytes_transmitted_hi =
-				le32_to_cpu(xclient->unicast_bytes_sent.hi);
-		qstats->total_bytes_transmitted_lo =
-				le32_to_cpu(xclient->unicast_bytes_sent.lo);
-
-		ADD_64(qstats->total_bytes_transmitted_hi,
-		       le32_to_cpu(xclient->multicast_bytes_sent.hi),
-		       qstats->total_bytes_transmitted_lo,
-		       le32_to_cpu(xclient->multicast_bytes_sent.lo));
-
-		ADD_64(qstats->total_bytes_transmitted_hi,
-		       le32_to_cpu(xclient->broadcast_bytes_sent.hi),
-		       qstats->total_bytes_transmitted_lo,
-		       le32_to_cpu(xclient->broadcast_bytes_sent.lo));
-
-		UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
-					total_unicast_packets_transmitted);
-		UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
-					total_multicast_packets_transmitted);
-		UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
-					total_broadcast_packets_transmitted);
-
-		old_tclient->checksum_discard = tclient->checksum_discard;
-		old_tclient->ttl0_discard = tclient->ttl0_discard;
-
-		ADD_64(fstats->total_bytes_received_hi,
-		       qstats->total_bytes_received_hi,
-		       fstats->total_bytes_received_lo,
-		       qstats->total_bytes_received_lo);
-		ADD_64(fstats->total_bytes_transmitted_hi,
-		       qstats->total_bytes_transmitted_hi,
-		       fstats->total_bytes_transmitted_lo,
-		       qstats->total_bytes_transmitted_lo);
-		ADD_64(fstats->total_unicast_packets_received_hi,
-		       qstats->total_unicast_packets_received_hi,
-		       fstats->total_unicast_packets_received_lo,
-		       qstats->total_unicast_packets_received_lo);
-		ADD_64(fstats->total_multicast_packets_received_hi,
-		       qstats->total_multicast_packets_received_hi,
-		       fstats->total_multicast_packets_received_lo,
-		       qstats->total_multicast_packets_received_lo);
-		ADD_64(fstats->total_broadcast_packets_received_hi,
-		       qstats->total_broadcast_packets_received_hi,
-		       fstats->total_broadcast_packets_received_lo,
-		       qstats->total_broadcast_packets_received_lo);
-		ADD_64(fstats->total_unicast_packets_transmitted_hi,
-		       qstats->total_unicast_packets_transmitted_hi,
-		       fstats->total_unicast_packets_transmitted_lo,
-		       qstats->total_unicast_packets_transmitted_lo);
-		ADD_64(fstats->total_multicast_packets_transmitted_hi,
-		       qstats->total_multicast_packets_transmitted_hi,
-		       fstats->total_multicast_packets_transmitted_lo,
-		       qstats->total_multicast_packets_transmitted_lo);
-		ADD_64(fstats->total_broadcast_packets_transmitted_hi,
-		       qstats->total_broadcast_packets_transmitted_hi,
-		       fstats->total_broadcast_packets_transmitted_lo,
-		       qstats->total_broadcast_packets_transmitted_lo);
-		ADD_64(fstats->valid_bytes_received_hi,
-		       qstats->valid_bytes_received_hi,
-		       fstats->valid_bytes_received_lo,
-		       qstats->valid_bytes_received_lo);
-
-		ADD_64(estats->error_bytes_received_hi,
-		       qstats->error_bytes_received_hi,
-		       estats->error_bytes_received_lo,
-		       qstats->error_bytes_received_lo);
-		ADD_64(estats->etherstatsoverrsizepkts_hi,
-		       qstats->etherstatsoverrsizepkts_hi,
-		       estats->etherstatsoverrsizepkts_lo,
-		       qstats->etherstatsoverrsizepkts_lo);
-		ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
-		       estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
-	}
-
-	ADD_64(fstats->total_bytes_received_hi,
-	       estats->rx_stat_ifhcinbadoctets_hi,
-	       fstats->total_bytes_received_lo,
-	       estats->rx_stat_ifhcinbadoctets_lo);
-
-	memcpy(estats, &(fstats->total_bytes_received_hi),
-	       sizeof(struct host_func_stats) - 2*sizeof(u32));
-
-	ADD_64(estats->etherstatsoverrsizepkts_hi,
-	       estats->rx_stat_dot3statsframestoolong_hi,
-	       estats->etherstatsoverrsizepkts_lo,
-	       estats->rx_stat_dot3statsframestoolong_lo);
-	ADD_64(estats->error_bytes_received_hi,
-	       estats->rx_stat_ifhcinbadoctets_hi,
-	       estats->error_bytes_received_lo,
-	       estats->rx_stat_ifhcinbadoctets_lo);
-
-	if (bp->port.pmf) {
-		estats->mac_filter_discard =
-				le32_to_cpu(tport->mac_filter_discard);
-		estats->xxoverflow_discard =
-				le32_to_cpu(tport->xxoverflow_discard);
-		estats->brb_truncate_discard =
-				le32_to_cpu(tport->brb_truncate_discard);
-		estats->mac_discard = le32_to_cpu(tport->mac_discard);
-	}
-
-	fstats->host_func_stats_start = ++fstats->host_func_stats_end;
-
-	bp->stats_pending = 0;
-
-	return 0;
-}
-
-static void bnx2x_net_stats_update(struct bnx2x *bp)
-{
-	struct bnx2x_eth_stats *estats = &bp->eth_stats;
-	struct net_device_stats *nstats = &bp->dev->stats;
-	int i;
-
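-	/* Firmware keeps each counter as a hi/lo pair of 32-bit words;
-	 * bnx2x_hilo() folds such a pair into one 64-bit value. */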
-	nstats->rx_packets =
-		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
-		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
-		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
-
-	nstats->tx_packets =
-		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
-		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
-		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
-
-	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
-
-	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
-
-	nstats->rx_dropped = estats->mac_discard;
-	for_each_queue(bp, i)
-		nstats->rx_dropped +=
-			le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
-
-	nstats->tx_dropped = 0;
-
-	nstats->multicast =
-		bnx2x_hilo(&estats->total_multicast_packets_received_hi);
-
-	nstats->collisions =
-		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
-
-	nstats->rx_length_errors =
-		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
-		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
-	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
-				 bnx2x_hilo(&estats->brb_truncate_hi);
-	nstats->rx_crc_errors =
-		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
-	nstats->rx_frame_errors =
-		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
-	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
-	nstats->rx_missed_errors = estats->xxoverflow_discard;
-
-	nstats->rx_errors = nstats->rx_length_errors +
-			    nstats->rx_over_errors +
-			    nstats->rx_crc_errors +
-			    nstats->rx_frame_errors +
-			    nstats->rx_fifo_errors +
-			    nstats->rx_missed_errors;
-
-	nstats->tx_aborted_errors =
-		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
-		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
-	nstats->tx_carrier_errors =
-		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
-	nstats->tx_fifo_errors = 0;
-	nstats->tx_heartbeat_errors = 0;
-	nstats->tx_window_errors = 0;
-
-	nstats->tx_errors = nstats->tx_aborted_errors +
-			    nstats->tx_carrier_errors +
-	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
-}
-
-static void bnx2x_drv_stats_update(struct bnx2x *bp)
-{
-	struct bnx2x_eth_stats *estats = &bp->eth_stats;
-	int i;
-
-	estats->driver_xoff = 0;
-	estats->rx_err_discard_pkt = 0;
-	estats->rx_skb_alloc_failed = 0;
-	estats->hw_csum_err = 0;
-	for_each_queue(bp, i) {
-		struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
-
-		estats->driver_xoff += qstats->driver_xoff;
-		estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
-		estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
-		estats->hw_csum_err += qstats->hw_csum_err;
-	}
-}
-
-static void bnx2x_stats_update(struct bnx2x *bp)
-{
-	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
-
-	if (*stats_comp != DMAE_COMP_VAL)
-		return;
-
-	if (bp->port.pmf)
-		bnx2x_hw_stats_update(bp);
-
-	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
-		BNX2X_ERR("storm stats were not updated for 3 times\n");
-		bnx2x_panic();
-		return;
-	}
-
-	bnx2x_net_stats_update(bp);
-	bnx2x_drv_stats_update(bp);
-
-	if (netif_msg_timer(bp)) {
-		struct bnx2x_eth_stats *estats = &bp->eth_stats;
-		int i;
-
-		printk(KERN_DEBUG "%s: brb drops %u  brb truncate %u\n",
-		       bp->dev->name,
-		       estats->brb_drop_lo, estats->brb_truncate_lo);
-
-		for_each_queue(bp, i) {
-			struct bnx2x_fastpath *fp = &bp->fp[i];
-			struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
-
-			printk(KERN_DEBUG "%s: rx usage(%4u)  *rx_cons_sb(%u)"
-					  "  rx pkt(%lu)  rx calls(%lu %lu)\n",
-			       fp->name, (le16_to_cpu(*fp->rx_cons_sb) -
-			       fp->rx_comp_cons),
-			       le16_to_cpu(*fp->rx_cons_sb),
-			       bnx2x_hilo(&qstats->
-					  total_unicast_packets_received_hi),
-			       fp->rx_calls, fp->rx_pkt);
-		}
-
-		for_each_queue(bp, i) {
-			struct bnx2x_fastpath *fp = &bp->fp[i];
-			struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
-			struct netdev_queue *txq =
-				netdev_get_tx_queue(bp->dev, i);
-
-			printk(KERN_DEBUG "%s: tx avail(%4u)  *tx_cons_sb(%u)"
-					  "  tx pkt(%lu) tx calls (%lu)"
-					  "  %s (Xoff events %u)\n",
-			       fp->name, bnx2x_tx_avail(fp),
-			       le16_to_cpu(*fp->tx_cons_sb),
-			       bnx2x_hilo(&qstats->
-					  total_unicast_packets_transmitted_hi),
-			       fp->tx_pkt,
-			       (netif_tx_queue_stopped(txq) ? "Xoff" : "Xon"),
-			       qstats->driver_xoff);
-		}
-	}
-
-	bnx2x_hw_stats_post(bp);
-	bnx2x_storm_stats_post(bp);
-}
-
-static void bnx2x_port_stats_stop(struct bnx2x *bp)
-{
-	struct dmae_command *dmae;
-	u32 opcode;
-	int loader_idx = PMF_DMAE_C(bp);
-	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
-
-	bp->executer_idx = 0;
-
-	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
-		  DMAE_CMD_C_ENABLE |
-		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
-#ifdef __BIG_ENDIAN
-		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
-#else
-		  DMAE_CMD_ENDIANITY_DW_SWAP |
-#endif
-		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
-		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
-
-	if (bp->port.port_stx) {
-
-		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
-		if (bp->func_stx)
-			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
-		else
-			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
-		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
-		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
-		dmae->dst_addr_lo = bp->port.port_stx >> 2;
-		dmae->dst_addr_hi = 0;
-		dmae->len = sizeof(struct host_port_stats) >> 2;
-		if (bp->func_stx) {
-			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
-			dmae->comp_addr_hi = 0;
-			dmae->comp_val = 1;
-		} else {
-			dmae->comp_addr_lo =
-				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
-			dmae->comp_addr_hi =
-				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
-			dmae->comp_val = DMAE_COMP_VAL;
-
-			*stats_comp = 0;
-		}
-	}
-
-	if (bp->func_stx) {
-
-		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
-		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
-		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
-		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
-		dmae->dst_addr_lo = bp->func_stx >> 2;
-		dmae->dst_addr_hi = 0;
-		dmae->len = sizeof(struct host_func_stats) >> 2;
-		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
-		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
-		dmae->comp_val = DMAE_COMP_VAL;
-
-		*stats_comp = 0;
-	}
-}
-
-static void bnx2x_stats_stop(struct bnx2x *bp)
-{
-	int update = 0;
-
-	bnx2x_stats_comp(bp);
-
-	if (bp->port.pmf)
-		update = (bnx2x_hw_stats_update(bp) == 0);
-
-	update |= (bnx2x_storm_stats_update(bp) == 0);
-
-	if (update) {
-		bnx2x_net_stats_update(bp);
-
-		if (bp->port.pmf)
-			bnx2x_port_stats_stop(bp);
-
-		bnx2x_hw_stats_post(bp);
-		bnx2x_stats_comp(bp);
-	}
-}
-
-static void bnx2x_stats_do_nothing(struct bnx2x *bp)
-{
-}
-
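-/* Statistics state machine: indexed by [current state][event], each entry
- * names the action to run and the state to enter afterwards. */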
-static const struct {
-	void (*action)(struct bnx2x *bp);
-	enum bnx2x_stats_state next_state;
-} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
-/* state	event	*/
-{
-/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
-/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
-/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
-/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
-},
-{
-/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
-/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
-/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
-/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
-}
-};
-
-static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
-{
-	enum bnx2x_stats_state state = bp->stats_state;
-
-	if (unlikely(bp->panic))
-		return;
-
-	bnx2x_stats_stm[state][event].action(bp);
-	bp->stats_state = bnx2x_stats_stm[state][event].next_state;
-
-	/* Make sure the state has been "changed" */
-	smp_wmb();
-
-	if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
-		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
-		   state, event, bp->stats_state);
-}
-
-static void bnx2x_port_stats_base_init(struct bnx2x *bp)
-{
-	struct dmae_command *dmae;
-	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
-
-	/* sanity */
-	if (!bp->port.pmf || !bp->port.port_stx) {
-		BNX2X_ERR("BUG!\n");
-		return;
-	}
-
-	bp->executer_idx = 0;
-
-	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
-	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
-			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
-			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
-#ifdef __BIG_ENDIAN
-			DMAE_CMD_ENDIANITY_B_DW_SWAP |
-#else
-			DMAE_CMD_ENDIANITY_DW_SWAP |
-#endif
-			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
-			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
-	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
-	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
-	dmae->dst_addr_lo = bp->port.port_stx >> 2;
-	dmae->dst_addr_hi = 0;
-	dmae->len = sizeof(struct host_port_stats) >> 2;
-	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
-	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
-	dmae->comp_val = DMAE_COMP_VAL;
-
-	*stats_comp = 0;
-	bnx2x_hw_stats_post(bp);
-	bnx2x_stats_comp(bp);
-}
-
-static void bnx2x_func_stats_base_init(struct bnx2x *bp)
-{
-	int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
-	int port = BP_PORT(bp);
-	int func;
-	u32 func_stx;
-
-	/* sanity */
-	if (!bp->port.pmf || !bp->func_stx) {
-		BNX2X_ERR("BUG!\n");
-		return;
-	}
-
-	/* save our func_stx */
-	func_stx = bp->func_stx;
-
-	for (vn = VN_0; vn < vn_max; vn++) {
-		func = 2*vn + port;
-
-		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
-		bnx2x_func_stats_init(bp);
-		bnx2x_hw_stats_post(bp);
-		bnx2x_stats_comp(bp);
-	}
-
-	/* restore our func_stx */
-	bp->func_stx = func_stx;
-}
-
-static void bnx2x_func_stats_base_update(struct bnx2x *bp)
-{
-	struct dmae_command *dmae = &bp->stats_dmae;
-	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
-
-	/* sanity */
-	if (!bp->func_stx) {
-		BNX2X_ERR("BUG!\n");
-		return;
-	}
-
-	bp->executer_idx = 0;
-	memset(dmae, 0, sizeof(struct dmae_command));
-
-	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
-			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
-			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
-#ifdef __BIG_ENDIAN
-			DMAE_CMD_ENDIANITY_B_DW_SWAP |
-#else
-			DMAE_CMD_ENDIANITY_DW_SWAP |
-#endif
-			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
-			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
-	dmae->src_addr_lo = bp->func_stx >> 2;
-	dmae->src_addr_hi = 0;
-	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
-	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
-	dmae->len = sizeof(struct host_func_stats) >> 2;
-	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
-	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
-	dmae->comp_val = DMAE_COMP_VAL;
-
-	*stats_comp = 0;
-	bnx2x_hw_stats_post(bp);
-	bnx2x_stats_comp(bp);
-}
-
-static void bnx2x_stats_init(struct bnx2x *bp)
-{
-	int port = BP_PORT(bp);
-	int func = BP_FUNC(bp);
-	int i;
-
-	bp->stats_pending = 0;
-	bp->executer_idx = 0;
-	bp->stats_counter = 0;
-
-	/* port and func stats for management */
-	if (!BP_NOMCP(bp)) {
-		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
-		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
-
-	} else {
-		bp->port.port_stx = 0;
-		bp->func_stx = 0;
-	}
-	DP(BNX2X_MSG_STATS, "port_stx 0x%x  func_stx 0x%x\n",
-	   bp->port.port_stx, bp->func_stx);
-
-	/* port stats */
-	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
-	bp->port.old_nig_stats.brb_discard =
-			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
-	bp->port.old_nig_stats.brb_truncate =
-			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
-	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
-		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
-	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
-		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
-
-	/* function stats */
-	for_each_queue(bp, i) {
-		struct bnx2x_fastpath *fp = &bp->fp[i];
-
-		memset(&fp->old_tclient, 0,
-		       sizeof(struct tstorm_per_client_stats));
-		memset(&fp->old_uclient, 0,
-		       sizeof(struct ustorm_per_client_stats));
-		memset(&fp->old_xclient, 0,
-		       sizeof(struct xstorm_per_client_stats));
-		memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
-	}
-
-	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
-	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
-
-	bp->stats_state = STATS_STATE_DISABLED;
-
-	if (bp->port.pmf) {
-		if (bp->port.port_stx)
-			bnx2x_port_stats_base_init(bp);
-
-		if (bp->func_stx)
-			bnx2x_func_stats_base_init(bp);
-
-	} else if (bp->func_stx)
-		bnx2x_func_stats_base_update(bp);
-}
-
-static void bnx2x_timer(unsigned long data)
-{
-	struct bnx2x *bp = (struct bnx2x *) data;
-
-	if (!netif_running(bp->dev))
-		return;
-
-	if (atomic_read(&bp->intr_sem) != 0)
-		goto timer_restart;
-
-	if (poll) {
-		struct bnx2x_fastpath *fp = &bp->fp[0];
-		int rc;
-
-		bnx2x_tx_int(fp);
-		rc = bnx2x_rx_int(fp, 1000);
-	}
-
-	if (!BP_NOMCP(bp)) {
-		int func = BP_FUNC(bp);
-		u32 drv_pulse;
-		u32 mcp_pulse;
-
-		++bp->fw_drv_pulse_wr_seq;
-		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
-		/* TBD - add SYSTEM_TIME */
-		drv_pulse = bp->fw_drv_pulse_wr_seq;
-		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
-
-		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
-			     MCP_PULSE_SEQ_MASK);
-		/* The delta between driver pulse and mcp response
-		 * should be 1 (before mcp response) or 0 (after mcp response)
-		 */
-		if ((drv_pulse != mcp_pulse) &&
-		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
-			/* someone lost a heartbeat... */
-			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
-				  drv_pulse, mcp_pulse);
-		}
-	}
-
-	if (bp->state == BNX2X_STATE_OPEN)
-		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
-
-timer_restart:
-	mod_timer(&bp->timer, jiffies + bp->current_interval);
-}
-
-/* end of Statistics */
-
-/* nic init */
-
-/*
- * nic init service functions
- */
-
-static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
-{
-	int port = BP_PORT(bp);
-
-	/* "CSTORM" */
-	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
-			CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
-			CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
-	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
-			CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
-			CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
-}
-
-static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
-			  dma_addr_t mapping, int sb_id)
-{
-	int port = BP_PORT(bp);
-	int func = BP_FUNC(bp);
-	int index;
-	u64 section;
-
-	/* USTORM */
-	section = ((u64)mapping) + offsetof(struct host_status_block,
-					    u_status_block);
-	sb->u_status_block.status_block_id = sb_id;
-
-	REG_WR(bp, BAR_CSTRORM_INTMEM +
-	       CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
-	REG_WR(bp, BAR_CSTRORM_INTMEM +
-	       ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
-	       U64_HI(section));
-	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
-		CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
-
-	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
-		REG_WR16(bp, BAR_CSTRORM_INTMEM +
-			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
-
-	/* CSTORM */
-	section = ((u64)mapping) + offsetof(struct host_status_block,
-					    c_status_block);
-	sb->c_status_block.status_block_id = sb_id;
-
-	REG_WR(bp, BAR_CSTRORM_INTMEM +
-	       CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
-	REG_WR(bp, BAR_CSTRORM_INTMEM +
-	       ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
-	       U64_HI(section));
-	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
-		CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
-
-	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
-		REG_WR16(bp, BAR_CSTRORM_INTMEM +
-			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
-
-	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
-}
-
-static void bnx2x_zero_def_sb(struct bnx2x *bp)
-{
-	int func = BP_FUNC(bp);
-
-	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
-			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
-			sizeof(struct tstorm_def_status_block)/4);
-	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
-			CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
-			sizeof(struct cstorm_def_status_block_u)/4);
-	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
-			CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
-			sizeof(struct cstorm_def_status_block_c)/4);
-	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
-			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
-			sizeof(struct xstorm_def_status_block)/4);
-}
-
-static void bnx2x_init_def_sb(struct bnx2x *bp,
-			      struct host_def_status_block *def_sb,
-			      dma_addr_t mapping, int sb_id)
-{
-	int port = BP_PORT(bp);
-	int func = BP_FUNC(bp);
-	int index, val, reg_offset;
-	u64 section;
-
-	/* ATTN */
-	section = ((u64)mapping) + offsetof(struct host_def_status_block,
-					    atten_status_block);
-	def_sb->atten_status_block.status_block_id = sb_id;
-
-	bp->attn_state = 0;
-
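-	/* Cache the four AEU enable masks of each dynamic attention group
-	 * for later use when matching asserted attention bits. */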
-	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
-			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
-
-	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
-		bp->attn_group[index].sig[0] = REG_RD(bp,
-						     reg_offset + 0x10*index);
-		bp->attn_group[index].sig[1] = REG_RD(bp,
-					       reg_offset + 0x4 + 0x10*index);
-		bp->attn_group[index].sig[2] = REG_RD(bp,
-					       reg_offset + 0x8 + 0x10*index);
-		bp->attn_group[index].sig[3] = REG_RD(bp,
-					       reg_offset + 0xc + 0x10*index);
-	}
-
-	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
-			     HC_REG_ATTN_MSG0_ADDR_L);
-
-	REG_WR(bp, reg_offset, U64_LO(section));
-	REG_WR(bp, reg_offset + 4, U64_HI(section));
-
-	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
-
-	val = REG_RD(bp, reg_offset);
-	val |= sb_id;
-	REG_WR(bp, reg_offset, val);
-
-	/* USTORM */
-	section = ((u64)mapping) + offsetof(struct host_def_status_block,
-					    u_def_status_block);
-	def_sb->u_def_status_block.status_block_id = sb_id;
-
-	REG_WR(bp, BAR_CSTRORM_INTMEM +
-	       CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
-	REG_WR(bp, BAR_CSTRORM_INTMEM +
-	       ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
-	       U64_HI(section));
-	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
-		CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
-
-	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
-		REG_WR16(bp, BAR_CSTRORM_INTMEM +
-			 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
-
-	/* CSTORM */
-	section = ((u64)mapping) + offsetof(struct host_def_status_block,
-					    c_def_status_block);
-	def_sb->c_def_status_block.status_block_id = sb_id;
-
-	REG_WR(bp, BAR_CSTRORM_INTMEM +
-	       CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
-	REG_WR(bp, BAR_CSTRORM_INTMEM +
-	       ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
-	       U64_HI(section));
-	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
-		CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
-
-	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
-		REG_WR16(bp, BAR_CSTRORM_INTMEM +
-			 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
-
-	/* TSTORM */
-	section = ((u64)mapping) + offsetof(struct host_def_status_block,
-					    t_def_status_block);
-	def_sb->t_def_status_block.status_block_id = sb_id;
-
-	REG_WR(bp, BAR_TSTRORM_INTMEM +
-	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
-	REG_WR(bp, BAR_TSTRORM_INTMEM +
-	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
-	       U64_HI(section));
-	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
-		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
-
-	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
-		REG_WR16(bp, BAR_TSTRORM_INTMEM +
-			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
-
-	/* XSTORM */
-	section = ((u64)mapping) + offsetof(struct host_def_status_block,
-					    x_def_status_block);
-	def_sb->x_def_status_block.status_block_id = sb_id;
-
-	REG_WR(bp, BAR_XSTRORM_INTMEM +
-	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
-	REG_WR(bp, BAR_XSTRORM_INTMEM +
-	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
-	       U64_HI(section));
-	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
-		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
-
-	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
-		REG_WR16(bp, BAR_XSTRORM_INTMEM +
-			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
-
-	bp->stats_pending = 0;
-	bp->set_mac_pending = 0;
-
-	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
-}
-
-static void bnx2x_update_coalesce(struct bnx2x *bp)
-{
-	int port = BP_PORT(bp);
-	int i;
-
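-	/* Program per-queue Rx/Tx coalescing timeouts (in 4*BNX2X_BTR units);
-	 * a zero timeout disables host coalescing for that SB index. */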
-	for_each_queue(bp, i) {
-		int sb_id = bp->fp[i].sb_id;
-
-		/* HC_INDEX_U_ETH_RX_CQ_CONS */
-		REG_WR8(bp, BAR_CSTRORM_INTMEM +
-			CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
-						      U_SB_ETH_RX_CQ_INDEX),
-			bp->rx_ticks/(4 * BNX2X_BTR));
-		REG_WR16(bp, BAR_CSTRORM_INTMEM +
-			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
-						       U_SB_ETH_RX_CQ_INDEX),
-			 (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
-
-		/* HC_INDEX_C_ETH_TX_CQ_CONS */
-		REG_WR8(bp, BAR_CSTRORM_INTMEM +
-			CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
-						      C_SB_ETH_TX_CQ_INDEX),
-			bp->tx_ticks/(4 * BNX2X_BTR));
-		REG_WR16(bp, BAR_CSTRORM_INTMEM +
-			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
-						       C_SB_ETH_TX_CQ_INDEX),
-			 (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
-	}
-}
-
-static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
-				       struct bnx2x_fastpath *fp, int last)
-{
-	int i;
-
-	for (i = 0; i < last; i++) {
-		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
-		struct sk_buff *skb = rx_buf->skb;
-
-		if (skb == NULL) {
-			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
-			continue;
-		}
-
-		if (fp->tpa_state[i] == BNX2X_TPA_START)
-			dma_unmap_single(&bp->pdev->dev,
-					 dma_unmap_addr(rx_buf, mapping),
-					 bp->rx_buf_size, DMA_FROM_DEVICE);
-
-		dev_kfree_skb(skb);
-		rx_buf->skb = NULL;
-	}
-}
-
-static void bnx2x_init_rx_rings(struct bnx2x *bp)
-{
-	int func = BP_FUNC(bp);
-	int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
-					      ETH_MAX_AGGREGATION_QUEUES_E1H;
-	u16 ring_prod, cqe_ring_prod;
-	int i, j;
-
-	bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
-	DP(NETIF_MSG_IFUP,
-	   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
-
-	if (bp->flags & TPA_ENABLE_FLAG) {
-
-		for_each_queue(bp, j) {
-			struct bnx2x_fastpath *fp = &bp->fp[j];
-
-			for (i = 0; i < max_agg_queues; i++) {
-				fp->tpa_pool[i].skb =
-				   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
-				if (!fp->tpa_pool[i].skb) {
-					BNX2X_ERR("Failed to allocate TPA "
-						  "skb pool for queue[%d] - "
-						  "disabling TPA on this "
-						  "queue!\n", j);
-					bnx2x_free_tpa_pool(bp, fp, i);
-					fp->disable_tpa = 1;
-					break;
-				}
-				dma_unmap_addr_set((struct sw_rx_bd *)
-							&bp->fp->tpa_pool[i],
-						   mapping, 0);
-				fp->tpa_state[i] = BNX2X_TPA_STOP;
-			}
-		}
-	}
-
-	for_each_queue(bp, j) {
-		struct bnx2x_fastpath *fp = &bp->fp[j];
-
-		fp->rx_bd_cons = 0;
-		fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
-		fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
-
-		/* "next page" elements initialization */
-		/* SGE ring */
-		for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
-			struct eth_rx_sge *sge;
-
-			sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
-			sge->addr_hi =
-				cpu_to_le32(U64_HI(fp->rx_sge_mapping +
-					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
-			sge->addr_lo =
-				cpu_to_le32(U64_LO(fp->rx_sge_mapping +
-					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
-		}
-
-		bnx2x_init_sge_ring_bit_mask(fp);
-
-		/* RX BD ring */
-		for (i = 1; i <= NUM_RX_RINGS; i++) {
-			struct eth_rx_bd *rx_bd;
-
-			rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
-			rx_bd->addr_hi =
-				cpu_to_le32(U64_HI(fp->rx_desc_mapping +
-					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
-			rx_bd->addr_lo =
-				cpu_to_le32(U64_LO(fp->rx_desc_mapping +
-					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
-		}
-
-		/* CQ ring */
-		for (i = 1; i <= NUM_RCQ_RINGS; i++) {
-			struct eth_rx_cqe_next_page *nextpg;
-
-			nextpg = (struct eth_rx_cqe_next_page *)
-				&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
-			nextpg->addr_hi =
-				cpu_to_le32(U64_HI(fp->rx_comp_mapping +
-					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
-			nextpg->addr_lo =
-				cpu_to_le32(U64_LO(fp->rx_comp_mapping +
-					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
-		}
-
-		/* Allocate SGEs and initialize the ring elements */
-		for (i = 0, ring_prod = 0;
-		     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
-
-			if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
-				BNX2X_ERR("was only able to allocate "
-					  "%d rx sges\n", i);
-				BNX2X_ERR("disabling TPA for queue[%d]\n", j);
-				/* Cleanup already allocated elements */
-				bnx2x_free_rx_sge_range(bp, fp, ring_prod);
-				bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
-				fp->disable_tpa = 1;
-				ring_prod = 0;
-				break;
-			}
-			ring_prod = NEXT_SGE_IDX(ring_prod);
-		}
-		fp->rx_sge_prod = ring_prod;
-
-		/* Allocate BDs and initialize BD ring */
-		fp->rx_comp_cons = 0;
-		cqe_ring_prod = ring_prod = 0;
-		for (i = 0; i < bp->rx_ring_size; i++) {
-			if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
-				BNX2X_ERR("was only able to allocate "
-					  "%d rx skbs on queue[%d]\n", i, j);
-				fp->eth_q_stats.rx_skb_alloc_failed++;
-				break;
-			}
-			ring_prod = NEXT_RX_IDX(ring_prod);
-			cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
-			WARN_ON(ring_prod <= i);
-		}
-
-		fp->rx_bd_prod = ring_prod;
-		/* must not have more available CQEs than BDs */
-		fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
-					 cqe_ring_prod);
-		fp->rx_pkt = fp->rx_calls = 0;
-
-		/* Warning!
-		 * This will generate an interrupt (to the TSTORM) and
-		 * must only be done after the chip is initialized.
-		 */
-		bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
-				     fp->rx_sge_prod);
-		if (j != 0)
-			continue;
-
-		REG_WR(bp, BAR_USTRORM_INTMEM +
-		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
-		       U64_LO(fp->rx_comp_mapping));
-		REG_WR(bp, BAR_USTRORM_INTMEM +
-		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
-		       U64_HI(fp->rx_comp_mapping));
-	}
-}
-
-static void bnx2x_init_tx_ring(struct bnx2x *bp)
-{
-	int i, j;
-
-	for_each_queue(bp, j) {
-		struct bnx2x_fastpath *fp = &bp->fp[j];
-
-		for (i = 1; i <= NUM_TX_RINGS; i++) {
-			struct eth_tx_next_bd *tx_next_bd =
-				&fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
-
-			tx_next_bd->addr_hi =
-				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
-					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
-			tx_next_bd->addr_lo =
-				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
-					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
-		}
-
-		fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
-		fp->tx_db.data.zero_fill1 = 0;
-		fp->tx_db.data.prod = 0;
-
-		fp->tx_pkt_prod = 0;
-		fp->tx_pkt_cons = 0;
-		fp->tx_bd_prod = 0;
-		fp->tx_bd_cons = 0;
-		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
-		fp->tx_pkt = 0;
-	}
-}
-
-static void bnx2x_init_sp_ring(struct bnx2x *bp)
-{
-	int func = BP_FUNC(bp);
-
-	spin_lock_init(&bp->spq_lock);
-
-	bp->spq_left = MAX_SPQ_PENDING;
-	bp->spq_prod_idx = 0;
-	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
-	bp->spq_prod_bd = bp->spq;
-	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
-
-	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
-	       U64_LO(bp->spq_mapping));
-	REG_WR(bp,
-	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
-	       U64_HI(bp->spq_mapping));
-
-	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
-	       bp->spq_prod_idx);
-}
-
-static void bnx2x_init_context(struct bnx2x *bp)
-{
-	int i;
-
-	/* Rx */
-	for_each_queue(bp, i) {
-		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
-		struct bnx2x_fastpath *fp = &bp->fp[i];
-		u8 cl_id = fp->cl_id;
-
-		context->ustorm_st_context.common.sb_index_numbers =
-						BNX2X_RX_SB_INDEX_NUM;
-		context->ustorm_st_context.common.clientId = cl_id;
-		context->ustorm_st_context.common.status_block_id = fp->sb_id;
-		context->ustorm_st_context.common.flags =
-			(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
-			 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
-		context->ustorm_st_context.common.statistics_counter_id =
-						cl_id;
-		context->ustorm_st_context.common.mc_alignment_log_size =
-						BNX2X_RX_ALIGN_SHIFT;
-		context->ustorm_st_context.common.bd_buff_size =
-						bp->rx_buf_size;
-		context->ustorm_st_context.common.bd_page_base_hi =
-						U64_HI(fp->rx_desc_mapping);
-		context->ustorm_st_context.common.bd_page_base_lo =
-						U64_LO(fp->rx_desc_mapping);
-		if (!fp->disable_tpa) {
-			context->ustorm_st_context.common.flags |=
-				USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
-			context->ustorm_st_context.common.sge_buff_size =
-				(u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE,
-					   0xffff);
-			context->ustorm_st_context.common.sge_page_base_hi =
-						U64_HI(fp->rx_sge_mapping);
-			context->ustorm_st_context.common.sge_page_base_lo =
-						U64_LO(fp->rx_sge_mapping);
-
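-			/* First the number of SGE pages an MTU-sized packet
-			 * needs, then rounded up to whole SGE elements of
-			 * PAGES_PER_SGE pages each: */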
-			context->ustorm_st_context.common.max_sges_for_packet =
-				SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
-			context->ustorm_st_context.common.max_sges_for_packet =
-				((context->ustorm_st_context.common.
-				  max_sges_for_packet + PAGES_PER_SGE - 1) &
-				 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
-		}
-
-		context->ustorm_ag_context.cdu_usage =
-			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
-					       CDU_REGION_NUMBER_UCM_AG,
-					       ETH_CONNECTION_TYPE);
-
-		context->xstorm_ag_context.cdu_reserved =
-			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
-					       CDU_REGION_NUMBER_XCM_AG,
-					       ETH_CONNECTION_TYPE);
-	}
-
-	/* Tx */
-	for_each_queue(bp, i) {
-		struct bnx2x_fastpath *fp = &bp->fp[i];
-		struct eth_context *context =
-			bnx2x_sp(bp, context[i].eth);
-
-		context->cstorm_st_context.sb_index_number =
-						C_SB_ETH_TX_CQ_INDEX;
-		context->cstorm_st_context.status_block_id = fp->sb_id;
-
-		context->xstorm_st_context.tx_bd_page_base_hi =
-						U64_HI(fp->tx_desc_mapping);
-		context->xstorm_st_context.tx_bd_page_base_lo =
-						U64_LO(fp->tx_desc_mapping);
-		context->xstorm_st_context.statistics_data = (fp->cl_id |
-				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
-	}
-}
-
-static void bnx2x_init_ind_table(struct bnx2x *bp)
-{
-	int func = BP_FUNC(bp);
-	int i;
-
-	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
-		return;
-
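-	/* Entries are spread round-robin over the active queues, starting
-	 * from the leading client id. */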
-	DP(NETIF_MSG_IFUP,
-	   "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
-	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
-		REG_WR8(bp, BAR_TSTRORM_INTMEM +
-			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
-			bp->fp->cl_id + (i % bp->num_queues));
-}
-
-static void bnx2x_set_client_config(struct bnx2x *bp)
-{
-	struct tstorm_eth_client_config tstorm_client = {0};
-	int port = BP_PORT(bp);
-	int i;
-
-	tstorm_client.mtu = bp->dev->mtu;
-	tstorm_client.config_flags =
-				(TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
-				 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
-#ifdef BCM_VLAN
-	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
-		tstorm_client.config_flags |=
-				TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
-		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
-	}
-#endif
-
-	for_each_queue(bp, i) {
-		tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
-
-		REG_WR(bp, BAR_TSTRORM_INTMEM +
-		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
-		       ((u32 *)&tstorm_client)[0]);
-		REG_WR(bp, BAR_TSTRORM_INTMEM +
-		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
-		       ((u32 *)&tstorm_client)[1]);
-	}
-
-	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
-	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
-}
-
-static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
-{
-	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
-	int mode = bp->rx_mode;
-	int mask = bp->rx_mode_cl_mask;
-	int func = BP_FUNC(bp);
-	int port = BP_PORT(bp);
-	int i;
-	/* All but management unicast packets should pass to the host as well */
-	u32 llh_mask =
-		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
-		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
-		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
-		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
-
-	DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
-
-	switch (mode) {
-	case BNX2X_RX_MODE_NONE: /* no Rx */
-		tstorm_mac_filter.ucast_drop_all = mask;
-		tstorm_mac_filter.mcast_drop_all = mask;
-		tstorm_mac_filter.bcast_drop_all = mask;
-		break;
-
-	case BNX2X_RX_MODE_NORMAL:
-		tstorm_mac_filter.bcast_accept_all = mask;
-		break;
-
-	case BNX2X_RX_MODE_ALLMULTI:
-		tstorm_mac_filter.mcast_accept_all = mask;
-		tstorm_mac_filter.bcast_accept_all = mask;
-		break;
-
-	case BNX2X_RX_MODE_PROMISC:
-		tstorm_mac_filter.ucast_accept_all = mask;
-		tstorm_mac_filter.mcast_accept_all = mask;
-		tstorm_mac_filter.bcast_accept_all = mask;
-		/* pass management unicast packets as well */
-		llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
-		break;
-
-	default:
-		BNX2X_ERR("BAD rx mode (%d)\n", mode);
-		break;
-	}
-
-	REG_WR(bp,
-	       (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
-	       llh_mask);
-
-	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
-		REG_WR(bp, BAR_TSTRORM_INTMEM +
-		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
-		       ((u32 *)&tstorm_mac_filter)[i]);
-
-/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
-		   ((u32 *)&tstorm_mac_filter)[i]); */
-	}
-
-	if (mode != BNX2X_RX_MODE_NONE)
-		bnx2x_set_client_config(bp);
-}
-
-static void bnx2x_init_internal_common(struct bnx2x *bp)
-{
-	int i;
-
-	/* Zero this manually as its initialization is
-	   currently missing in the initTool */
-	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
-		REG_WR(bp, BAR_USTRORM_INTMEM +
-		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
-}
-
-static void bnx2x_init_internal_port(struct bnx2x *bp)
-{
-	int port = BP_PORT(bp);
-
-	REG_WR(bp,
-	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
-	REG_WR(bp,
-	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
-	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
-	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
-}
-
-static void bnx2x_init_internal_func(struct bnx2x *bp)
-{
-	struct tstorm_eth_function_common_config tstorm_config = {0};
-	struct stats_indication_flags stats_flags = {0};
-	int port = BP_PORT(bp);
-	int func = BP_FUNC(bp);
-	int i, j;
-	u32 offset;
-	u16 max_agg_size;
-
-	tstorm_config.config_flags = RSS_FLAGS(bp);
-
-	if (is_multi(bp))
-		tstorm_config.rss_result_mask = MULTI_MASK;
-
-	/* Enable TPA if needed */
-	if (bp->flags & TPA_ENABLE_FLAG)
-		tstorm_config.config_flags |=
-			TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
-
-	if (IS_E1HMF(bp))
-		tstorm_config.config_flags |=
-				TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
-
-	tstorm_config.leading_client_id = BP_L_ID(bp);
-
-	REG_WR(bp, BAR_TSTRORM_INTMEM +
-	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
-	       (*(u32 *)&tstorm_config));
-
-	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
-	bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
-	bnx2x_set_storm_rx_mode(bp);
-
-	for_each_queue(bp, i) {
-		u8 cl_id = bp->fp[i].cl_id;
-
-		/* reset xstorm per client statistics */
-		offset = BAR_XSTRORM_INTMEM +
-			 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
-		for (j = 0;
-		     j < sizeof(struct xstorm_per_client_stats) / 4; j++)
-			REG_WR(bp, offset + j*4, 0);
-
-		/* reset tstorm per client statistics */
-		offset = BAR_TSTRORM_INTMEM +
-			 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
-		for (j = 0;
-		     j < sizeof(struct tstorm_per_client_stats) / 4; j++)
-			REG_WR(bp, offset + j*4, 0);
-
-		/* reset ustorm per client statistics */
-		offset = BAR_USTRORM_INTMEM +
-			 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
-		for (j = 0;
-		     j < sizeof(struct ustorm_per_client_stats) / 4; j++)
-			REG_WR(bp, offset + j*4, 0);
-	}
-
-	/* Init statistics related context */
-	stats_flags.collect_eth = 1;
-
-	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
-	       ((u32 *)&stats_flags)[0]);
-	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
-	       ((u32 *)&stats_flags)[1]);
-
-	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
-	       ((u32 *)&stats_flags)[0]);
-	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
-	       ((u32 *)&stats_flags)[1]);
-
-	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
-	       ((u32 *)&stats_flags)[0]);
-	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
-	       ((u32 *)&stats_flags)[1]);
-
-	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
-	       ((u32 *)&stats_flags)[0]);
-	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
-	       ((u32 *)&stats_flags)[1]);
-
-	REG_WR(bp, BAR_XSTRORM_INTMEM +
-	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
-	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
-	REG_WR(bp, BAR_XSTRORM_INTMEM +
-	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
-	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
-
-	REG_WR(bp, BAR_TSTRORM_INTMEM +
-	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
-	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
-	REG_WR(bp, BAR_TSTRORM_INTMEM +
-	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
-	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
-
-	REG_WR(bp, BAR_USTRORM_INTMEM +
-	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
-	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
-	REG_WR(bp, BAR_USTRORM_INTMEM +
-	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
-	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
-
-	if (CHIP_IS_E1H(bp)) {
-		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
-			IS_E1HMF(bp));
-		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
-			IS_E1HMF(bp));
-		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
-			IS_E1HMF(bp));
-		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
-			IS_E1HMF(bp));
-
-		REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
-			 bp->e1hov);
-	}
-
-	/* Init CQ ring mapping and aggregation size; the FW limit is 8 frags */
-	max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) *
-				   SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
-	for_each_queue(bp, i) {
-		struct bnx2x_fastpath *fp = &bp->fp[i];
-
-		REG_WR(bp, BAR_USTRORM_INTMEM +
-		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
-		       U64_LO(fp->rx_comp_mapping));
-		REG_WR(bp, BAR_USTRORM_INTMEM +
-		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
-		       U64_HI(fp->rx_comp_mapping));
-
-		/* Next page */
-		REG_WR(bp, BAR_USTRORM_INTMEM +
-		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
-		       U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
-		REG_WR(bp, BAR_USTRORM_INTMEM +
-		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
-		       U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
-
-		REG_WR16(bp, BAR_USTRORM_INTMEM +
-			 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
-			 max_agg_size);
-	}
-
-	/* dropless flow control */
-	if (CHIP_IS_E1H(bp)) {
-		struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
-
-		rx_pause.bd_thr_low = 250;
-		rx_pause.cqe_thr_low = 250;
-		rx_pause.cos = 1;
-		rx_pause.sge_thr_low = 0;
-		rx_pause.bd_thr_high = 350;
-		rx_pause.cqe_thr_high = 350;
-		rx_pause.sge_thr_high = 0;
-
-		for_each_queue(bp, i) {
-			struct bnx2x_fastpath *fp = &bp->fp[i];
-
-			if (!fp->disable_tpa) {
-				rx_pause.sge_thr_low = 150;
-				rx_pause.sge_thr_high = 250;
-			}
-
-			offset = BAR_USTRORM_INTMEM +
-				 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
-								   fp->cl_id);
-			for (j = 0;
-			     j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
-			     j++)
-				REG_WR(bp, offset + j*4,
-				       ((u32 *)&rx_pause)[j]);
-		}
-	}
-
-	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
-
-	/* Init rate shaping and fairness contexts */
-	if (IS_E1HMF(bp)) {
-		int vn;
-
-		/* During init there is no active link.
-		   Until the link is up, set the link rate to 10Gbps */
-		bp->link_vars.line_speed = SPEED_10000;
-		bnx2x_init_port_minmax(bp);
-
-		if (!BP_NOMCP(bp))
-			bp->mf_config =
-			      SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
-		bnx2x_calc_vn_weight_sum(bp);
-
-		for (vn = VN_0; vn < E1HVN_MAX; vn++)
-			bnx2x_init_vn_minmax(bp, 2*vn + port);
-
-		/* Enable rate shaping and fairness */
-		bp->cmng.flags.cmng_enables |=
-					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
-
-	} else {
-		/* rate shaping and fairness are disabled */
-		DP(NETIF_MSG_IFUP,
-		   "single function mode  minmax will be disabled\n");
-	}
-
-	/* Store cmng structures to internal memory */
-	if (bp->port.pmf)
-		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
-			REG_WR(bp, BAR_XSTRORM_INTMEM +
-			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
-			       ((u32 *)(&bp->cmng))[i]);
-}
-
-static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
-{
-	switch (load_code) {
-	case FW_MSG_CODE_DRV_LOAD_COMMON:
-		bnx2x_init_internal_common(bp);
-		/* no break */
-
-	case FW_MSG_CODE_DRV_LOAD_PORT:
-		bnx2x_init_internal_port(bp);
-		/* no break */
-
-	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
-		bnx2x_init_internal_func(bp);
-		break;
-
-	default:
-		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
-		break;
-	}
-}
-
-static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
-{
-	int i;
-
-	for_each_queue(bp, i) {
-		struct bnx2x_fastpath *fp = &bp->fp[i];
-
-		fp->bp = bp;
-		fp->state = BNX2X_FP_STATE_CLOSED;
-		fp->index = i;
-		fp->cl_id = BP_L_ID(bp) + i;
-#ifdef BCM_CNIC
-		fp->sb_id = fp->cl_id + 1;
-#else
-		fp->sb_id = fp->cl_id;
-#endif
-		DP(NETIF_MSG_IFUP,
-		   "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
-		   i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
-		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
-			      fp->sb_id);
-		bnx2x_update_fpsb_idx(fp);
-	}
-
-	/* ensure status block indices were read */
-	rmb();
-
-	bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
-			  DEF_SB_ID);
-	bnx2x_update_dsb_idx(bp);
-	bnx2x_update_coalesce(bp);
-	bnx2x_init_rx_rings(bp);
-	bnx2x_init_tx_ring(bp);
-	bnx2x_init_sp_ring(bp);
-	bnx2x_init_context(bp);
-	bnx2x_init_internal(bp, load_code);
-	bnx2x_init_ind_table(bp);
-	bnx2x_stats_init(bp);
-
-	/* At this point, we are ready for interrupts */
-	atomic_set(&bp->intr_sem, 0);
-
-	/* flush all before enabling interrupts */
-	mb();
-	mmiowb();
-
-	bnx2x_int_enable(bp);
-
-	/* Check for SPIO5 */
-	bnx2x_attn_int_deasserted0(bp,
-		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
-				   AEU_INPUTS_ATTN_BITS_SPIO5);
-}
-
-/* end of nic init */
-
-/*
- * gzip service functions
- */
-
-static int bnx2x_gunzip_init(struct bnx2x *bp)
-{
-	bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
-					    &bp->gunzip_mapping, GFP_KERNEL);
-	if (bp->gunzip_buf  == NULL)
-		goto gunzip_nomem1;
-
-	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
-	if (bp->strm  == NULL)
-		goto gunzip_nomem2;
-
-	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
-				      GFP_KERNEL);
-	if (bp->strm->workspace == NULL)
-		goto gunzip_nomem3;
-
-	return 0;
-
-gunzip_nomem3:
-	kfree(bp->strm);
-	bp->strm = NULL;
-
-gunzip_nomem2:
-	dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
-			  bp->gunzip_mapping);
-	bp->gunzip_buf = NULL;
-
-gunzip_nomem1:
-	netdev_err(bp->dev, "Cannot allocate firmware buffer for"
-	       " decompression\n");
-	return -ENOMEM;
-}
-
-static void bnx2x_gunzip_end(struct bnx2x *bp)
-{
-	kfree(bp->strm->workspace);
-
-	kfree(bp->strm);
-	bp->strm = NULL;
-
-	if (bp->gunzip_buf) {
-		dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
-				  bp->gunzip_mapping);
-		bp->gunzip_buf = NULL;
-	}
-}
-
-static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
-{
-	int n, rc;
-
-	/* check gzip header */
-	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
-		BNX2X_ERR("Bad gzip header\n");
-		return -EINVAL;
-	}
-
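-	/* Skip the fixed 10-byte gzip header and, if the FNAME flag is set,
-	 * the NUL-terminated original file name that follows it. */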
-	n = 10;
-
-#define FNAME				0x8
-
-	if (zbuf[3] & FNAME)
-		while ((zbuf[n++] != 0) && (n < len));
-
-	bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
-	bp->strm->avail_in = len - n;
-	bp->strm->next_out = bp->gunzip_buf;
-	bp->strm->avail_out = FW_BUF_SIZE;
-
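-	/* Negative windowBits tells zlib to expect a raw deflate stream
-	 * (no zlib header), matching the gzip framing parsed above. */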
-	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
-	if (rc != Z_OK)
-		return rc;
-
-	rc = zlib_inflate(bp->strm, Z_FINISH);
-	if ((rc != Z_OK) && (rc != Z_STREAM_END))
-		netdev_err(bp->dev, "Firmware decompression error: %s\n",
-			   bp->strm->msg);
-
-	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
-	if (bp->gunzip_outlen & 0x3)
-		netdev_err(bp->dev, "Firmware decompression error:"
-				    " gunzip_outlen (%d) not aligned\n",
-				bp->gunzip_outlen);
-	bp->gunzip_outlen >>= 2;
-
-	zlib_inflateEnd(bp->strm);
-
-	if (rc == Z_STREAM_END)
-		return 0;
-
-	return rc;
-}
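-
-/* A note on bnx2x_gunzip() above: the fixed 10-byte gzip header (plus the
- * NUL-terminated original-file-name field when the FNAME flag, bit 3, is
- * set) is skipped by hand; the other optional gzip fields are presumably
- * never set in the firmware images.  zlib_inflateInit2() is then called
- * with negative windowBits (-MAX_WBITS), which tells zlib to expect a raw
- * deflate stream with no zlib wrapper.  gunzip_outlen is checked for
- * 32-bit alignment and converted to words (>>= 2), apparently because the
- * init code consumes the decompressed data as u32s.
- */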
-
-/* nic load/unload */
-
-/*
- * General service functions
- */
-
-/* send a NIG loopback debug packet */
-static void bnx2x_lb_pckt(struct bnx2x *bp)
-{
-	u32 wb_write[3];
-
-	/* Ethernet source and destination addresses */
-	wb_write[0] = 0x55555555;
-	wb_write[1] = 0x55555555;
-	wb_write[2] = 0x20;		/* SOP */
-	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
-
-	/* NON-IP protocol */
-	wb_write[0] = 0x09000000;
-	wb_write[1] = 0x55555555;
-	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
-	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
-}
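-
-/* The two 3-dword writes above appear to emit one minimal debug frame
- * through the NIG loopback: the first write carries 8 bytes of dummy
- * Ethernet addressing with the SOP control dword (0x20), the second a
- * bogus protocol word with the EOP control dword (0x10), for a 16-byte
- * (0x10) frame -- exactly the per-packet size the memory test below
- * polls for in NIG_REG_STAT2_BRB_OCTET.
- */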
-
-/* Some of the internal memories are not directly readable
- * from the driver; to test them we send debug packets.
- */
-static int bnx2x_int_mem_test(struct bnx2x *bp)
-{
-	int factor;
-	int count, i;
-	u32 val = 0;
-
-	if (CHIP_REV_IS_FPGA(bp))
-		factor = 120;
-	else if (CHIP_REV_IS_EMUL(bp))
-		factor = 200;
-	else
-		factor = 1;
-
-	DP(NETIF_MSG_HW, "start part1\n");
-
-	/* Disable inputs of parser neighbor blocks */
-	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
-	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
-	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
-	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
-
-	/*  Write 0 to parser credits for CFC search request */
-	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
-
-	/* send Ethernet packet */
-	bnx2x_lb_pckt(bp);
-
-	/* TODO: do we need to reset the NIG statistics here? */
-	/* Wait until NIG register shows 1 packet of size 0x10 */
-	count = 1000 * factor;
-	while (count) {
-
-		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
-		val = *bnx2x_sp(bp, wb_data[0]);
-		if (val == 0x10)
-			break;
-
-		msleep(10);
-		count--;
-	}
-	if (val != 0x10) {
-		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
-		return -1;
-	}
-
-	/* Wait until PRS register shows 1 packet */
-	count = 1000 * factor;
-	while (count) {
-		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
-		if (val == 1)
-			break;
-
-		msleep(10);
-		count--;
-	}
-	if (val != 0x1) {
-		BNX2X_ERR("PRS timeout val = 0x%x\n", val);
-		return -2;
-	}
-
-	/* Reset and init BRB, PRS */
-	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
-	msleep(50);
-	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
-	msleep(50);
-	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
-	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
-
-	DP(NETIF_MSG_HW, "part2\n");
-
-	/* Disable inputs of parser neighbor blocks */
-	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
-	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
-	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
-	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
-
-	/* Write 0 to parser credits for CFC search request */
-	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
-
-	/* send 10 Ethernet packets */
-	for (i = 0; i < 10; i++)
-		bnx2x_lb_pckt(bp);
-
-	/* Wait until NIG register shows 10 + 1
-	   packets of size 11*0x10 = 0xb0 */
-	count = 1000 * factor;
-	while (count) {
-
-		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
-		val = *bnx2x_sp(bp, wb_data[0]);
-		if (val == 0xb0)
-			break;
-
-		msleep(10);
-		count--;
-	}
-	if (val != 0xb0) {
-		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
-		return -3;
-	}
-
-	/* Wait until PRS register shows 2 packets */
-	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
-	if (val != 2)
-		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
-
-	/* Write 1 to parser credits for CFC search request */
-	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
-
-	/* Wait until PRS register shows 3 packets */
-	msleep(10 * factor);
-	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
-	if (val != 3)
-		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
-
-	/* clear NIG EOP FIFO */
-	for (i = 0; i < 11; i++)
-		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
-	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
-	if (val != 1) {
-		BNX2X_ERR("clear of NIG failed\n");
-		return -4;
-	}
-
-	/* Reset and init BRB, PRS, NIG */
-	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
-	msleep(50);
-	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
-	msleep(50);
-	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
-	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
-#ifndef BCM_CNIC
-	/* set NIC mode */
-	REG_WR(bp, PRS_REG_NIC_MODE, 1);
-#endif
-
-	/* Enable inputs of parser neighbor blocks */
-	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
-	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
-	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
-	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
-
-	DP(NETIF_MSG_HW, "done\n");
-
-	return 0; /* OK */
-}
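-
-/* The self-test above pushes loopback debug packets through the BRB/PRS
- * path and checks the NIG and PRS counters after each stage; the distinct
- * negative return codes (-1..-4) identify which stage timed out.  BRB and
- * PRS are reset and re-initialized between stages and again at the end,
- * before the parser neighbor inputs are re-enabled.
- */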
-
-static void enable_blocks_attention(struct bnx2x *bp)
-{
-	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
-	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
-	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
-	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
-	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
-	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
-	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
-	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
-	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
-/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
-/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
-	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
-	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
-	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
-/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
-/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
-	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
-	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
-	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
-	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
-/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
-/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
-	if (CHIP_REV_IS_FPGA(bp))
-		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
-	else
-		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
-	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
-	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
-	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
-/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
-/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
-	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
-	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
-/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
-	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18);		/* bit 3,4 masked */
-}
-
-static const struct {
-	u32 addr;
-	u32 mask;
-} bnx2x_parity_mask[] = {
-	{PXP_REG_PXP_PRTY_MASK, 0xffffffff},
-	{PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
-	{PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
-	{HC_REG_HC_PRTY_MASK, 0xffffffff},
-	{MISC_REG_MISC_PRTY_MASK, 0xffffffff},
-	{QM_REG_QM_PRTY_MASK, 0x0},
-	{DORQ_REG_DORQ_PRTY_MASK, 0x0},
-	{GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
-	{GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
-	{SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
-	{CDU_REG_CDU_PRTY_MASK, 0x0},
-	{CFC_REG_CFC_PRTY_MASK, 0x0},
-	{DBG_REG_DBG_PRTY_MASK, 0x0},
-	{DMAE_REG_DMAE_PRTY_MASK, 0x0},
-	{BRB1_REG_BRB1_PRTY_MASK, 0x0},
-	{PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
-	{TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
-	{CSDM_REG_CSDM_PRTY_MASK, 0x8},	/* bit 3 */
-	{USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
-	{XSDM_REG_XSDM_PRTY_MASK, 0x8},	/* bit 3 */
-	{TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
-	{TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
-	{USEM_REG_USEM_PRTY_MASK_0, 0x0},
-	{USEM_REG_USEM_PRTY_MASK_1, 0x0},
-	{CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
-	{CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
-	{XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
-	{XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
-};
-
-static void enable_blocks_parity(struct bnx2x *bp)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(bnx2x_parity_mask); i++)
-		REG_WR(bp, bnx2x_parity_mask[i].addr,
-			bnx2x_parity_mask[i].mask);
-}
-
-
-static void bnx2x_reset_common(struct bnx2x *bp)
-{
-	/* reset_common */
-	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
-	       0xd3ffff7f);
-	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
-}
-
-static void bnx2x_init_pxp(struct bnx2x *bp)
-{
-	u16 devctl;
-	int r_order, w_order;
-
-	pci_read_config_word(bp->pdev,
-			     bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
-	DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
-	w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
-	if (bp->mrrs == -1)
-		r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
-	else {
-		DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
-		r_order = bp->mrrs;
-	}
-
-	bnx2x_init_pxp_arb(bp, r_order, w_order);
-}
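-
-/* The devctl decoding in bnx2x_init_pxp() follows the PCIe spec: bits 7:5
- * of the Device Control register encode Max_Payload_Size and bits 14:12
- * encode Max_Read_Request_Size, each as a power-of-two order over 128
- * bytes (size = 128 << order, so order 0 is 128 bytes, 1 is 256, ...).
- * bnx2x_init_pxp_arb() presumably maps these orders onto the PXP
- * arbiter's request sizing.
- */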
-
-static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
-{
-	int is_required;
-	u32 val;
-	int port;
-
-	if (BP_NOMCP(bp))
-		return;
-
-	is_required = 0;
-	val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
-	      SHARED_HW_CFG_FAN_FAILURE_MASK;
-
-	if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
-		is_required = 1;
-
-	/*
-	 * The fan failure mechanism is usually related to the PHY type since
-	 * the power consumption of the board is affected by the PHY. Currently,
-	 * a fan is required for most designs with SFX7101, BCM8727 and BCM8481.
-	 */
-	else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
-		for (port = PORT_0; port < PORT_MAX; port++) {
-			u32 phy_type =
-				SHMEM_RD(bp, dev_info.port_hw_config[port].
-					 external_phy_config) &
-				PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
-			is_required |=
-				((phy_type ==
-				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
-				 (phy_type ==
-				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
-				 (phy_type ==
-				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
-		}
-
-	DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
-
-	if (is_required == 0)
-		return;
-
-	/* Fan failure is indicated by SPIO 5 */
-	bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
-		       MISC_REGISTERS_SPIO_INPUT_HI_Z);
-
-	/* set to active low mode */
-	val = REG_RD(bp, MISC_REG_SPIO_INT);
-	val |= ((1 << MISC_REGISTERS_SPIO_5) <<
-					MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
-	REG_WR(bp, MISC_REG_SPIO_INT, val);
-
-	/* enable interrupt to signal the IGU */
-	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
-	val |= (1 << MISC_REGISTERS_SPIO_5);
-	REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
-}
-
-static int bnx2x_init_common(struct bnx2x *bp)
-{
-	u32 val, i;
-#ifdef BCM_CNIC
-	u32 wb_write[2];
-#endif
-
-	DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
-
-	bnx2x_reset_common(bp);
-	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
-	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
-
-	bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
-	if (CHIP_IS_E1H(bp))
-		REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
-
-	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
-	msleep(30);
-	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
-
-	bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
-	if (CHIP_IS_E1(bp)) {
-		/* enable HW interrupt from PXP on USDM overflow
-		   bit 16 on INT_MASK_0 */
-		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
-	}
-
-	bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
-	bnx2x_init_pxp(bp);
-
-#ifdef __BIG_ENDIAN
-	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
-	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
-	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
-	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
-	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
-	/* make sure this value is 0 */
-	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
-
-/*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
-	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
-	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
-	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
-	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
-#endif
-
-	REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
-#ifdef BCM_CNIC
-	REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
-	REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
-	REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
-#endif
-
-	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
-		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
-
-	/* let the HW do its magic ... */
-	msleep(100);
-	/* finish PXP init */
-	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
-	if (val != 1) {
-		BNX2X_ERR("PXP2 CFG failed\n");
-		return -EBUSY;
-	}
-	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
-	if (val != 1) {
-		BNX2X_ERR("PXP2 RD_INIT failed\n");
-		return -EBUSY;
-	}
-
-	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
-	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
-
-	bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
-
-	/* clean the DMAE memory */
-	bp->dmae_ready = 1;
-	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
-
-	bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
-	bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
-	bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
-	bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
-
-	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
-	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
-	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
-	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
-
-	bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
-
-#ifdef BCM_CNIC
-	wb_write[0] = 0;
-	wb_write[1] = 0;
-	for (i = 0; i < 64; i++) {
-		REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
-		bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
-
-		if (CHIP_IS_E1H(bp)) {
-			REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
-			bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
-					  wb_write, 2);
-		}
-	}
-#endif
-	/* soft reset pulse */
-	REG_WR(bp, QM_REG_SOFT_RESET, 1);
-	REG_WR(bp, QM_REG_SOFT_RESET, 0);
-
-#ifdef BCM_CNIC
-	bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
-#endif
-
-	bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
-	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
-	if (!CHIP_REV_IS_SLOW(bp)) {
-		/* enable hw interrupt from doorbell Q */
-		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
-	}
-
-	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
-	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
-	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
-#ifndef BCM_CNIC
-	/* set NIC mode */
-	REG_WR(bp, PRS_REG_NIC_MODE, 1);
-#endif
-	if (CHIP_IS_E1H(bp))
-		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
-
-	bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
-	bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
-	bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
-	bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
-
-	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
-	bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
-	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
-	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
-
-	bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
-	bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
-	bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
-	bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
-
-	/* sync semi rtc */
-	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
-	       0x80000000);
-	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
-	       0x80000000);
-
-	bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
-	bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
-	bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
-
-	REG_WR(bp, SRC_REG_SOFT_RST, 1);
-	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
-		REG_WR(bp, i, random32());
-	bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
-#ifdef BCM_CNIC
-	REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
-	REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
-	REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
-	REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
-	REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
-	REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
-	REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
-	REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
-	REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
-	REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
-#endif
-	REG_WR(bp, SRC_REG_SOFT_RST, 0);
-
-	if (sizeof(union cdu_context) != 1024)
-		/* we currently assume that a context is 1024 bytes */
-		dev_alert(&bp->pdev->dev, "please adjust the size "
-					  "of cdu_context(%ld)\n",
-			 (long)sizeof(union cdu_context));
-
-	bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
-	val = (4 << 24) + (0 << 12) + 1024;
-	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
-
-	bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
-	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
-	/* enable context validation interrupt from CFC */
-	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
-
-	/* set the thresholds to prevent CFC/CDU race */
-	REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
-
-	bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
-	bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
-
-	bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
-	/* Reset PCIE errors for debug */
-	REG_WR(bp, 0x2814, 0xffffffff);
-	REG_WR(bp, 0x3820, 0xffffffff);
-
-	bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
-	bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
-	bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
-	bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
-
-	bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
-	if (CHIP_IS_E1H(bp)) {
-		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
-		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
-	}
-
-	if (CHIP_REV_IS_SLOW(bp))
-		msleep(200);
-
-	/* finish CFC init */
-	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
-	if (val != 1) {
-		BNX2X_ERR("CFC LL_INIT failed\n");
-		return -EBUSY;
-	}
-	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
-	if (val != 1) {
-		BNX2X_ERR("CFC AC_INIT failed\n");
-		return -EBUSY;
-	}
-	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
-	if (val != 1) {
-		BNX2X_ERR("CFC CAM_INIT failed\n");
-		return -EBUSY;
-	}
-	REG_WR(bp, CFC_REG_DEBUG0, 0);
-
-	/* read the NIG statistic
-	   to see if this is our first bring-up since power-on */
-	bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
-	val = *bnx2x_sp(bp, wb_data[0]);
-
-	/* do internal memory self test */
-	if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
-		BNX2X_ERR("internal mem self test failed\n");
-		return -EBUSY;
-	}
-
-	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
-	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
-	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
-	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
-	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
-		bp->port.need_hw_lock = 1;
-		break;
-
-	default:
-		break;
-	}
-
-	bnx2x_setup_fan_failure_detection(bp);
-
-	/* clear PXP2 attentions */
-	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
-
-	enable_blocks_attention(bp);
-	if (CHIP_PARITY_SUPPORTED(bp))
-		enable_blocks_parity(bp);
-
-	if (!BP_NOMCP(bp)) {
-		bnx2x_acquire_phy_lock(bp);
-		bnx2x_common_init_phy(bp, bp->common.shmem_base);
-		bnx2x_release_phy_lock(bp);
-	} else
-		BNX2X_ERR("Bootcode is missing - can not initialize link\n");
-
-	return 0;
-}
-
-static int bnx2x_init_port(struct bnx2x *bp)
-{
-	int port = BP_PORT(bp);
-	int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
-	u32 low, high;
-	u32 val;
-
-	DP(BNX2X_MSG_MCP, "starting port init  port %d\n", port);
-
-	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
-
-	bnx2x_init_block(bp, PXP_BLOCK, init_stage);
-	bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
-
-	bnx2x_init_block(bp, TCM_BLOCK, init_stage);
-	bnx2x_init_block(bp, UCM_BLOCK, init_stage);
-	bnx2x_init_block(bp, CCM_BLOCK, init_stage);
-	bnx2x_init_block(bp, XCM_BLOCK, init_stage);
-
-#ifdef BCM_CNIC
-	REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
-
-	bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
-	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
-	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
-#endif
-
-	bnx2x_init_block(bp, DQ_BLOCK, init_stage);
-
-	bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
-	if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
-		/* no pause for emulation and FPGA */
-		low = 0;
-		high = 513;
-	} else {
-		if (IS_E1HMF(bp))
-			low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
-		else if (bp->dev->mtu > 4096) {
-			if (bp->flags & ONE_PORT_FLAG)
-				low = 160;
-			else {
-				val = bp->dev->mtu;
-				/* (24*1024 + val*4)/256 */
-				low = 96 + (val/64) + ((val % 64) ? 1 : 0);
-			}
-		} else
-			low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
-		high = low + 56;	/* 14*1024/256 */
-	}
-	REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
-	REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
-
-	bnx2x_init_block(bp, PRS_BLOCK, init_stage);
-
-	bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
-	bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
-	bnx2x_init_block(bp, USDM_BLOCK, init_stage);
-	bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
-
-	bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
-	bnx2x_init_block(bp, USEM_BLOCK, init_stage);
-	bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
-	bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
-
-	bnx2x_init_block(bp, UPB_BLOCK, init_stage);
-	bnx2x_init_block(bp, XPB_BLOCK, init_stage);
-
-	bnx2x_init_block(bp, PBF_BLOCK, init_stage);
-
-	/* configure PBF to work without PAUSE mtu 9000 */
-	REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
-
-	/* update threshold */
-	REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
-	/* update init credit */
-	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
-
-	/* probe changes */
-	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
-	msleep(5);
-	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
-
-#ifdef BCM_CNIC
-	bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
-#endif
-	bnx2x_init_block(bp, CDU_BLOCK, init_stage);
-	bnx2x_init_block(bp, CFC_BLOCK, init_stage);
-
-	if (CHIP_IS_E1(bp)) {
-		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
-		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
-	}
-	bnx2x_init_block(bp, HC_BLOCK, init_stage);
-
-	bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
-	/* init aeu_mask_attn_func_0/1:
-	 *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
-	 *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
-	 *             bits 4-7 are used for "per vn group attention" */
-	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
-	       (IS_E1HMF(bp) ? 0xF7 : 0x7));
-
-	bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
-	bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
-	bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
-	bnx2x_init_block(bp, DBU_BLOCK, init_stage);
-	bnx2x_init_block(bp, DBG_BLOCK, init_stage);
-
-	bnx2x_init_block(bp, NIG_BLOCK, init_stage);
-
-	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
-
-	if (CHIP_IS_E1H(bp)) {
-		/* 0x2 disable e1hov, 0x1 enable */
-		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
-		       (IS_E1HMF(bp) ? 0x1 : 0x2));
-
-		{
-			REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
-			REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
-			REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
-		}
-	}
-
-	bnx2x_init_block(bp, MCP_BLOCK, init_stage);
-	bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
-
-	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
-	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
-		{
-		u32 swap_val, swap_override, aeu_gpio_mask, offset;
-
-		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
-			       MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
-
-		/* The GPIO should be swapped if the swap register is
-		   set and active */
-		swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
-		swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
-
-		/* Select function upon port-swap configuration */
-		if (port == 0) {
-			offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
-			aeu_gpio_mask = (swap_val && swap_override) ?
-				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
-				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
-		} else {
-			offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
-			aeu_gpio_mask = (swap_val && swap_override) ?
-				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
-				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
-		}
-		val = REG_RD(bp, offset);
-		/* add GPIO3 to group */
-		val |= aeu_gpio_mask;
-		REG_WR(bp, offset, val);
-		}
-		break;
-
-	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
-	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
-		/* add SPIO 5 to group 0 */
-		{
-		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
-				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
-		val = REG_RD(bp, reg_addr);
-		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
-		REG_WR(bp, reg_addr, val);
-		}
-		break;
-
-	default:
-		break;
-	}
-
-	bnx2x__link_reset(bp);
-
-	return 0;
-}
-
-#define ILT_PER_FUNC		(768/2)
-#define FUNC_ILT_BASE(func)	(func * ILT_PER_FUNC)
-/* the physical address is shifted right 12 bits and has a 1=valid bit
-   added at the 53rd bit;
-   then, since this is a wide register(TM),
-   we split it into two 32 bit writes
- */
-#define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
-#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
-#define PXP_ONE_ILT(x)		(((x) << 10) | x)
-#define PXP_ILT_RANGE(f, l)	(((l) << 10) | f)
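-
-/* A worked example with a hypothetical address x = 0x12345678000:
- *   ONCHIP_ADDR1(x) = (x >> 12) & 0xFFFFFFFF = 0x12345678
- *   ONCHIP_ADDR2(x) = (1 << 20) | (x >> 44)  = 0x00100000
- * The low write holds bits 12..43 of the address, the high write holds
- * bits 44 and up, and the (1 << 20) is the valid bit -- bit 52 of the
- * combined entry, the "53rd bit" the comment above refers to.
- */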
-
-#ifdef BCM_CNIC
-#define CNIC_ILT_LINES		127
-#define CNIC_CTX_PER_ILT	16
-#else
-#define CNIC_ILT_LINES		0
-#endif
-
-static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
-{
-	int reg;
-
-	if (CHIP_IS_E1H(bp))
-		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
-	else /* E1 */
-		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
-
-	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
-}
-
-static int bnx2x_init_func(struct bnx2x *bp)
-{
-	int port = BP_PORT(bp);
-	int func = BP_FUNC(bp);
-	u32 addr, val;
-	int i;
-
-	DP(BNX2X_MSG_MCP, "starting func init  func %d\n", func);
-
-	/* set MSI reconfigure capability */
-	addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
-	val = REG_RD(bp, addr);
-	val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
-	REG_WR(bp, addr, val);
-
-	i = FUNC_ILT_BASE(func);
-
-	bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
-	if (CHIP_IS_E1H(bp)) {
-		REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
-		REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
-	} else /* E1 */
-		REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
-		       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
-
-#ifdef BCM_CNIC
-	i += 1 + CNIC_ILT_LINES;
-	bnx2x_ilt_wr(bp, i, bp->timers_mapping);
-	if (CHIP_IS_E1(bp))
-		REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
-	else {
-		REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
-		REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
-	}
-
-	i++;
-	bnx2x_ilt_wr(bp, i, bp->qm_mapping);
-	if (CHIP_IS_E1(bp))
-		REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
-	else {
-		REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
-		REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
-	}
-
-	i++;
-	bnx2x_ilt_wr(bp, i, bp->t1_mapping);
-	if (CHIP_IS_E1(bp))
-		REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
-	else {
-		REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
-		REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
-	}
-
-	/* tell the searcher where the T2 table is */
-	REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
-
-	bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
-		    U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
-
-	bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
-		    U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
-		    U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
-
-	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
-#endif
-
-	if (CHIP_IS_E1H(bp)) {
-		bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
-		bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
-		bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
-		bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
-		bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
-		bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
-		bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
-		bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
-		bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
-
-		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
-		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
-	}
-
-	/* HC init per function */
-	if (CHIP_IS_E1H(bp)) {
-		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
-
-		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
-		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
-	}
-	bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
-
-	/* Reset PCIE errors for debug */
-	REG_WR(bp, 0x2114, 0xffffffff);
-	REG_WR(bp, 0x2120, 0xffffffff);
-
-	return 0;
-}
-
-static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
-{
-	int i, rc = 0;
-
-	DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
-	   BP_FUNC(bp), load_code);
-
-	bp->dmae_ready = 0;
-	mutex_init(&bp->dmae_mutex);
-	rc = bnx2x_gunzip_init(bp);
-	if (rc)
-		return rc;
-
-	switch (load_code) {
-	case FW_MSG_CODE_DRV_LOAD_COMMON:
-		rc = bnx2x_init_common(bp);
-		if (rc)
-			goto init_hw_err;
-		/* no break */
-
-	case FW_MSG_CODE_DRV_LOAD_PORT:
-		bp->dmae_ready = 1;
-		rc = bnx2x_init_port(bp);
-		if (rc)
-			goto init_hw_err;
-		/* no break */
-
-	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
-		bp->dmae_ready = 1;
-		rc = bnx2x_init_func(bp);
-		if (rc)
-			goto init_hw_err;
-		break;
-
-	default:
-		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
-		break;
-	}
-
-	if (!BP_NOMCP(bp)) {
-		int func = BP_FUNC(bp);
-
-		bp->fw_drv_pulse_wr_seq =
-				(SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
-				 DRV_PULSE_SEQ_MASK);
-		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
-	}
-
-	/* this needs to be done before gunzip end */
-	bnx2x_zero_def_sb(bp);
-	for_each_queue(bp, i)
-		bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
-#ifdef BCM_CNIC
-	bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
-#endif
-
-init_hw_err:
-	bnx2x_gunzip_end(bp);
-
-	return rc;
-}
-
-static void bnx2x_free_mem(struct bnx2x *bp)
-{
-
-#define BNX2X_PCI_FREE(x, y, size) \
-	do { \
-		if (x) { \
-			dma_free_coherent(&bp->pdev->dev, size, x, y); \
-			x = NULL; \
-			y = 0; \
-		} \
-	} while (0)
-
-#define BNX2X_FREE(x) \
-	do { \
-		if (x) { \
-			vfree(x); \
-			x = NULL; \
-		} \
-	} while (0)
-
-	int i;
-
-	/* fastpath */
-	/* Common */
-	for_each_queue(bp, i) {
-
-		/* status blocks */
-		BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
-			       bnx2x_fp(bp, i, status_blk_mapping),
-			       sizeof(struct host_status_block));
-	}
-	/* Rx */
-	for_each_queue(bp, i) {
-
-		/* fastpath rx rings: rx_buf rx_desc rx_comp */
-		BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
-		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
-			       bnx2x_fp(bp, i, rx_desc_mapping),
-			       sizeof(struct eth_rx_bd) * NUM_RX_BD);
-
-		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
-			       bnx2x_fp(bp, i, rx_comp_mapping),
-			       sizeof(struct eth_fast_path_rx_cqe) *
-			       NUM_RCQ_BD);
-
-		/* SGE ring */
-		BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
-		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
-			       bnx2x_fp(bp, i, rx_sge_mapping),
-			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
-	}
-	/* Tx */
-	for_each_queue(bp, i) {
-
-		/* fastpath tx rings: tx_buf tx_desc */
-		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
-		BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
-			       bnx2x_fp(bp, i, tx_desc_mapping),
-			       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
-	}
-	/* end of fastpath */
-
-	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
-		       sizeof(struct host_def_status_block));
-
-	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
-		       sizeof(struct bnx2x_slowpath));
-
-#ifdef BCM_CNIC
-	BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
-	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
-	BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
-	BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
-	BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
-		       sizeof(struct host_status_block));
-#endif
-	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
-
-#undef BNX2X_PCI_FREE
-#undef BNX2X_FREE
-}
-
-static int bnx2x_alloc_mem(struct bnx2x *bp)
-{
-
-#define BNX2X_PCI_ALLOC(x, y, size) \
-	do { \
-		x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
-		if (x == NULL) \
-			goto alloc_mem_err; \
-		memset(x, 0, size); \
-	} while (0)
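-
-/* The do { } while (0) wrapper is the standard trick for making a
- * multi-statement macro act as a single statement, so the goto-on-failure
- * pattern above stays safe even inside an unbraced if/else body.
- */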
-
-#define BNX2X_ALLOC(x, size) \
-	do { \
-		x = vmalloc(size); \
-		if (x == NULL) \
-			goto alloc_mem_err; \
-		memset(x, 0, size); \
-	} while (0)
-
-	int i;
-
-	/* fastpath */
-	/* Common */
-	for_each_queue(bp, i) {
-		bnx2x_fp(bp, i, bp) = bp;
-
-		/* status blocks */
-		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
-				&bnx2x_fp(bp, i, status_blk_mapping),
-				sizeof(struct host_status_block));
-	}
-	/* Rx */
-	for_each_queue(bp, i) {
-
-		/* fastpath rx rings: rx_buf rx_desc rx_comp */
-		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
-				sizeof(struct sw_rx_bd) * NUM_RX_BD);
-		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
-				&bnx2x_fp(bp, i, rx_desc_mapping),
-				sizeof(struct eth_rx_bd) * NUM_RX_BD);
-
-		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
-				&bnx2x_fp(bp, i, rx_comp_mapping),
-				sizeof(struct eth_fast_path_rx_cqe) *
-				NUM_RCQ_BD);
-
-		/* SGE ring */
-		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
-				sizeof(struct sw_rx_page) * NUM_RX_SGE);
-		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
-				&bnx2x_fp(bp, i, rx_sge_mapping),
-				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
-	}
-	/* Tx */
-	for_each_queue(bp, i) {
-
-		/* fastpath tx rings: tx_buf tx_desc */
-		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
-				sizeof(struct sw_tx_bd) * NUM_TX_BD);
-		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
-				&bnx2x_fp(bp, i, tx_desc_mapping),
-				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
-	}
-	/* end of fastpath */
-
-	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
-			sizeof(struct host_def_status_block));
-
-	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
-			sizeof(struct bnx2x_slowpath));
-
-#ifdef BCM_CNIC
-	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
-
-	/* allocate the searcher T2 table;
-	   we allocate 1/4 of the T1 size for T2
-	   (T2 is not entered into the ILT) */
-	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
-
-	/* Initialize T2 (for 1024 connections) */
-	for (i = 0; i < 16*1024; i += 64)
-		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
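-
-	/* The loop above chains the T2 entries into a free list: the last
-	 * 8 bytes of each 64-byte entry (offset 56) hold the physical
-	 * address of the following entry, which is presumably the link the
-	 * searcher hardware walks.
-	 */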
-
-	/* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
-	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
-
-	/* QM queues (128*MAX_CONN) */
-	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
-
-	BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
-			sizeof(struct host_status_block));
-#endif
-
-	/* Slow path ring */
-	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
-
-	return 0;
-
-alloc_mem_err:
-	bnx2x_free_mem(bp);
-	return -ENOMEM;
-
-#undef BNX2X_PCI_ALLOC
-#undef BNX2X_ALLOC
-}
-
-static void bnx2x_free_tx_skbs(struct bnx2x *bp)
-{
-	int i;
-
-	for_each_queue(bp, i) {
-		struct bnx2x_fastpath *fp = &bp->fp[i];
-
-		u16 bd_cons = fp->tx_bd_cons;
-		u16 sw_prod = fp->tx_pkt_prod;
-		u16 sw_cons = fp->tx_pkt_cons;
-
-		while (sw_cons != sw_prod) {
-			bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
-			sw_cons++;
-		}
-	}
-}
-
-static void bnx2x_free_rx_skbs(struct bnx2x *bp)
-{
-	int i, j;
-
-	for_each_queue(bp, j) {
-		struct bnx2x_fastpath *fp = &bp->fp[j];
-
-		for (i = 0; i < NUM_RX_BD; i++) {
-			struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
-			struct sk_buff *skb = rx_buf->skb;
-
-			if (skb == NULL)
-				continue;
-
-			dma_unmap_single(&bp->pdev->dev,
-					 dma_unmap_addr(rx_buf, mapping),
-					 bp->rx_buf_size, DMA_FROM_DEVICE);
-
-			rx_buf->skb = NULL;
-			dev_kfree_skb(skb);
-		}
-		if (!fp->disable_tpa)
-			bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
-					    ETH_MAX_AGGREGATION_QUEUES_E1 :
-					    ETH_MAX_AGGREGATION_QUEUES_E1H);
-	}
-}
-
-static void bnx2x_free_skbs(struct bnx2x *bp)
-{
-	bnx2x_free_tx_skbs(bp);
-	bnx2x_free_rx_skbs(bp);
-}
-
-static void bnx2x_free_msix_irqs(struct bnx2x *bp)
-{
-	int i, offset = 1;
-
-	free_irq(bp->msix_table[0].vector, bp->dev);
-	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
-	   bp->msix_table[0].vector);
-
-#ifdef BCM_CNIC
-	offset++;
-#endif
-	for_each_queue(bp, i) {
-		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
-		   "state %x\n", i, bp->msix_table[i + offset].vector,
-		   bnx2x_fp(bp, i, state));
-
-		free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
-	}
-}
-
-static void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
-{
-	if (bp->flags & USING_MSIX_FLAG) {
-		if (!disable_only)
-			bnx2x_free_msix_irqs(bp);
-		pci_disable_msix(bp->pdev);
-		bp->flags &= ~USING_MSIX_FLAG;
-
-	} else if (bp->flags & USING_MSI_FLAG) {
-		if (!disable_only)
-			free_irq(bp->pdev->irq, bp->dev);
-		pci_disable_msi(bp->pdev);
-		bp->flags &= ~USING_MSI_FLAG;
-
-	} else if (!disable_only)
-		free_irq(bp->pdev->irq, bp->dev);
-}
-
-static int bnx2x_enable_msix(struct bnx2x *bp)
-{
-	int i, rc, offset = 1;
-	int igu_vec = 0;
-
-	bp->msix_table[0].entry = igu_vec;
-	DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
-
-#ifdef BCM_CNIC
-	igu_vec = BP_L_ID(bp) + offset;
-	bp->msix_table[1].entry = igu_vec;
-	DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
-	offset++;
-#endif
-	for_each_queue(bp, i) {
-		igu_vec = BP_L_ID(bp) + offset + i;
-		bp->msix_table[i + offset].entry = igu_vec;
-		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
-		   "(fastpath #%u)\n", i + offset, igu_vec, i);
-	}
-
-	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
-			     BNX2X_NUM_QUEUES(bp) + offset);
-
-	/*
-	 * reconfigure number of tx/rx queues according to available
-	 * MSI-X vectors
-	 */
-	if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
-		/* vectors available for FP */
-		int fp_vec = rc - BNX2X_MSIX_VEC_FP_START;
-
-		DP(NETIF_MSG_IFUP,
-		   "Trying to use less MSI-X vectors: %d\n", rc);
-
-		rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
-
-		if (rc) {
-			DP(NETIF_MSG_IFUP,
-			   "MSI-X is not attainable  rc %d\n", rc);
-			return rc;
-		}
-
-		bp->num_queues = min(bp->num_queues, fp_vec);
-
-		DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
-				  bp->num_queues);
-	} else if (rc) {
-		DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
-		return rc;
-	}
-
-	bp->flags |= USING_MSIX_FLAG;
-
-	return 0;
-}
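-
-/* Note the pci_enable_msix() contract relied on above (as documented at
- * the time): 0 means success, a negative value is an error, and a
- * positive value is the number of vectors that could actually be
- * allocated -- hence the retry with exactly 'rc' entries followed by
- * shrinking num_queues to the vectors that remain for the fastpath.
- */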
-
-static int bnx2x_req_msix_irqs(struct bnx2x *bp)
-{
-	int i, rc, offset = 1;
-
-	rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
-			 bp->dev->name, bp->dev);
-	if (rc) {
-		BNX2X_ERR("request sp irq failed\n");
-		return -EBUSY;
-	}
-
-#ifdef BCM_CNIC
-	offset++;
-#endif
-	for_each_queue(bp, i) {
-		struct bnx2x_fastpath *fp = &bp->fp[i];
-		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
-			 bp->dev->name, i);
-
-		rc = request_irq(bp->msix_table[i + offset].vector,
-				 bnx2x_msix_fp_int, 0, fp->name, fp);
-		if (rc) {
-			BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
-			bnx2x_free_msix_irqs(bp);
-			return -EBUSY;
-		}
-
-		fp->state = BNX2X_FP_STATE_IRQ;
-	}
-
-	i = BNX2X_NUM_QUEUES(bp);
-	netdev_info(bp->dev, "using MSI-X  IRQs: sp %d  fp[%d] %d"
-	       " ... fp[%d] %d\n",
-	       bp->msix_table[0].vector,
-	       0, bp->msix_table[offset].vector,
-	       i - 1, bp->msix_table[offset + i - 1].vector);
-
-	return 0;
-}
-
-static int bnx2x_enable_msi(struct bnx2x *bp)
-{
-	int rc;
-
-	rc = pci_enable_msi(bp->pdev);
-	if (rc) {
-		DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
-		return -1;
-	}
-	bp->flags |= USING_MSI_FLAG;
-
-	return 0;
-}
-
-static int bnx2x_req_irq(struct bnx2x *bp)
-{
-	unsigned long flags;
-	int rc;
-
-	if (bp->flags & USING_MSI_FLAG)
-		flags = 0;
-	else
-		flags = IRQF_SHARED;
-
-	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
-			 bp->dev->name, bp->dev);
-	if (!rc)
-		bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
-
-	return rc;
-}
-
-static void bnx2x_napi_enable(struct bnx2x *bp)
-{
-	int i;
-
-	for_each_queue(bp, i)
-		napi_enable(&bnx2x_fp(bp, i, napi));
-}
-
-static void bnx2x_napi_disable(struct bnx2x *bp)
-{
-	int i;
-
-	for_each_queue(bp, i)
-		napi_disable(&bnx2x_fp(bp, i, napi));
-}
-
-static void bnx2x_netif_start(struct bnx2x *bp)
-{
-	int intr_sem;
-
-	intr_sem = atomic_dec_and_test(&bp->intr_sem);
-	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
-
-	if (intr_sem) {
-		if (netif_running(bp->dev)) {
-			bnx2x_napi_enable(bp);
-			bnx2x_int_enable(bp);
-			if (bp->state == BNX2X_STATE_OPEN)
-				netif_tx_wake_all_queues(bp->dev);
-		}
-	}
-}
-
-static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
-{
-	bnx2x_int_disable_sync(bp, disable_hw);
-	bnx2x_napi_disable(bp);
-	netif_tx_disable(bp->dev);
-}
-
-/*
- * Init service functions
- */
-
-/**
- * Sets a MAC in a CAM for a few L2 Clients for E1 chip
- *
- * @param bp driver descriptor
- * @param set set or clear an entry (1 or 0)
- * @param mac pointer to a buffer containing a MAC
- * @param cl_bit_vec bit vector of clients to register a MAC for
- * @param cam_offset offset in a CAM to use
- * @param with_bcast set broadcast MAC as well
- */
-static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
-				      u32 cl_bit_vec, u8 cam_offset,
-				      u8 with_bcast)
-{
-	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
-	int port = BP_PORT(bp);
-
-	/* CAM allocation
-	 * unicasts 0-31:port0 32-63:port1
-	 * multicast 64-127:port0 128-191:port1
-	 */
-	config->hdr.length = 1 + (with_bcast ? 1 : 0);
-	config->hdr.offset = cam_offset;
-	config->hdr.client_id = 0xff;
-	config->hdr.reserved1 = 0;
-
-	/* primary MAC */
-	config->config_table[0].cam_entry.msb_mac_addr =
-					swab16(*(u16 *)&mac[0]);
-	config->config_table[0].cam_entry.middle_mac_addr =
-					swab16(*(u16 *)&mac[2]);
-	config->config_table[0].cam_entry.lsb_mac_addr =
-					swab16(*(u16 *)&mac[4]);
-	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
-	if (set)
-		config->config_table[0].target_table_entry.flags = 0;
-	else
-		CAM_INVALIDATE(config->config_table[0]);
-	config->config_table[0].target_table_entry.clients_bit_vector =
-						cpu_to_le32(cl_bit_vec);
-	config->config_table[0].target_table_entry.vlan_id = 0;
-
-	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
-	   (set ? "setting" : "clearing"),
-	   config->config_table[0].cam_entry.msb_mac_addr,
-	   config->config_table[0].cam_entry.middle_mac_addr,
-	   config->config_table[0].cam_entry.lsb_mac_addr);
-
-	/* broadcast */
-	if (with_bcast) {
-		config->config_table[1].cam_entry.msb_mac_addr =
-			cpu_to_le16(0xffff);
-		config->config_table[1].cam_entry.middle_mac_addr =
-			cpu_to_le16(0xffff);
-		config->config_table[1].cam_entry.lsb_mac_addr =
-			cpu_to_le16(0xffff);
-		config->config_table[1].cam_entry.flags = cpu_to_le16(port);
-		if (set)
-			config->config_table[1].target_table_entry.flags =
-					TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
-		else
-			CAM_INVALIDATE(config->config_table[1]);
-		config->config_table[1].target_table_entry.clients_bit_vector =
-							cpu_to_le32(cl_bit_vec);
-		config->config_table[1].target_table_entry.vlan_id = 0;
-	}
-
-	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
-		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
-		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
-}
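-
-/* On little-endian hosts the swab16(*(u16 *)&mac[n]) pattern above loads
- * each pair of MAC bytes and swaps them, so e.g. msb_mac_addr ends up
- * with mac[0] in its high byte and mac[1] in its low byte -- apparently
- * the byte order the CAM entries expect.
- */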
-
-/**
- * Sets a MAC in a CAM for a few L2 Clients for E1H chip
- *
- * @param bp driver descriptor
- * @param set set or clear an entry (1 or 0)
- * @param mac pointer to a buffer containing a MAC
- * @param cl_bit_vec bit vector of clients to register a MAC for
- * @param cam_offset offset in a CAM to use
- */
-static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
-				       u32 cl_bit_vec, u8 cam_offset)
-{
-	struct mac_configuration_cmd_e1h *config =
-		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
-
-	config->hdr.length = 1;
-	config->hdr.offset = cam_offset;
-	config->hdr.client_id = 0xff;
-	config->hdr.reserved1 = 0;
-
-	/* primary MAC */
-	config->config_table[0].msb_mac_addr =
-					swab16(*(u16 *)&mac[0]);
-	config->config_table[0].middle_mac_addr =
-					swab16(*(u16 *)&mac[2]);
-	config->config_table[0].lsb_mac_addr =
-					swab16(*(u16 *)&mac[4]);
-	config->config_table[0].clients_bit_vector =
-					cpu_to_le32(cl_bit_vec);
-	config->config_table[0].vlan_id = 0;
-	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
-	if (set)
-		config->config_table[0].flags = BP_PORT(bp);
-	else
-		config->config_table[0].flags =
-				MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
-
-	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID mask %d\n",
-	   (set ? "setting" : "clearing"),
-	   config->config_table[0].msb_mac_addr,
-	   config->config_table[0].middle_mac_addr,
-	   config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
-
-	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
-		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
-		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
-}
-
-static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
-			     int *state_p, int poll)
-{
-	/* can take a while if any port is running */
-	int cnt = 5000;
-
-	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
-	   poll ? "polling" : "waiting", state, idx);
-
-	might_sleep();
-	while (cnt--) {
-		if (poll) {
-			bnx2x_rx_int(bp->fp, 10);
-			/* if index is different from 0
-			 * the reply for some commands will
-			 * be on the non default queue
-			 */
-			if (idx)
-				bnx2x_rx_int(&bp->fp[idx], 10);
-		}
-
-		mb(); /* state is changed by bnx2x_sp_event() */
-		if (*state_p == state) {
-#ifdef BNX2X_STOP_ON_ERROR
-			DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
-#endif
-			return 0;
-		}
-
-		msleep(1);
-
-		if (bp->panic)
-			return -EIO;
-	}
-
-	/* timeout! */
-	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
-		  poll ? "polling" : "waiting", state, idx);
-#ifdef BNX2X_STOP_ON_ERROR
-	bnx2x_panic();
-#endif
-
-	return -EBUSY;
-}
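-
-/* When 'poll' is set, bnx2x_wait_ramrod() reaps the completion by hand
- * via bnx2x_rx_int(), which is needed on paths where interrupts are not
- * (or no longer) enabled; otherwise *state_p is flipped from the
- * interrupt path by bnx2x_sp_event() and this loop just watches it.
- */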
-
-static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
-{
-	bp->set_mac_pending++;
-	smp_wmb();
-
-	bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
-				   (1 << bp->fp->cl_id), BP_FUNC(bp));
-
-	/* Wait for a completion */
-	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
-}
-
-static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
-{
-	bp->set_mac_pending++;
-	smp_wmb();
-
-	bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
-				  (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
-				  1);
-
-	/* Wait for a completion */
-	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
-}
-
-#ifdef BCM_CNIC
-/**
- * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
- * MAC(s). This function will wait until the ramrod completion
- * returns.
- *
- * @param bp driver handle
- * @param set set or clear the CAM entry
- *
- * @return 0 if success, -ENODEV if the ramrod doesn't return.
- */
-static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
-{
-	u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
-
-	bp->set_mac_pending++;
-	smp_wmb();
-
-	/* Send a SET_MAC ramrod */
-	if (CHIP_IS_E1(bp))
-		bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
-				  cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
-				  1);
-	else
-		/* CAM allocation for E1H
-		* unicasts: by func number
-		* multicast: 20+FUNC*20, 20 each
-		*/
-		bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
-				   cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
-
-	/* Wait for a completion when setting */
-	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
-
-	return 0;
-}
-#endif
-
-static int bnx2x_setup_leading(struct bnx2x *bp)
-{
-	int rc;
-
-	/* reset IGU state */
-	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
-
-	/* SETUP ramrod */
-	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
-
-	/* Wait for completion */
-	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
-
-	return rc;
-}
-
-static int bnx2x_setup_multi(struct bnx2x *bp, int index)
-{
-	struct bnx2x_fastpath *fp = &bp->fp[index];
-
-	/* reset IGU state */
-	bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
-
-	/* SETUP ramrod */
-	fp->state = BNX2X_FP_STATE_OPENING;
-	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
-		      fp->cl_id, 0);
-
-	/* Wait for completion */
-	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
-				 &(fp->state), 0);
-}
-
-static int bnx2x_poll(struct napi_struct *napi, int budget);
-
-static void bnx2x_set_num_queues_msix(struct bnx2x *bp)
-{
-
-	switch (bp->multi_mode) {
-	case ETH_RSS_MODE_DISABLED:
-		bp->num_queues = 1;
-		break;
-
-	case ETH_RSS_MODE_REGULAR:
-		if (num_queues)
-			bp->num_queues = min_t(u32, num_queues,
-						  BNX2X_MAX_QUEUES(bp));
-		else
-			bp->num_queues = min_t(u32, num_online_cpus(),
-						  BNX2X_MAX_QUEUES(bp));
-		break;
-
-	default:
-		bp->num_queues = 1;
-		break;
-	}
-}
-
-static int bnx2x_set_num_queues(struct bnx2x *bp)
-{
-	int rc = 0;
-
-	switch (int_mode) {
-	case INT_MODE_INTx:
-	case INT_MODE_MSI:
-		bp->num_queues = 1;
-		DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
-		break;
-	default:
-		/* Set number of queues according to bp->multi_mode value */
-		bnx2x_set_num_queues_msix(bp);
-
-		DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
-		   bp->num_queues);
-
-		/* if we can't use MSI-X we only need one fp,
-		 * so try to enable MSI-X with the requested number of fp's
-		 * and fall back to MSI or legacy INTx with one fp
-		 */
-		rc = bnx2x_enable_msix(bp);
-		if (rc)
-			/* failed to enable MSI-X */
-			bp->num_queues = 1;
-		break;
-	}
-	bp->dev->real_num_tx_queues = bp->num_queues;
-	return rc;
-}
-
-#ifdef BCM_CNIC
-static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
-static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
-#endif
-
-/* must be called with rtnl_lock */
-static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
-{
-	u32 load_code;
-	int i, rc;
-
-#ifdef BNX2X_STOP_ON_ERROR
-	if (unlikely(bp->panic))
-		return -EPERM;
-#endif
-
-	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
-
-	rc = bnx2x_set_num_queues(bp);
-
-	if (bnx2x_alloc_mem(bp)) {
-		bnx2x_free_irq(bp, true);
-		return -ENOMEM;
-	}
-
-	for_each_queue(bp, i)
-		bnx2x_fp(bp, i, disable_tpa) =
-					((bp->flags & TPA_ENABLE_FLAG) == 0);
-
-	for_each_queue(bp, i)
-		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
-			       bnx2x_poll, 128);
-
-	bnx2x_napi_enable(bp);
-
-	if (bp->flags & USING_MSIX_FLAG) {
-		rc = bnx2x_req_msix_irqs(bp);
-		if (rc) {
-			bnx2x_free_irq(bp, true);
-			goto load_error1;
-		}
-	} else {
-		/* Fall back to INTx if MSI-X could not be enabled due to
-		   lack of memory (in bnx2x_set_num_queues()) */
-		if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
-			bnx2x_enable_msi(bp);
-		bnx2x_ack_int(bp);
-		rc = bnx2x_req_irq(bp);
-		if (rc) {
-			BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
-			bnx2x_free_irq(bp, true);
-			goto load_error1;
-		}
-		if (bp->flags & USING_MSI_FLAG) {
-			bp->dev->irq = bp->pdev->irq;
-			netdev_info(bp->dev, "using MSI  IRQ %d\n",
-				    bp->pdev->irq);
-		}
-	}
-
-	/* Send LOAD_REQUEST command to the MCP.
-	   The reply indicates which type of LOAD was granted:
-	   if this is the first port to be initialized,
-	   the common blocks should be initialized as well; otherwise not.
-	*/
-	if (!BP_NOMCP(bp)) {
-		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
-		if (!load_code) {
-			BNX2X_ERR("MCP response failure, aborting\n");
-			rc = -EBUSY;
-			goto load_error2;
-		}
-		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
-			rc = -EBUSY; /* other port in diagnostic mode */
-			goto load_error2;
-		}
-
-	} else {
-		int port = BP_PORT(bp);
-
-		DP(NETIF_MSG_IFUP, "NO MCP - load counts      %d, %d, %d\n",
-		   load_count[0], load_count[1], load_count[2]);
-		load_count[0]++;
-		load_count[1 + port]++;
-		DP(NETIF_MSG_IFUP, "NO MCP - new load counts  %d, %d, %d\n",
-		   load_count[0], load_count[1], load_count[2]);
-		if (load_count[0] == 1)
-			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
-		else if (load_count[1 + port] == 1)
-			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
-		else
-			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
-	}
-
-	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
-	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
-		bp->port.pmf = 1;
-	else
-		bp->port.pmf = 0;
-	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
-
-	/* Initialize HW */
-	rc = bnx2x_init_hw(bp, load_code);
-	if (rc) {
-		BNX2X_ERR("HW init failed, aborting\n");
-		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
-		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
-		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
-		goto load_error2;
-	}
-
-	/* Setup NIC internals and enable interrupts */
-	bnx2x_nic_init(bp, load_code);
-
-	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
-	    (bp->common.shmem2_base))
-		SHMEM2_WR(bp, dcc_support,
-			  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
-			   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
-
-	/* Send LOAD_DONE command to MCP */
-	if (!BP_NOMCP(bp)) {
-		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
-		if (!load_code) {
-			BNX2X_ERR("MCP response failure, aborting\n");
-			rc = -EBUSY;
-			goto load_error3;
-		}
-	}
-
-	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
-
-	rc = bnx2x_setup_leading(bp);
-	if (rc) {
-		BNX2X_ERR("Setup leading failed!\n");
-#ifndef BNX2X_STOP_ON_ERROR
-		goto load_error3;
-#else
-		bp->panic = 1;
-		return -EBUSY;
-#endif
-	}
-
-	if (CHIP_IS_E1H(bp))
-		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
-			DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
-			bp->flags |= MF_FUNC_DIS;
-		}
-
-	if (bp->state == BNX2X_STATE_OPEN) {
-#ifdef BCM_CNIC
-		/* Enable Timer scan */
-		REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
-#endif
-		for_each_nondefault_queue(bp, i) {
-			rc = bnx2x_setup_multi(bp, i);
-			if (rc)
-#ifdef BCM_CNIC
-				goto load_error4;
-#else
-				goto load_error3;
-#endif
-		}
-
-		if (CHIP_IS_E1(bp))
-			bnx2x_set_eth_mac_addr_e1(bp, 1);
-		else
-			bnx2x_set_eth_mac_addr_e1h(bp, 1);
-#ifdef BCM_CNIC
-		/* Set iSCSI L2 MAC */
-		mutex_lock(&bp->cnic_mutex);
-		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
-			bnx2x_set_iscsi_eth_mac_addr(bp, 1);
-			bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
-			bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
-				      CNIC_SB_ID(bp));
-		}
-		mutex_unlock(&bp->cnic_mutex);
-#endif
-	}
-
-	if (bp->port.pmf)
-		bnx2x_initial_phy_init(bp, load_mode);
-
-	/* Start fast path */
-	switch (load_mode) {
-	case LOAD_NORMAL:
-		if (bp->state == BNX2X_STATE_OPEN) {
-			/* Tx queue should be only reenabled */
-			netif_tx_wake_all_queues(bp->dev);
-		}
-		/* Initialize the receive filter. */
-		bnx2x_set_rx_mode(bp->dev);
-		break;
-
-	case LOAD_OPEN:
-		netif_tx_start_all_queues(bp->dev);
-		if (bp->state != BNX2X_STATE_OPEN)
-			netif_tx_disable(bp->dev);
-		/* Initialize the receive filter. */
-		bnx2x_set_rx_mode(bp->dev);
-		break;
-
-	case LOAD_DIAG:
-		/* Initialize the receive filter. */
-		bnx2x_set_rx_mode(bp->dev);
-		bp->state = BNX2X_STATE_DIAG;
-		break;
-
-	default:
-		break;
-	}
-
-	if (!bp->port.pmf)
-		bnx2x__link_status_update(bp);
-
-	/* start the timer */
-	mod_timer(&bp->timer, jiffies + bp->current_interval);
-
-#ifdef BCM_CNIC
-	bnx2x_setup_cnic_irq_info(bp);
-	if (bp->state == BNX2X_STATE_OPEN)
-		bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
-#endif
-	bnx2x_inc_load_cnt(bp);
-
-	return 0;
-
-#ifdef BCM_CNIC
-load_error4:
-	/* Disable Timer scan */
-	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
-#endif
-load_error3:
-	bnx2x_int_disable_sync(bp, 1);
-	if (!BP_NOMCP(bp)) {
-		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
-		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
-	}
-	bp->port.pmf = 0;
-	/* Free SKBs, SGEs, TPA pool and driver internals */
-	bnx2x_free_skbs(bp);
-	for_each_queue(bp, i)
-		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
-load_error2:
-	/* Release IRQs */
-	bnx2x_free_irq(bp, false);
-load_error1:
-	bnx2x_napi_disable(bp);
-	for_each_queue(bp, i)
-		netif_napi_del(&bnx2x_fp(bp, i, napi));
-	bnx2x_free_mem(bp);
-
-	return rc;
-}
-
-static int bnx2x_stop_multi(struct bnx2x *bp, int index)
-{
-	struct bnx2x_fastpath *fp = &bp->fp[index];
-	int rc;
-
-	/* halt the connection */
-	fp->state = BNX2X_FP_STATE_HALTING;
-	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
-
-	/* Wait for completion */
-	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
-			       &(fp->state), 1);
-	if (rc) /* timeout */
-		return rc;
-
-	/* delete cfc entry */
-	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
-
-	/* Wait for completion */
-	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
-			       &(fp->state), 1);
-	return rc;
-}
-
-static int bnx2x_stop_leading(struct bnx2x *bp)
-{
-	__le16 dsb_sp_prod_idx;
-	/* if the other port is handling traffic,
-	   this can take a lot of time */
-	int cnt = 500;
-	int rc;
-
-	might_sleep();
-
-	/* Send HALT ramrod */
-	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
-	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
-
-	/* Wait for completion */
-	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
-			       &(bp->fp[0].state), 1);
-	if (rc) /* timeout */
-		return rc;
-
-	dsb_sp_prod_idx = *bp->dsb_sp_prod;
-
-	/* Send PORT_DELETE ramrod */
-	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
-
-	/* Wait for the completion to arrive on the default status block;
-	   we are going to reset the chip anyway,
-	   so there is not much to do if this times out
-	 */
-	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
-		if (!cnt) {
-			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
-			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
-			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
-#ifdef BNX2X_STOP_ON_ERROR
-			bnx2x_panic();
-#endif
-			rc = -EBUSY;
-			break;
-		}
-		cnt--;
-		msleep(1);
-		rmb(); /* Refresh the dsb_sp_prod */
-	}
-	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
-	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
-
-	return rc;
-}
-
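-/* Per-function reset: mask the IGU leading/trailing edge registers,
- * (with CNIC) stop the timers scan and wait for it to complete, and
- * clear this function's ILT entries.
- */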
-static void bnx2x_reset_func(struct bnx2x *bp)
-{
-	int port = BP_PORT(bp);
-	int func = BP_FUNC(bp);
-	int base, i;
-
-	/* Configure IGU */
-	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
-	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
-
-#ifdef BCM_CNIC
-	/* Disable Timer scan */
-	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
-	/*
-	 * Wait for at least 10ms and up to 2 seconds for the timers scan to
-	 * complete
-	 */
-	for (i = 0; i < 200; i++) {
-		msleep(10);
-		if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
-			break;
-	}
-#endif
-	/* Clear ILT */
-	base = FUNC_ILT_BASE(func);
-	for (i = base; i < base + ILT_PER_FUNC; i++)
-		bnx2x_ilt_wr(bp, i, 0);
-}
-
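-/* Per-port reset: mask NIG and AEU attentions, stop receiving into the
- * BRB and check that it drained (occupied blocks are only logged).
- */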
-static void bnx2x_reset_port(struct bnx2x *bp)
-{
-	int port = BP_PORT(bp);
-	u32 val;
-
-	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
-
-	/* Do not rcv packets to BRB */
-	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
-	/* Do not direct rcv packets that are not for MCP to the BRB */
-	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
-			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
-
-	/* Configure AEU */
-	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
-
-	msleep(100);
-	/* Check for BRB port occupancy */
-	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
-	if (val)
-		DP(NETIF_MSG_IFDOWN,
-		   "BRB1 is not empty  %d blocks are occupied\n", val);
-
-	/* TODO: Close Doorbell port? */
-}
-
-static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
-{
-	DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
-	   BP_FUNC(bp), reset_code);
-
-	switch (reset_code) {
-	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
-		bnx2x_reset_port(bp);
-		bnx2x_reset_func(bp);
-		bnx2x_reset_common(bp);
-		break;
-
-	case FW_MSG_CODE_DRV_UNLOAD_PORT:
-		bnx2x_reset_port(bp);
-		bnx2x_reset_func(bp);
-		break;
-
-	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
-		bnx2x_reset_func(bp);
-		break;
-
-	default:
-		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
-		break;
-	}
-}
-
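-/* Graceful chip cleanup on unload: drain the Tx queues, invalidate the
- * MAC filters (CAM on E1, MC hash and NIG LLH on E1H), optionally
- * program the EMAC MAC-match entries for WoL, close all connections
- * and report the negotiated reset_code to the MCP (or emulate the MCP
- * bookkeeping via load_count[] when running without an MCP).
- */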
-static void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
-{
-	int port = BP_PORT(bp);
-	u32 reset_code = 0;
-	int i, cnt, rc;
-
-	/* Wait until tx fastpath tasks complete */
-	for_each_queue(bp, i) {
-		struct bnx2x_fastpath *fp = &bp->fp[i];
-
-		cnt = 1000;
-		while (bnx2x_has_tx_work_unload(fp)) {
-
-			bnx2x_tx_int(fp);
-			if (!cnt) {
-				BNX2X_ERR("timeout waiting for queue[%d]\n",
-					  i);
-#ifdef BNX2X_STOP_ON_ERROR
-				bnx2x_panic();
-				return; /* void function, can't return -EBUSY */
-#else
-				break;
-#endif
-			}
-			cnt--;
-			msleep(1);
-		}
-	}
-	/* Give HW time to discard old tx messages */
-	msleep(1);
-
-	if (CHIP_IS_E1(bp)) {
-		struct mac_configuration_cmd *config =
-						bnx2x_sp(bp, mcast_config);
-
-		bnx2x_set_eth_mac_addr_e1(bp, 0);
-
-		for (i = 0; i < config->hdr.length; i++)
-			CAM_INVALIDATE(config->config_table[i]);
-
-		config->hdr.length = i;
-		if (CHIP_REV_IS_SLOW(bp))
-			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
-		else
-			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
-		config->hdr.client_id = bp->fp->cl_id;
-		config->hdr.reserved1 = 0;
-
-		bp->set_mac_pending++;
-		smp_wmb();
-
-		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
-			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
-			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
-
-	} else { /* E1H */
-		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
-
-		bnx2x_set_eth_mac_addr_e1h(bp, 0);
-
-		for (i = 0; i < MC_HASH_SIZE; i++)
-			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
-
-		REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
-	}
-#ifdef BCM_CNIC
-	/* Clear iSCSI L2 MAC */
-	mutex_lock(&bp->cnic_mutex);
-	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
-		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
-		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
-	}
-	mutex_unlock(&bp->cnic_mutex);
-#endif
-
-	if (unload_mode == UNLOAD_NORMAL)
-		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
-
-	else if (bp->flags & NO_WOL_FLAG)
-		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
-
-	else if (bp->wol) {
-		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
-		u8 *mac_addr = bp->dev->dev_addr;
-		u32 val;
-		/* The MAC address is written to entries 1-4 to
-		   preserve entry 0, which is used by the PMF */
-		u8 entry = (BP_E1HVN(bp) + 1)*8;
-
-		val = (mac_addr[0] << 8) | mac_addr[1];
-		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
-
-		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
-		      (mac_addr[4] << 8) | mac_addr[5];
-		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
-
-		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
-
-	} else
-		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
-
-	/* Close the multi and leading connections;
-	   ramrod completions are collected in a synchronous way */
-	for_each_nondefault_queue(bp, i)
-		if (bnx2x_stop_multi(bp, i))
-			goto unload_error;
-
-	rc = bnx2x_stop_leading(bp);
-	if (rc) {
-		BNX2X_ERR("Stop leading failed!\n");
-#ifdef BNX2X_STOP_ON_ERROR
-		return -EBUSY;
-#else
-		goto unload_error;
-#endif
-	}
-
-unload_error:
-	if (!BP_NOMCP(bp))
-		reset_code = bnx2x_fw_command(bp, reset_code);
-	else {
-		DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
-		   load_count[0], load_count[1], load_count[2]);
-		load_count[0]--;
-		load_count[1 + port]--;
-		DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
-		   load_count[0], load_count[1], load_count[2]);
-		if (load_count[0] == 0)
-			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
-		else if (load_count[1 + port] == 0)
-			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
-		else
-			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
-	}
-
-	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
-	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
-		bnx2x__link_reset(bp);
-
-	/* Reset the chip */
-	bnx2x_reset_chip(bp, reset_code);
-
-	/* Report UNLOAD_DONE to MCP */
-	if (!BP_NOMCP(bp))
-		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
-
-}
-
-static inline void bnx2x_disable_close_the_gate(struct bnx2x *bp)
-{
-	u32 val;
-
-	DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
-
-	if (CHIP_IS_E1(bp)) {
-		int port = BP_PORT(bp);
-		u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
-			MISC_REG_AEU_MASK_ATTN_FUNC_0;
-
-		val = REG_RD(bp, addr);
-		val &= ~(0x300);
-		REG_WR(bp, addr, val);
-	} else if (CHIP_IS_E1H(bp)) {
-		val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
-		val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
-			 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
-		REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
-	}
-}
-
-/* must be called with rtnl_lock */
-static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
-{
-	int i;
-
-	if (bp->state == BNX2X_STATE_CLOSED) {
-		/* Interface has been removed - nothing to recover */
-		bp->recovery_state = BNX2X_RECOVERY_DONE;
-		bp->is_leader = 0;
-		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
-		smp_wmb();
-
-		return -EINVAL;
-	}
-
-#ifdef BCM_CNIC
-	bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
-#endif
-	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
-
-	/* Set "drop all" */
-	bp->rx_mode = BNX2X_RX_MODE_NONE;
-	bnx2x_set_storm_rx_mode(bp);
-
-	/* Disable HW interrupts, NAPI and Tx */
-	bnx2x_netif_stop(bp, 1);
-	netif_carrier_off(bp->dev);
-
-	del_timer_sync(&bp->timer);
-	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
-		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
-	bnx2x_stats_handle(bp, STATS_EVENT_STOP);
-
-	/* Release IRQs */
-	bnx2x_free_irq(bp, false);
-
-	/* Cleanup the chip if needed */
-	if (unload_mode != UNLOAD_RECOVERY)
-		bnx2x_chip_cleanup(bp, unload_mode);
-
-	bp->port.pmf = 0;
-
-	/* Free SKBs, SGEs, TPA pool and driver internals */
-	bnx2x_free_skbs(bp);
-	for_each_queue(bp, i)
-		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
-	for_each_queue(bp, i)
-		netif_napi_del(&bnx2x_fp(bp, i, napi));
-	bnx2x_free_mem(bp);
-
-	bp->state = BNX2X_STATE_CLOSED;
-
-	/* The last driver must disable the "close the gate" functionality
-	 * if there is no parity attention or "process kill" pending.
-	 */
-	if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
-	    bnx2x_reset_is_done(bp))
-		bnx2x_disable_close_the_gate(bp);
-
-	/* Reset the MCP mailbox sequence if recovery is ongoing */
-	if (unload_mode == UNLOAD_RECOVERY)
-		bp->fw_seq = 0;
-
-	return 0;
-}
-
-/* Close gates #2, #3 and #4: */
-static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
-{
-	u32 val, addr;
-
-	/* Gates #2 and #4a are closed/opened for "not E1" only */
-	if (!CHIP_IS_E1(bp)) {
-		/* #4 */
-		val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
-		REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
-		       close ? (val | 0x1) : (val & (~(u32)1)));
-		/* #2 */
-		val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
-		REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
-		       close ? (val | 0x1) : (val & (~(u32)1)));
-	}
-
-	/* #3 */
-	addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
-	val = REG_RD(bp, addr);
-	REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
-
-	DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
-		close ? "closing" : "opening");
-	mmiowb();
-}
-
-#define SHARED_MF_CLP_MAGIC  0x80000000 /* `magic' bit */
-
-static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
-{
-	/* Save the current `magic' bit value and then set the bit */
-	u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
-	*magic_val = val & SHARED_MF_CLP_MAGIC;
-	MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
-}
-
-/* Restore the value of the `magic' bit.
- *
- * @param bp        Driver handle.
- * @param magic_val Old value of the `magic' bit.
- */
-static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
-{
-	/* Restore the `magic' bit value... */
-	u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
-	MF_CFG_WR(bp, shared_mf_config.clp_mb,
-		(val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
-}
-
-/* Prepare for MCP reset: take care of the CLP configuration.
- *
- * @param bp
- * @param magic_val Receives the old value of the `magic' bit.
- */
-static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
-{
-	u32 shmem;
-	u32 validity_offset;
-
-	DP(NETIF_MSG_HW, "Starting\n");
-
-	/* Set `magic' bit in order to save MF config */
-	if (!CHIP_IS_E1(bp))
-		bnx2x_clp_reset_prep(bp, magic_val);
-
-	/* Get shmem offset */
-	shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
-	validity_offset = offsetof(struct shmem_region, validity_map[0]);
-
-	/* Clear validity map flags */
-	if (shmem > 0)
-		REG_WR(bp, shmem + validity_offset, 0);
-}
-
-#define MCP_TIMEOUT      5000   /* 5 seconds (in ms) */
-#define MCP_ONE_TIMEOUT  100    /* 100 ms */
-
-/* Wait MCP_ONE_TIMEOUT, or MCP_ONE_TIMEOUT*10 on emulation/FPGA
- * (slow) chips.
- */
-static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
-{
-	/* special handling for emulation and FPGA,
-	   wait 10 times longer */
-	if (CHIP_REV_IS_SLOW(bp))
-		msleep(MCP_ONE_TIMEOUT*10);
-	else
-		msleep(MCP_ONE_TIMEOUT);
-}
-
-static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
-{
-	u32 shmem, cnt, validity_offset, val;
-	int rc = 0;
-
-	msleep(100);
-
-	/* Get shmem offset */
-	shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
-	if (shmem == 0) {
-		BNX2X_ERR("Shmem 0 return failure\n");
-		rc = -ENOTTY;
-		goto exit_lbl;
-	}
-
-	validity_offset = offsetof(struct shmem_region, validity_map[0]);
-
-	/* Wait for MCP to come up */
-	for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
-		/* TBD: it's best to check the validity map of the last port;
-		 * currently port 0 is checked.
-		 */
-		val = REG_RD(bp, shmem + validity_offset);
-		DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
-		   shmem + validity_offset, val);
-
-		/* check that shared memory is valid. */
-		if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
-		    == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
-			break;
-
-		bnx2x_mcp_wait_one(bp);
-	}
-
-	DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
-
-	/* Check that shared memory is valid. This indicates that MCP is up. */
-	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
-	    (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
-		BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
-		rc = -ENOTTY;
-		goto exit_lbl;
-	}
-
-exit_lbl:
-	/* Restore the `magic' bit value */
-	if (!CHIP_IS_E1(bp))
-		bnx2x_clp_reset_done(bp, magic_val);
-
-	return rc;
-}
-
-static void bnx2x_pxp_prep(struct bnx2x *bp)
-{
-	if (!CHIP_IS_E1(bp)) {
-		REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
-		REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
-		REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
-		mmiowb();
-	}
-}
-
-/*
- * Reset the whole chip except for:
- *      - PCIE core
- *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
- *              one reset bit)
- *      - IGU
- *      - MISC (including AEU)
- *      - GRC
- *      - RBCN, RBCP
- */
-static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
-{
-	u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
-
-	not_reset_mask1 =
-		MISC_REGISTERS_RESET_REG_1_RST_HC |
-		MISC_REGISTERS_RESET_REG_1_RST_PXPV |
-		MISC_REGISTERS_RESET_REG_1_RST_PXP;
-
-	not_reset_mask2 =
-		MISC_REGISTERS_RESET_REG_2_RST_MDIO |
-		MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
-		MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
-		MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
-		MISC_REGISTERS_RESET_REG_2_RST_RBCN |
-		MISC_REGISTERS_RESET_REG_2_RST_GRC  |
-		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
-		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
-
-	reset_mask1 = 0xffffffff;
-
-	if (CHIP_IS_E1(bp))
-		reset_mask2 = 0xffff;
-	else
-		reset_mask2 = 0x1ffff;
-
-	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
-	       reset_mask1 & (~not_reset_mask1));
-	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
-	       reset_mask2 & (~not_reset_mask2));
-
-	barrier();
-	mmiowb();
-
-	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
-	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
-	mmiowb();
-}
-
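-/* "Process kill" - last-resort recovery: wait for the PXP2 Tetris
- * buffer to drain, close gates #2-#4, prepare the MCP and PXP for
- * reset, reset the chip (see bnx2x_process_kill_chip_reset() for what
- * is spared), wait for the MCP to come back up and reopen the gates.
- */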
-static int bnx2x_process_kill(struct bnx2x *bp)
-{
-	int cnt = 1000;
-	u32 val = 0;
-	u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
-
-
-	/* Empty the Tetris buffer, wait for 1s */
-	do {
-		sr_cnt  = REG_RD(bp, PXP2_REG_RD_SR_CNT);
-		blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
-		port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
-		port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
-		pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
-		if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
-		    ((port_is_idle_0 & 0x1) == 0x1) &&
-		    ((port_is_idle_1 & 0x1) == 0x1) &&
-		    (pgl_exp_rom2 == 0xffffffff))
-			break;
-		msleep(1);
-	} while (cnt-- > 0);
-
-	if (cnt <= 0) {
-		DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
-			  " are still"
-			  " outstanding read requests after 1s!\n");
-		DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
-			  " port_is_idle_0=0x%08x,"
-			  " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
-			  sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
-			  pgl_exp_rom2);
-		return -EAGAIN;
-	}
-
-	barrier();
-
-	/* Close gates #2, #3 and #4 */
-	bnx2x_set_234_gates(bp, true);
-
-	/* TBD: Indicate that "process kill" is in progress to MCP */
-
-	/* Clear "unprepared" bit */
-	REG_WR(bp, MISC_REG_UNPREPARED, 0);
-	barrier();
-
-	/* Make sure all is written to the chip before the reset */
-	mmiowb();
-
-	/* Wait for 1ms to empty GLUE and PCI-E core queues,
-	 * PSWHST, GRC and PSWRD Tetris buffer.
-	 */
-	msleep(1);
-
-	/* Prepare to chip reset: */
-	/* MCP */
-	bnx2x_reset_mcp_prep(bp, &val);
-
-	/* PXP */
-	bnx2x_pxp_prep(bp);
-	barrier();
-
-	/* reset the chip */
-	bnx2x_process_kill_chip_reset(bp);
-	barrier();
-
-	/* Recover after reset: */
-	/* MCP */
-	if (bnx2x_reset_mcp_comp(bp, val))
-		return -EAGAIN;
-
-	/* PXP */
-	bnx2x_pxp_prep(bp);
-
-	/* Open the gates #2, #3 and #4 */
-	bnx2x_set_234_gates(bp, false);
-
-	/* TBD: IGU/AEU preparation - bring the AEU/IGU back to a
-	 * reset state, re-enable attentions. */
-
-	return 0;
-}
-
-static int bnx2x_leader_reset(struct bnx2x *bp)
-{
-	int rc = 0;
-	/* Try to recover after the failure */
-	if (bnx2x_process_kill(bp)) {
-		printk(KERN_ERR "%s: Something bad has happened! Aii!\n",
-		       bp->dev->name);
-		rc = -EAGAIN;
-		goto exit_leader_reset;
-	}
-
-	/* Clear "reset is in progress" bit and update the driver state */
-	bnx2x_set_reset_done(bp);
-	bp->recovery_state = BNX2X_RECOVERY_DONE;
-
-exit_leader_reset:
-	bp->is_leader = 0;
-	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
-	smp_wmb();
-	return rc;
-}
-
-static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
-
-/* Assumption: runs under rtnl lock. This together with the fact
- * that it's called only from bnx2x_reset_task() ensure that it
- * will never be called when netif_running(bp->dev) is false.
- */
-static void bnx2x_parity_recover(struct bnx2x *bp)
-{
-	DP(NETIF_MSG_HW, "Handling parity\n");
-	while (1) {
-		switch (bp->recovery_state) {
-		case BNX2X_RECOVERY_INIT:
-			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
-			/* Try to get a LEADER_LOCK HW lock */
-			if (bnx2x_trylock_hw_lock(bp,
-				HW_LOCK_RESOURCE_RESERVED_08))
-				bp->is_leader = 1;
-
-			/* Stop the driver */
-			/* If the interface has been removed - return */
-			if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
-				return;
-
-			bp->recovery_state = BNX2X_RECOVERY_WAIT;
-			/* Ensure "is_leader" and "recovery_state"
-			 *  update values are seen on other CPUs
-			 */
-			smp_wmb();
-			break;
-
-		case BNX2X_RECOVERY_WAIT:
-			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
-			if (bp->is_leader) {
-				u32 load_counter = bnx2x_get_load_cnt(bp);
-				if (load_counter) {
-					/* Wait until all other functions get
-					 * down.
-					 */
-					schedule_delayed_work(&bp->reset_task,
-								HZ/10);
-					return;
-				} else {
-					/* If all other functions got down -
-					 * try to bring the chip back to
-					 * normal. In any case it's an exit
-					 * point for a leader.
-					 */
-					if (bnx2x_leader_reset(bp) ||
-					bnx2x_nic_load(bp, LOAD_NORMAL)) {
-						printk(KERN_ERR"%s: Recovery "
-						"has failed. Power cycle is "
-						"needed.\n", bp->dev->name);
-						/* Disconnect this device */
-						netif_device_detach(bp->dev);
-						/* Block ifup for all functions
-						 * of this ASIC until
-						 * "process kill" or power
-						 * cycle.
-						 */
-						bnx2x_set_reset_in_progress(bp);
-						/* Shut down the power */
-						bnx2x_set_power_state(bp,
-								PCI_D3hot);
-						return;
-					}
-
-					return;
-				}
-			} else { /* non-leader */
-				if (!bnx2x_reset_is_done(bp)) {
-					/* Try to get the LEADER_LOCK HW lock,
-					 * since a former leader may have been
-					 * unloaded by the user or released
-					 * leadership for some other reason.
-					 */
-					if (bnx2x_trylock_hw_lock(bp,
-					    HW_LOCK_RESOURCE_RESERVED_08)) {
-						/* I'm a leader now! Restart a
-						 * switch case.
-						 */
-						bp->is_leader = 1;
-						break;
-					}
-
-					schedule_delayed_work(&bp->reset_task,
-								HZ/10);
-					return;
-
-				} else { /* A leader has completed
-					  * the "process kill". It's an exit
-					  * point for a non-leader.
-					  */
-					bnx2x_nic_load(bp, LOAD_NORMAL);
-					bp->recovery_state =
-						BNX2X_RECOVERY_DONE;
-					smp_wmb();
-					return;
-				}
-			}
-		default:
-			return;
-		}
-	}
-}
-
-/* bnx2x_nic_unload() flushes the bnx2x_wq, thus the reset task is
- * scheduled on a general workqueue in order to prevent a deadlock.
- */
-static void bnx2x_reset_task(struct work_struct *work)
-{
-	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
-
-#ifdef BNX2X_STOP_ON_ERROR
-	BNX2X_ERR("reset task called but STOP_ON_ERROR defined,"
-		  " so reset not done to allow debug dump,\n"
-	 KERN_ERR " you will need to reboot when done\n");
-	return;
-#endif
-
-	rtnl_lock();
-
-	if (!netif_running(bp->dev))
-		goto reset_task_exit;
-
-	if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
-		bnx2x_parity_recover(bp);
-	else {
-		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
-		bnx2x_nic_load(bp, LOAD_NORMAL);
-	}
-
-reset_task_exit:
-	rtnl_unlock();
-}
-
-/* end of nic load/unload */
-
-/* ethtool_ops */
-
-/*
- * Init service functions
- */
-
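-/* Map a function index (0-7) to its PXP2 pretend register; writing a
- * function number there makes subsequent GRC accesses behave as if
- * issued by that function (see bnx2x_undi_int_disable_e1h()).
- */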
-static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
-{
-	switch (func) {
-	case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
-	case 1:	return PXP2_REG_PGL_PRETEND_FUNC_F1;
-	case 2:	return PXP2_REG_PGL_PRETEND_FUNC_F2;
-	case 3:	return PXP2_REG_PGL_PRETEND_FUNC_F3;
-	case 4:	return PXP2_REG_PGL_PRETEND_FUNC_F4;
-	case 5:	return PXP2_REG_PGL_PRETEND_FUNC_F5;
-	case 6:	return PXP2_REG_PGL_PRETEND_FUNC_F6;
-	case 7:	return PXP2_REG_PGL_PRETEND_FUNC_F7;
-	default:
-		BNX2X_ERR("Unsupported function index: %d\n", func);
-		return (u32)(-1);
-	}
-}
-
-static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
-{
-	u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
-
-	/* Flush all outstanding writes */
-	mmiowb();
-
-	/* Pretend to be function 0 */
-	REG_WR(bp, reg, 0);
-	/* Flush the GRC transaction (in the chip) */
-	new_val = REG_RD(bp, reg);
-	if (new_val != 0) {
-		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
-			  new_val);
-		BUG();
-	}
-
-	/* From now we are in the "like-E1" mode */
-	bnx2x_int_disable(bp);
-
-	/* Flush all outstanding writes */
-	mmiowb();
-
-	/* Restore the original function settings */
-	REG_WR(bp, reg, orig_func);
-	new_val = REG_RD(bp, reg);
-	if (new_val != orig_func) {
-		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
-			  orig_func, new_val);
-		BUG();
-	}
-}
-
-static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
-{
-	if (CHIP_IS_E1H(bp))
-		bnx2x_undi_int_disable_e1h(bp, func);
-	else
-		bnx2x_int_disable(bp);
-}
-
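-/* If a pre-boot UNDI driver left the chip initialized (detected via
- * MISC_REG_UNPREPARED and a normal-bell CID offset of 0x7), unload it:
- * issue unload requests for both ports if needed, quiesce incoming
- * traffic, reset the device while preserving the NIG port-swap straps,
- * and finally restore our own function number and fw sequence.
- */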
-static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
-{
-	u32 val;
-
-	/* Check if there is any driver already loaded */
-	val = REG_RD(bp, MISC_REG_UNPREPARED);
-	if (val == 0x1) {
-		/* Check if it is the UNDI driver: UNDI initializes the CID
-		 * offset for the normal doorbell to 0x7
-		 */
-		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
-		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
-		if (val == 0x7) {
-			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
-			/* save our func */
-			int func = BP_FUNC(bp);
-			u32 swap_en;
-			u32 swap_val;
-
-			/* clear the UNDI indication */
-			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
-
-			BNX2X_DEV_INFO("UNDI is active! reset device\n");
-
-			/* try unload UNDI on port 0 */
-			bp->func = 0;
-			bp->fw_seq =
-			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
-				DRV_MSG_SEQ_NUMBER_MASK);
-			reset_code = bnx2x_fw_command(bp, reset_code);
-
-			/* if UNDI is loaded on the other port */
-			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
-
-				/* send "DONE" for previous unload */
-				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
-
-				/* unload UNDI on port 1 */
-				bp->func = 1;
-				bp->fw_seq =
-			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
-					DRV_MSG_SEQ_NUMBER_MASK);
-				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
-
-				bnx2x_fw_command(bp, reset_code);
-			}
-
-			/* now it's safe to release the lock */
-			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
-
-			bnx2x_undi_int_disable(bp, func);
-
-			/* close input traffic and wait for it */
-			/* Do not rcv packets to BRB */
-			REG_WR(bp,
-			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
-					     NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
-			/* Do not direct rcv packets that are not for MCP to
-			 * the BRB */
-			REG_WR(bp,
-			       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
-					      NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
-			/* clear AEU */
-			REG_WR(bp,
-			     (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
-					    MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
-			msleep(10);
-
-			/* save NIG port swap info */
-			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
-			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
-			/* reset device */
-			REG_WR(bp,
-			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
-			       0xd3ffffff);
-			REG_WR(bp,
-			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
-			       0x1403);
-			/* take the NIG out of reset and restore swap values */
-			REG_WR(bp,
-			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
-			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
-			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
-			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
-
-			/* send unload done to the MCP */
-			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
-
-			/* restore our func and fw_seq */
-			bp->func = func;
-			bp->fw_seq =
-			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
-				DRV_MSG_SEQ_NUMBER_MASK);
-
-		} else
-			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
-	}
-}
-
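-/* Read the chip-wide configuration from registers and shmem: chip id,
- * single/dual port, flash size, shmem bases, bootcode version, WoL
- * capability and part number.
- */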
-static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
-{
-	u32 val, val2, val3, val4, id;
-	u16 pmc;
-
-	/* Get the chip revision id and number. */
-	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
-	val = REG_RD(bp, MISC_REG_CHIP_NUM);
-	id = ((val & 0xffff) << 16);
-	val = REG_RD(bp, MISC_REG_CHIP_REV);
-	id |= ((val & 0xf) << 12);
-	val = REG_RD(bp, MISC_REG_CHIP_METAL);
-	id |= ((val & 0xff) << 4);
-	val = REG_RD(bp, MISC_REG_BOND_ID);
-	id |= (val & 0xf);
-	bp->common.chip_id = id;
-	bp->link_params.chip_id = bp->common.chip_id;
-	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
-
-	val = (REG_RD(bp, 0x2874) & 0x55);
-	if ((bp->common.chip_id & 0x1) ||
-	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
-		bp->flags |= ONE_PORT_FLAG;
-		BNX2X_DEV_INFO("single port device\n");
-	}
-
-	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
-	bp->common.flash_size = (NVRAM_1MB_SIZE <<
-				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
-	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
-		       bp->common.flash_size, bp->common.flash_size);
-
-	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
-	bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
-	bp->link_params.shmem_base = bp->common.shmem_base;
-	BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
-		       bp->common.shmem_base, bp->common.shmem2_base);
-
-	if (!bp->common.shmem_base ||
-	    (bp->common.shmem_base < 0xA0000) ||
-	    (bp->common.shmem_base >= 0xC0000)) {
-		BNX2X_DEV_INFO("MCP not active\n");
-		bp->flags |= NO_MCP_FLAG;
-		return;
-	}
-
-	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
-	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
-		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
-		BNX2X_ERROR("BAD MCP validity signature\n");
-
-	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
-	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
-
-	bp->link_params.hw_led_mode = ((bp->common.hw_config &
-					SHARED_HW_CFG_LED_MODE_MASK) >>
-				       SHARED_HW_CFG_LED_MODE_SHIFT);
-
-	bp->link_params.feature_config_flags = 0;
-	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
-	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
-		bp->link_params.feature_config_flags |=
-				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
-	else
-		bp->link_params.feature_config_flags &=
-				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
-
-	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
-	bp->common.bc_ver = val;
-	BNX2X_DEV_INFO("bc_ver %X\n", val);
-	if (val < BNX2X_BC_VER) {
-		/* for now only warn;
-		 * later we might need to enforce this */
-		BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
-			    "please upgrade BC\n", BNX2X_BC_VER, val);
-	}
-	bp->link_params.feature_config_flags |=
-		(val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
-		FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
-
-	if (BP_E1HVN(bp) == 0) {
-		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
-		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
-	} else {
-		/* no WOL capability for E1HVN != 0 */
-		bp->flags |= NO_WOL_FLAG;
-	}
-	BNX2X_DEV_INFO("%sWoL capable\n",
-		       (bp->flags & NO_WOL_FLAG) ? "not " : "");
-
-	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
-	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
-	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
-	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
-
-	dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
-		 val, val2, val3, val4);
-}
-
-static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
-						    u32 switch_cfg)
-{
-	int port = BP_PORT(bp);
-	u32 ext_phy_type;
-
-	switch (switch_cfg) {
-	case SWITCH_CFG_1G:
-		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
-
-		ext_phy_type =
-			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
-		switch (ext_phy_type) {
-		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
-			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
-				       ext_phy_type);
-
-			bp->port.supported |= (SUPPORTED_10baseT_Half |
-					       SUPPORTED_10baseT_Full |
-					       SUPPORTED_100baseT_Half |
-					       SUPPORTED_100baseT_Full |
-					       SUPPORTED_1000baseT_Full |
-					       SUPPORTED_2500baseX_Full |
-					       SUPPORTED_TP |
-					       SUPPORTED_FIBRE |
-					       SUPPORTED_Autoneg |
-					       SUPPORTED_Pause |
-					       SUPPORTED_Asym_Pause);
-			break;
-
-		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
-			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
-				       ext_phy_type);
-
-			bp->port.supported |= (SUPPORTED_10baseT_Half |
-					       SUPPORTED_10baseT_Full |
-					       SUPPORTED_100baseT_Half |
-					       SUPPORTED_100baseT_Full |
-					       SUPPORTED_1000baseT_Full |
-					       SUPPORTED_TP |
-					       SUPPORTED_FIBRE |
-					       SUPPORTED_Autoneg |
-					       SUPPORTED_Pause |
-					       SUPPORTED_Asym_Pause);
-			break;
-
-		default:
-			BNX2X_ERR("NVRAM config error. "
-				  "BAD SerDes ext_phy_config 0x%x\n",
-				  bp->link_params.ext_phy_config);
-			return;
-		}
-
-		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
-					   port*0x10);
-		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
-		break;
-
-	case SWITCH_CFG_10G:
-		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
-
-		ext_phy_type =
-			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
-		switch (ext_phy_type) {
-		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
-			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
-				       ext_phy_type);
-
-			bp->port.supported |= (SUPPORTED_10baseT_Half |
-					       SUPPORTED_10baseT_Full |
-					       SUPPORTED_100baseT_Half |
-					       SUPPORTED_100baseT_Full |
-					       SUPPORTED_1000baseT_Full |
-					       SUPPORTED_2500baseX_Full |
-					       SUPPORTED_10000baseT_Full |
-					       SUPPORTED_TP |
-					       SUPPORTED_FIBRE |
-					       SUPPORTED_Autoneg |
-					       SUPPORTED_Pause |
-					       SUPPORTED_Asym_Pause);
-			break;
-
-		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
-			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
-				       ext_phy_type);
-
-			bp->port.supported |= (SUPPORTED_10000baseT_Full |
-					       SUPPORTED_1000baseT_Full |
-					       SUPPORTED_FIBRE |
-					       SUPPORTED_Autoneg |
-					       SUPPORTED_Pause |
-					       SUPPORTED_Asym_Pause);
-			break;
-
-		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
-			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
-				       ext_phy_type);
-
-			bp->port.supported |= (SUPPORTED_10000baseT_Full |
-					       SUPPORTED_2500baseX_Full |
-					       SUPPORTED_1000baseT_Full |
-					       SUPPORTED_FIBRE |
-					       SUPPORTED_Autoneg |
-					       SUPPORTED_Pause |
-					       SUPPORTED_Asym_Pause);
-			break;
-
-		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
-			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
-				       ext_phy_type);
-
-			bp->port.supported |= (SUPPORTED_10000baseT_Full |
-					       SUPPORTED_FIBRE |
-					       SUPPORTED_Pause |
-					       SUPPORTED_Asym_Pause);
-			break;
-
-		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
-			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
-				       ext_phy_type);
-
-			bp->port.supported |= (SUPPORTED_10000baseT_Full |
-					       SUPPORTED_1000baseT_Full |
-					       SUPPORTED_FIBRE |
-					       SUPPORTED_Pause |
-					       SUPPORTED_Asym_Pause);
-			break;
-
-		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
-			BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
-				       ext_phy_type);
-
-			bp->port.supported |= (SUPPORTED_10000baseT_Full |
-					       SUPPORTED_1000baseT_Full |
-					       SUPPORTED_Autoneg |
-					       SUPPORTED_FIBRE |
-					       SUPPORTED_Pause |
-					       SUPPORTED_Asym_Pause);
-			break;
-
-		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
-			BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
-				       ext_phy_type);
-
-			bp->port.supported |= (SUPPORTED_10000baseT_Full |
-					       SUPPORTED_1000baseT_Full |
-					       SUPPORTED_Autoneg |
-					       SUPPORTED_FIBRE |
-					       SUPPORTED_Pause |
-					       SUPPORTED_Asym_Pause);
-			break;
-
-		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
-			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
-				       ext_phy_type);
-
-			bp->port.supported |= (SUPPORTED_10000baseT_Full |
-					       SUPPORTED_TP |
-					       SUPPORTED_Autoneg |
-					       SUPPORTED_Pause |
-					       SUPPORTED_Asym_Pause);
-			break;
-
-		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
-			BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
-				       ext_phy_type);
-
-			bp->port.supported |= (SUPPORTED_10baseT_Half |
-					       SUPPORTED_10baseT_Full |
-					       SUPPORTED_100baseT_Half |
-					       SUPPORTED_100baseT_Full |
-					       SUPPORTED_1000baseT_Full |
-					       SUPPORTED_10000baseT_Full |
-					       SUPPORTED_TP |
-					       SUPPORTED_Autoneg |
-					       SUPPORTED_Pause |
-					       SUPPORTED_Asym_Pause);
-			break;
-
-		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
-			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
-				  bp->link_params.ext_phy_config);
-			break;
-
-		default:
-			BNX2X_ERR("NVRAM config error. "
-				  "BAD XGXS ext_phy_config 0x%x\n",
-				  bp->link_params.ext_phy_config);
-			return;
-		}
-
-		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
-					   port*0x18);
-		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
-
-		break;
-
-	default:
-		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
-			  bp->port.link_config);
-		return;
-	}
-	bp->link_params.phy_addr = bp->port.phy_addr;
-
-	/* mask what we support according to speed_cap_mask */
-	if (!(bp->link_params.speed_cap_mask &
-				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
-		bp->port.supported &= ~SUPPORTED_10baseT_Half;
-
-	if (!(bp->link_params.speed_cap_mask &
-				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
-		bp->port.supported &= ~SUPPORTED_10baseT_Full;
-
-	if (!(bp->link_params.speed_cap_mask &
-				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
-		bp->port.supported &= ~SUPPORTED_100baseT_Half;
-
-	if (!(bp->link_params.speed_cap_mask &
-				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
-		bp->port.supported &= ~SUPPORTED_100baseT_Full;
-
-	if (!(bp->link_params.speed_cap_mask &
-					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
-		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
-					SUPPORTED_1000baseT_Full);
-
-	if (!(bp->link_params.speed_cap_mask &
-					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
-		bp->port.supported &= ~SUPPORTED_2500baseX_Full;
-
-	if (!(bp->link_params.speed_cap_mask &
-					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
-		bp->port.supported &= ~SUPPORTED_10000baseT_Full;
-
-	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
-}
-
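-/* Translate the NVRAM link_config into the requested line speed,
- * duplex and advertised mode mask, validating each choice against
- * bp->port.supported; an invalid combination is reported as an NVRAM
- * config error.
- */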
-static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
-{
-	bp->link_params.req_duplex = DUPLEX_FULL;
-
-	switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
-	case PORT_FEATURE_LINK_SPEED_AUTO:
-		if (bp->port.supported & SUPPORTED_Autoneg) {
-			bp->link_params.req_line_speed = SPEED_AUTO_NEG;
-			bp->port.advertising = bp->port.supported;
-		} else {
-			u32 ext_phy_type =
-			    XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
-
-			if ((ext_phy_type ==
-			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
-			    (ext_phy_type ==
-			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
-				/* force 10G, no AN */
-				bp->link_params.req_line_speed = SPEED_10000;
-				bp->port.advertising =
-						(ADVERTISED_10000baseT_Full |
-						 ADVERTISED_FIBRE);
-				break;
-			}
-			BNX2X_ERR("NVRAM config error. "
-				  "Invalid link_config 0x%x"
-				  "  Autoneg not supported\n",
-				  bp->port.link_config);
-			return;
-		}
-		break;
-
-	case PORT_FEATURE_LINK_SPEED_10M_FULL:
-		if (bp->port.supported & SUPPORTED_10baseT_Full) {
-			bp->link_params.req_line_speed = SPEED_10;
-			bp->port.advertising = (ADVERTISED_10baseT_Full |
-						ADVERTISED_TP);
-		} else {
-			BNX2X_ERROR("NVRAM config error. "
-				    "Invalid link_config 0x%x"
-				    "  speed_cap_mask 0x%x\n",
-				    bp->port.link_config,
-				    bp->link_params.speed_cap_mask);
-			return;
-		}
-		break;
-
-	case PORT_FEATURE_LINK_SPEED_10M_HALF:
-		if (bp->port.supported & SUPPORTED_10baseT_Half) {
-			bp->link_params.req_line_speed = SPEED_10;
-			bp->link_params.req_duplex = DUPLEX_HALF;
-			bp->port.advertising = (ADVERTISED_10baseT_Half |
-						ADVERTISED_TP);
-		} else {
-			BNX2X_ERROR("NVRAM config error. "
-				    "Invalid link_config 0x%x"
-				    "  speed_cap_mask 0x%x\n",
-				    bp->port.link_config,
-				    bp->link_params.speed_cap_mask);
-			return;
-		}
-		break;
-
-	case PORT_FEATURE_LINK_SPEED_100M_FULL:
-		if (bp->port.supported & SUPPORTED_100baseT_Full) {
-			bp->link_params.req_line_speed = SPEED_100;
-			bp->port.advertising = (ADVERTISED_100baseT_Full |
-						ADVERTISED_TP);
-		} else {
-			BNX2X_ERROR("NVRAM config error. "
-				    "Invalid link_config 0x%x"
-				    "  speed_cap_mask 0x%x\n",
-				    bp->port.link_config,
-				    bp->link_params.speed_cap_mask);
-			return;
-		}
-		break;
-
-	case PORT_FEATURE_LINK_SPEED_100M_HALF:
-		if (bp->port.supported & SUPPORTED_100baseT_Half) {
-			bp->link_params.req_line_speed = SPEED_100;
-			bp->link_params.req_duplex = DUPLEX_HALF;
-			bp->port.advertising = (ADVERTISED_100baseT_Half |
-						ADVERTISED_TP);
-		} else {
-			BNX2X_ERROR("NVRAM config error. "
-				    "Invalid link_config 0x%x"
-				    "  speed_cap_mask 0x%x\n",
-				    bp->port.link_config,
-				    bp->link_params.speed_cap_mask);
-			return;
-		}
-		break;
-
-	case PORT_FEATURE_LINK_SPEED_1G:
-		if (bp->port.supported & SUPPORTED_1000baseT_Full) {
-			bp->link_params.req_line_speed = SPEED_1000;
-			bp->port.advertising = (ADVERTISED_1000baseT_Full |
-						ADVERTISED_TP);
-		} else {
-			BNX2X_ERROR("NVRAM config error. "
-				    "Invalid link_config 0x%x"
-				    "  speed_cap_mask 0x%x\n",
-				    bp->port.link_config,
-				    bp->link_params.speed_cap_mask);
-			return;
-		}
-		break;
-
-	case PORT_FEATURE_LINK_SPEED_2_5G:
-		if (bp->port.supported & SUPPORTED_2500baseX_Full) {
-			bp->link_params.req_line_speed = SPEED_2500;
-			bp->port.advertising = (ADVERTISED_2500baseX_Full |
-						ADVERTISED_TP);
-		} else {
-			BNX2X_ERROR("NVRAM config error. "
-				    "Invalid link_config 0x%x"
-				    "  speed_cap_mask 0x%x\n",
-				    bp->port.link_config,
-				    bp->link_params.speed_cap_mask);
-			return;
-		}
-		break;
-
-	case PORT_FEATURE_LINK_SPEED_10G_CX4:
-	case PORT_FEATURE_LINK_SPEED_10G_KX4:
-	case PORT_FEATURE_LINK_SPEED_10G_KR:
-		if (bp->port.supported & SUPPORTED_10000baseT_Full) {
-			bp->link_params.req_line_speed = SPEED_10000;
-			bp->port.advertising = (ADVERTISED_10000baseT_Full |
-						ADVERTISED_FIBRE);
-		} else {
-			BNX2X_ERROR("NVRAM config error. "
-				    "Invalid link_config 0x%x"
-				    "  speed_cap_mask 0x%x\n",
-				    bp->port.link_config,
-				    bp->link_params.speed_cap_mask);
-			return;
-		}
-		break;
-
-	default:
-		BNX2X_ERROR("NVRAM config error. "
-			    "BAD link speed link_config 0x%x\n",
-			    bp->port.link_config);
-		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
-		bp->port.advertising = bp->port.supported;
-		break;
-	}
-
-	bp->link_params.req_flow_ctrl = (bp->port.link_config &
-					 PORT_FEATURE_FLOW_CONTROL_MASK);
-	if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
-	    !(bp->port.supported & SUPPORTED_Autoneg))
-		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
-
-	BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
-		       "  advertising 0x%x\n",
-		       bp->link_params.req_line_speed,
-		       bp->link_params.req_duplex,
-		       bp->link_params.req_flow_ctrl, bp->port.advertising);
-}
-
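-/* Assemble a 6-byte MAC address from shmem's {mac_upper, mac_lower}
- * pair: the 16-bit high part fills bytes 0-1 and the 32-bit low part
- * bytes 2-5, both big endian. For example, mac_hi 0x0102 and mac_lo
- * 0x03040506 yield 01:02:03:04:05:06.
- */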
-static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
-{
-	mac_hi = cpu_to_be16(mac_hi);
-	mac_lo = cpu_to_be32(mac_lo);
-	memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
-	memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
-}
-
-static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
-{
-	int port = BP_PORT(bp);
-	u32 val, val2;
-	u32 config;
-	u16 i;
-	u32 ext_phy_type;
-
-	bp->link_params.bp = bp;
-	bp->link_params.port = port;
-
-	bp->link_params.lane_config =
-		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
-	bp->link_params.ext_phy_config =
-		SHMEM_RD(bp,
-			 dev_info.port_hw_config[port].external_phy_config);
-	/* BCM8727_NOC => BCM8727, no over-current */
-	if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
-	    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
-		bp->link_params.ext_phy_config &=
-			~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
-		bp->link_params.ext_phy_config |=
-			PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
-		bp->link_params.feature_config_flags |=
-			FEATURE_CONFIG_BCM8727_NOC;
-	}
-
-	bp->link_params.speed_cap_mask =
-		SHMEM_RD(bp,
-			 dev_info.port_hw_config[port].speed_capability_mask);
-
-	bp->port.link_config =
-		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
-
-	/* Get the 4 lanes xgxs config rx and tx */
-	for (i = 0; i < 2; i++) {
-		val = SHMEM_RD(bp,
-			   dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
-		bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
-		bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
-
-		val = SHMEM_RD(bp,
-			   dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
-		bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
-		bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
-	}
-
-	/* If the device is capable of WoL, set the default state according
-	 * to the HW
-	 */
-	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
-	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
-		   (config & PORT_FEATURE_WOL_ENABLED));
-
-	BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
-		       "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
-		       bp->link_params.lane_config,
-		       bp->link_params.ext_phy_config,
-		       bp->link_params.speed_cap_mask, bp->port.link_config);
-
-	bp->link_params.switch_cfg |= (bp->port.link_config &
-				       PORT_FEATURE_CONNECTED_SWITCH_MASK);
-	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
-
-	bnx2x_link_settings_requested(bp);
-
-	/*
-	 * If connected directly, work with the internal PHY, otherwise, work
-	 * with the external PHY
-	 */
-	ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
-	if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
-		bp->mdio.prtad = bp->link_params.phy_addr;
-
-	else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
-		 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
-		bp->mdio.prtad =
-			XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
-
-	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
-	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
-	bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
-	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
-	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
-
-#ifdef BCM_CNIC
-	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
-	val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
-	bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
-#endif
-}
-
-static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
-{
-	int func = BP_FUNC(bp);
-	u32 val, val2;
-	int rc = 0;
-
-	bnx2x_get_common_hwinfo(bp);
-
-	bp->e1hov = 0;
-	bp->e1hmf = 0;
-	if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
-		bp->mf_config =
-			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
-
-		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
-		       FUNC_MF_CFG_E1HOV_TAG_MASK);
-		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
-			bp->e1hmf = 1;
-		BNX2X_DEV_INFO("%s function mode\n",
-			       IS_E1HMF(bp) ? "multi" : "single");
-
-		if (IS_E1HMF(bp)) {
-			val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
-								e1hov_tag) &
-			       FUNC_MF_CFG_E1HOV_TAG_MASK);
-			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
-				bp->e1hov = val;
-				BNX2X_DEV_INFO("E1HOV for func %d is %d "
-					       "(0x%04x)\n",
-					       func, bp->e1hov, bp->e1hov);
-			} else {
-				BNX2X_ERROR("No valid E1HOV for func %d,"
-					    "  aborting\n", func);
-				rc = -EPERM;
-			}
-		} else {
-			if (BP_E1HVN(bp)) {
-				BNX2X_ERROR("VN %d in single function mode,"
-					    "  aborting\n", BP_E1HVN(bp));
-				rc = -EPERM;
-			}
-		}
-	}
-
-	if (!BP_NOMCP(bp)) {
-		bnx2x_get_port_hwinfo(bp);
-
-		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
-			      DRV_MSG_SEQ_NUMBER_MASK);
-		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
-	}
-
-	if (IS_E1HMF(bp)) {
-		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
-		val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
-		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
-		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
-			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
-			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
-			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
-			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
-			bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
-			bp->dev->dev_addr[5] = (u8)(val & 0xff);
-			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
-			       ETH_ALEN);
-			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
-			       ETH_ALEN);
-		}
-
-		return rc;
-	}
-
-	if (BP_NOMCP(bp)) {
-		/* only supposed to happen on emulation/FPGA */
-		BNX2X_ERROR("warning: random MAC workaround active\n");
-		random_ether_addr(bp->dev->dev_addr);
-		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
-	}
-
-	return rc;
-}
-
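-/* Extract the firmware version string from the PCI VPD: find the
- * read-only data tag, match the manufacturer ID keyword against Dell's
- * vendor ID and, on a match, copy the VENDOR0 keyword into bp->fw_ver.
- */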
-static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
-{
-	int cnt, i, block_end, rodi;
-	char vpd_data[BNX2X_VPD_LEN+1];
-	char str_id_reg[VENDOR_ID_LEN+1];
-	char str_id_cap[VENDOR_ID_LEN+1];
-	u8 len;
-
-	cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
-	memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
-
-	if (cnt < BNX2X_VPD_LEN)
-		goto out_not_found;
-
-	i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
-			     PCI_VPD_LRDT_RO_DATA);
-	if (i < 0)
-		goto out_not_found;
-
-
-	block_end = i + PCI_VPD_LRDT_TAG_SIZE +
-		    pci_vpd_lrdt_size(&vpd_data[i]);
-
-	i += PCI_VPD_LRDT_TAG_SIZE;
-
-	if (block_end > BNX2X_VPD_LEN)
-		goto out_not_found;
-
-	rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
-				   PCI_VPD_RO_KEYWORD_MFR_ID);
-	if (rodi < 0)
-		goto out_not_found;
-
-	len = pci_vpd_info_field_size(&vpd_data[rodi]);
-
-	if (len != VENDOR_ID_LEN)
-		goto out_not_found;
-
-	rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
-
-	/* vendor specific info */
-	snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
-	snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
-	if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
-	    !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
-
-		rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
-						PCI_VPD_RO_KEYWORD_VENDOR0);
-		if (rodi >= 0) {
-			len = pci_vpd_info_field_size(&vpd_data[rodi]);
-
-			rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
-
-			if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
-				memcpy(bp->fw_ver, &vpd_data[rodi], len);
-				bp->fw_ver[len] = ' ';
-			}
-		}
-		return;
-	}
-out_not_found:
-	return;
-}
-
-static int __devinit bnx2x_init_bp(struct bnx2x *bp)
-{
-	int func = BP_FUNC(bp);
-	int timer_interval;
-	int rc;
-
-	/* Disable interrupt handling until HW is initialized */
-	atomic_set(&bp->intr_sem, 1);
-	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
-
-	mutex_init(&bp->port.phy_mutex);
-	mutex_init(&bp->fw_mb_mutex);
-#ifdef BCM_CNIC
-	mutex_init(&bp->cnic_mutex);
-#endif
-
-	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
-	INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
-
-	rc = bnx2x_get_hwinfo(bp);
-
-	bnx2x_read_fwinfo(bp);
-	/* need to reset the chip if UNDI was active */
-	if (!BP_NOMCP(bp))
-		bnx2x_undi_unload(bp);
-
-	if (CHIP_REV_IS_FPGA(bp))
-		dev_err(&bp->pdev->dev, "FPGA detected\n");
-
-	if (BP_NOMCP(bp) && (func == 0))
-		dev_err(&bp->pdev->dev, "MCP disabled, "
-					"must load devices in order!\n");
-
-	/* Set multi queue mode */
-	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
-	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
-		dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
-					"requested is not MSI-X\n");
-		multi_mode = ETH_RSS_MODE_DISABLED;
-	}
-	bp->multi_mode = multi_mode;
-
-
-	bp->dev->features |= NETIF_F_GRO;
-
-	/* Set TPA flags */
-	if (disable_tpa) {
-		bp->flags &= ~TPA_ENABLE_FLAG;
-		bp->dev->features &= ~NETIF_F_LRO;
-	} else {
-		bp->flags |= TPA_ENABLE_FLAG;
-		bp->dev->features |= NETIF_F_LRO;
-	}
-
-	if (CHIP_IS_E1(bp))
-		bp->dropless_fc = 0;
-	else
-		bp->dropless_fc = dropless_fc;
-
-	bp->mrrs = mrrs;
-
-	bp->tx_ring_size = MAX_TX_AVAIL;
-	bp->rx_ring_size = MAX_RX_AVAIL;
-
-	bp->rx_csum = 1;
-
-	/* make sure that the numbers are in the right granularity */
-	bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
-	bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
-
-	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
-	bp->current_interval = (poll ? poll : timer_interval);
-
-	init_timer(&bp->timer);
-	bp->timer.expires = jiffies + bp->current_interval;
-	bp->timer.data = (unsigned long) bp;
-	bp->timer.function = bnx2x_timer;
-
-	return rc;
-}
-
-/*
- * ethtool service functions
- */
-
-/* All ethtool functions called with rtnl_lock */
-
-static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
-	struct bnx2x *bp = netdev_priv(dev);
-
-	cmd->supported = bp->port.supported;
-	cmd->advertising = bp->port.advertising;
-
-	if ((bp->state == BNX2X_STATE_OPEN) &&
-	    !(bp->flags & MF_FUNC_DIS) &&
-	    (bp->link_vars.link_up)) {
-		cmd->speed = bp->link_vars.line_speed;
-		cmd->duplex = bp->link_vars.duplex;
-		if (IS_E1HMF(bp)) {
-			u16 vn_max_rate;
-
-			vn_max_rate =
-				((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
-				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
-			if (vn_max_rate < cmd->speed)
-				cmd->speed = vn_max_rate;
-		}
-	} else {
-		cmd->speed = -1;
-		cmd->duplex = -1;
-	}
-
-	if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
-		u32 ext_phy_type =
-			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
-
-		switch (ext_phy_type) {
-		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
-		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
-		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
-		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
-		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
-		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
-		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
-			cmd->port = PORT_FIBRE;
-			break;
-
-		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
-		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
-			cmd->port = PORT_TP;
-			break;
-
-		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
-			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
-				  bp->link_params.ext_phy_config);
-			break;
-
-		default:
-			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
-			   bp->link_params.ext_phy_config);
-			break;
-		}
-	} else
-		cmd->port = PORT_TP;
-
-	cmd->phy_address = bp->mdio.prtad;
-	cmd->transceiver = XCVR_INTERNAL;
-
-	if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
-		cmd->autoneg = AUTONEG_ENABLE;
-	else
-		cmd->autoneg = AUTONEG_DISABLE;
-
-	cmd->maxtxpkt = 0;
-	cmd->maxrxpkt = 0;
-
-	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
-	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
-	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
-	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
-	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
-	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
-	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
-
-	return 0;
-}
-
-static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
-	struct bnx2x *bp = netdev_priv(dev);
-	u32 advertising;
-
-	if (IS_E1HMF(bp))
-		return 0;
-
-	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
-	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
-	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
-	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
-	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
-	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
-	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
-
-	if (cmd->autoneg == AUTONEG_ENABLE) {
-		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
-			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
-			return -EINVAL;
-		}
-
-		/* advertise the requested speed and duplex if supported */
-		cmd->advertising &= bp->port.supported;
-
-		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
-		bp->link_params.req_duplex = DUPLEX_FULL;
-		bp->port.advertising |= (ADVERTISED_Autoneg |
-					 cmd->advertising);
-
-	} else { /* forced speed */
-		/* advertise the requested speed and duplex if supported */
-		switch (cmd->speed) {
-		case SPEED_10:
-			if (cmd->duplex == DUPLEX_FULL) {
-				if (!(bp->port.supported &
-				      SUPPORTED_10baseT_Full)) {
-					DP(NETIF_MSG_LINK,
-					   "10M full not supported\n");
-					return -EINVAL;
-				}
-
-				advertising = (ADVERTISED_10baseT_Full |
-					       ADVERTISED_TP);
-			} else {
-				if (!(bp->port.supported &
-				      SUPPORTED_10baseT_Half)) {
-					DP(NETIF_MSG_LINK,
-					   "10M half not supported\n");
-					return -EINVAL;
-				}
-
-				advertising = (ADVERTISED_10baseT_Half |
-					       ADVERTISED_TP);
-			}
-			break;
-
-		case SPEED_100:
-			if (cmd->duplex == DUPLEX_FULL) {
-				if (!(bp->port.supported &
-						SUPPORTED_100baseT_Full)) {
-					DP(NETIF_MSG_LINK,
-					   "100M full not supported\n");
-					return -EINVAL;
-				}
-
-				advertising = (ADVERTISED_100baseT_Full |
-					       ADVERTISED_TP);
-			} else {
-				if (!(bp->port.supported &
-						SUPPORTED_100baseT_Half)) {
-					DP(NETIF_MSG_LINK,
-					   "100M half not supported\n");
-					return -EINVAL;
-				}
-
-				advertising = (ADVERTISED_100baseT_Half |
-					       ADVERTISED_TP);
-			}
-			break;
-
-		case SPEED_1000:
-			if (cmd->duplex != DUPLEX_FULL) {
-				DP(NETIF_MSG_LINK, "1G half not supported\n");
-				return -EINVAL;
-			}
-
-			if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
-				DP(NETIF_MSG_LINK, "1G full not supported\n");
-				return -EINVAL;
-			}
-
-			advertising = (ADVERTISED_1000baseT_Full |
-				       ADVERTISED_TP);
-			break;
-
-		case SPEED_2500:
-			if (cmd->duplex != DUPLEX_FULL) {
-				DP(NETIF_MSG_LINK,
-				   "2.5G half not supported\n");
-				return -EINVAL;
-			}
-
-			if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
-				DP(NETIF_MSG_LINK,
-				   "2.5G full not supported\n");
-				return -EINVAL;
-			}
-
-			advertising = (ADVERTISED_2500baseX_Full |
-				       ADVERTISED_TP);
-			break;
-
-		case SPEED_10000:
-			if (cmd->duplex != DUPLEX_FULL) {
-				DP(NETIF_MSG_LINK, "10G half not supported\n");
-				return -EINVAL;
-			}
-
-			if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
-				DP(NETIF_MSG_LINK, "10G full not supported\n");
-				return -EINVAL;
-			}
-
-			advertising = (ADVERTISED_10000baseT_Full |
-				       ADVERTISED_FIBRE);
-			break;
-
-		default:
-			DP(NETIF_MSG_LINK, "Unsupported speed\n");
-			return -EINVAL;
-		}
-
-		bp->link_params.req_line_speed = cmd->speed;
-		bp->link_params.req_duplex = cmd->duplex;
-		bp->port.advertising = advertising;
-	}
-
-	DP(NETIF_MSG_LINK, "req_line_speed %d\n"
-	   DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
-	   bp->link_params.req_line_speed, bp->link_params.req_duplex,
-	   bp->port.advertising);
-
-	if (netif_running(dev)) {
-		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
-		bnx2x_link_set(bp);
-	}
-
-	return 0;
-}
-
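-/* test whether a register-table entry is present on E1 / E1H silicon */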
-#define IS_E1_ONLINE(info)	(((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
-#define IS_E1H_ONLINE(info)	(((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
-
-static int bnx2x_get_regs_len(struct net_device *dev)
-{
-	struct bnx2x *bp = netdev_priv(dev);
-	int regdump_len = 0;
-	int i;
-
-	if (CHIP_IS_E1(bp)) {
-		for (i = 0; i < REGS_COUNT; i++)
-			if (IS_E1_ONLINE(reg_addrs[i].info))
-				regdump_len += reg_addrs[i].size;
-
-		for (i = 0; i < WREGS_COUNT_E1; i++)
-			if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
-				regdump_len += wreg_addrs_e1[i].size *
-					(1 + wreg_addrs_e1[i].read_regs_count);
-
-	} else { /* E1H */
-		for (i = 0; i < REGS_COUNT; i++)
-			if (IS_E1H_ONLINE(reg_addrs[i].info))
-				regdump_len += reg_addrs[i].size;
-
-		for (i = 0; i < WREGS_COUNT_E1H; i++)
-			if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
-				regdump_len += wreg_addrs_e1h[i].size *
-					(1 + wreg_addrs_e1h[i].read_regs_count);
-	}
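-	/* counts so far are in dwords; convert to bytes and add the dump header */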
-	regdump_len *= 4;
-	regdump_len += sizeof(struct dump_hdr);
-
-	return regdump_len;
-}
-
-static void bnx2x_get_regs(struct net_device *dev,
-			   struct ethtool_regs *regs, void *_p)
-{
-	u32 *p = _p, i, j;
-	struct bnx2x *bp = netdev_priv(dev);
-	struct dump_hdr dump_hdr = {0};
-
-	regs->version = 0;
-	memset(p, 0, regs->len);
-
-	if (!netif_running(bp->dev))
-		return;
-
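-	/* hdr_size is in dwords and excludes the first dword of the header itself */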
-	dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
-	dump_hdr.dump_sign = dump_sign_all;
-	dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
-	dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
-	dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
-	dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
-	dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
-
-	memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
-	p += dump_hdr.hdr_size + 1;
-
-	if (CHIP_IS_E1(bp)) {
-		for (i = 0; i < REGS_COUNT; i++)
-			if (IS_E1_ONLINE(reg_addrs[i].info))
-				for (j = 0; j < reg_addrs[i].size; j++)
-					*p++ = REG_RD(bp,
-						      reg_addrs[i].addr + j*4);
-
-	} else { /* E1H */
-		for (i = 0; i < REGS_COUNT; i++)
-			if (IS_E1H_ONLINE(reg_addrs[i].info))
-				for (j = 0; j < reg_addrs[i].size; j++)
-					*p++ = REG_RD(bp,
-						      reg_addrs[i].addr + j*4);
-	}
-}
-
-#define PHY_FW_VER_LEN			10
-
-static void bnx2x_get_drvinfo(struct net_device *dev,
-			      struct ethtool_drvinfo *info)
-{
-	struct bnx2x *bp = netdev_priv(dev);
-	u8 phy_fw_ver[PHY_FW_VER_LEN];
-
-	strcpy(info->driver, DRV_MODULE_NAME);
-	strcpy(info->version, DRV_MODULE_VERSION);
-
-	phy_fw_ver[0] = '\0';
-	if (bp->port.pmf) {
-		bnx2x_acquire_phy_lock(bp);
-		bnx2x_get_ext_phy_fw_version(&bp->link_params,
-					     (bp->state != BNX2X_STATE_CLOSED),
-					     phy_fw_ver, PHY_FW_VER_LEN);
-		bnx2x_release_phy_lock(bp);
-	}
-
-	strncpy(info->fw_version, bp->fw_ver, 32);
-	snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
-		 "bc %d.%d.%d%s%s",
-		 (bp->common.bc_ver & 0xff0000) >> 16,
-		 (bp->common.bc_ver & 0xff00) >> 8,
-		 (bp->common.bc_ver & 0xff),
-		 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
-	strcpy(info->bus_info, pci_name(bp->pdev));
-	info->n_stats = BNX2X_NUM_STATS;
-	info->testinfo_len = BNX2X_NUM_TESTS;
-	info->eedump_len = bp->common.flash_size;
-	info->regdump_len = bnx2x_get_regs_len(dev);
-}
-
-static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
-{
-	struct bnx2x *bp = netdev_priv(dev);
-
-	if (bp->flags & NO_WOL_FLAG) {
-		wol->supported = 0;
-		wol->wolopts = 0;
-	} else {
-		wol->supported = WAKE_MAGIC;
-		if (bp->wol)
-			wol->wolopts = WAKE_MAGIC;
-		else
-			wol->wolopts = 0;
-	}
-	memset(&wol->sopass, 0, sizeof(wol->sopass));
-}
-
-static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
-{
-	struct bnx2x *bp = netdev_priv(dev);
-
-	if (wol->wolopts & ~WAKE_MAGIC)
-		return -EINVAL;
-
-	if (wol->wolopts & WAKE_MAGIC) {
-		if (bp->flags & NO_WOL_FLAG)
-			return -EINVAL;
-
-		bp->wol = 1;
-	} else
-		bp->wol = 0;
-
-	return 0;
-}
-
-static u32 bnx2x_get_msglevel(struct net_device *dev)
-{
-	struct bnx2x *bp = netdev_priv(dev);
-
-	return bp->msg_enable;
-}
-
-static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
-{
-	struct bnx2x *bp = netdev_priv(dev);
-
-	if (capable(CAP_NET_ADMIN))
-		bp->msg_enable = level;
-}
-
-static int bnx2x_nway_reset(struct net_device *dev)
-{
-	struct bnx2x *bp = netdev_priv(dev);
-
-	if (!bp->port.pmf)
-		return 0;
-
-	if (netif_running(dev)) {
-		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
-		bnx2x_link_set(bp);
-	}
-
-	return 0;
-}
-
-static u32 bnx2x_get_link(struct net_device *dev)
-{
-	struct bnx2x *bp = netdev_priv(dev);
-
-	if (bp->flags & MF_FUNC_DIS)
-		return 0;
-
-	return bp->link_vars.link_up;
-}
-
-static int bnx2x_get_eeprom_len(struct net_device *dev)
-{
-	struct bnx2x *bp = netdev_priv(dev);
-
-	return bp->common.flash_size;
-}
-
-static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
-{
-	int port = BP_PORT(bp);
-	int count, i;
-	u32 val = 0;
-
-	/* adjust timeout for emulation/FPGA */
-	count = NVRAM_TIMEOUT_COUNT;
-	if (CHIP_REV_IS_SLOW(bp))
-		count *= 100;
-
-	/* request access to nvram interface */
-	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
-	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
-
-	for (i = 0; i < count*10; i++) {
-		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
-		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
-			break;
-
-		udelay(5);
-	}
-
-	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
-		DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
-		return -EBUSY;
-	}
-
-	return 0;
-}
-
-static int bnx2x_release_nvram_lock(struct bnx2x *bp)
-{
-	int port = BP_PORT(bp);
-	int count, i;
-	u32 val = 0;
-
-	/* adjust timeout for emulation/FPGA */
-	count = NVRAM_TIMEOUT_COUNT;
-	if (CHIP_REV_IS_SLOW(bp))
-		count *= 100;
-
-	/* relinquish nvram interface */
-	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
-	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
-
-	for (i = 0; i < count*10; i++) {
-		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
-		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
-			break;
-
-		udelay(5);
-	}
-
-	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
-		DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
-		return -EBUSY;
-	}
-
-	return 0;
-}
-
-static void bnx2x_enable_nvram_access(struct bnx2x *bp)
-{
-	u32 val;
-
-	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
-
-	/* enable both bits, even on read */
-	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
-	       (val | MCPR_NVM_ACCESS_ENABLE_EN |
-		      MCPR_NVM_ACCESS_ENABLE_WR_EN));
-}
-
-static void bnx2x_disable_nvram_access(struct bnx2x *bp)
-{
-	u32 val;
-
-	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
-
-	/* disable both bits, even after read */
-	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
-	       (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
-			MCPR_NVM_ACCESS_ENABLE_WR_EN)));
-}
-
-static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
-				  u32 cmd_flags)
-{
-	int count, i, rc;
-	u32 val;
-
-	/* build the command word */
-	cmd_flags |= MCPR_NVM_COMMAND_DOIT;
-
-	/* need to clear DONE bit separately */
-	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
-
-	/* address of the NVRAM to read from */
-	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
-	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
-
-	/* issue a read command */
-	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
-
-	/* adjust timeout for emulation/FPGA */
-	count = NVRAM_TIMEOUT_COUNT;
-	if (CHIP_REV_IS_SLOW(bp))
-		count *= 100;
-
-	/* wait for completion */
-	*ret_val = 0;
-	rc = -EBUSY;
-	for (i = 0; i < count; i++) {
-		udelay(5);
-		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
-
-		if (val & MCPR_NVM_COMMAND_DONE) {
-			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
-			/* we read nvram data in cpu order,
-			 * but ethtool sees it as an array of bytes;
-			 * converting to big-endian does the work */
-			*ret_val = cpu_to_be32(val);
-			rc = 0;
-			break;
-		}
-	}
-
-	return rc;
-}
-
-static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
-			    int buf_size)
-{
-	int rc;
-	u32 cmd_flags;
-	__be32 val;
-
-	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
-		DP(BNX2X_MSG_NVM,
-		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
-		   offset, buf_size);
-		return -EINVAL;
-	}
-
-	if (offset + buf_size > bp->common.flash_size) {
-		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
-				  " buf_size (0x%x) > flash_size (0x%x)\n",
-		   offset, buf_size, bp->common.flash_size);
-		return -EINVAL;
-	}
-
-	/* request access to nvram interface */
-	rc = bnx2x_acquire_nvram_lock(bp);
-	if (rc)
-		return rc;
-
-	/* enable access to nvram interface */
-	bnx2x_enable_nvram_access(bp);
-
-	/* read the first word(s) */
-	cmd_flags = MCPR_NVM_COMMAND_FIRST;
-	while ((buf_size > sizeof(u32)) && (rc == 0)) {
-		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
-		memcpy(ret_buf, &val, 4);
-
-		/* advance to the next dword */
-		offset += sizeof(u32);
-		ret_buf += sizeof(u32);
-		buf_size -= sizeof(u32);
-		cmd_flags = 0;
-	}
-
-	if (rc == 0) {
-		cmd_flags |= MCPR_NVM_COMMAND_LAST;
-		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
-		memcpy(ret_buf, &val, 4);
-	}
-
-	/* disable access to nvram interface */
-	bnx2x_disable_nvram_access(bp);
-	bnx2x_release_nvram_lock(bp);
-
-	return rc;
-}
-
-static int bnx2x_get_eeprom(struct net_device *dev,
-			    struct ethtool_eeprom *eeprom, u8 *eebuf)
-{
-	struct bnx2x *bp = netdev_priv(dev);
-	int rc;
-
-	if (!netif_running(dev))
-		return -EAGAIN;
-
-	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
-	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
-	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
-	   eeprom->len, eeprom->len);
-
-	/* parameters already validated in ethtool_get_eeprom */
-
-	rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
-
-	return rc;
-}
-
-static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
-				   u32 cmd_flags)
-{
-	int count, i, rc;
-
-	/* build the command word */
-	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
-
-	/* need to clear DONE bit separately */
-	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
-
-	/* write the data */
-	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
-
-	/* address of the NVRAM to write to */
-	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
-	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
-
-	/* issue the write command */
-	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
-
-	/* adjust timeout for emulation/FPGA */
-	count = NVRAM_TIMEOUT_COUNT;
-	if (CHIP_REV_IS_SLOW(bp))
-		count *= 100;
-
-	/* wait for completion */
-	rc = -EBUSY;
-	for (i = 0; i < count; i++) {
-		udelay(5);
-		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
-		if (val & MCPR_NVM_COMMAND_DONE) {
-			rc = 0;
-			break;
-		}
-	}
-
-	return rc;
-}
-
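-/* bit shift of a byte within its dword, e.g. BYTE_OFFSET(5) == 8 */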
-#define BYTE_OFFSET(offset)		(8 * (offset & 0x03))
-
-static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
-			      int buf_size)
-{
-	int rc;
-	u32 cmd_flags;
-	u32 align_offset;
-	__be32 val;
-
-	if (offset + buf_size > bp->common.flash_size) {
-		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
-				  " buf_size (0x%x) > flash_size (0x%x)\n",
-		   offset, buf_size, bp->common.flash_size);
-		return -EINVAL;
-	}
-
-	/* request access to nvram interface */
-	rc = bnx2x_acquire_nvram_lock(bp);
-	if (rc)
-		return rc;
-
-	/* enable access to nvram interface */
-	bnx2x_enable_nvram_access(bp);
-
-	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
-	align_offset = (offset & ~0x03);
-	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
-
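-	/* read-modify-write: splice the single byte into its containing dword */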
-	if (rc == 0) {
-		val &= ~(0xff << BYTE_OFFSET(offset));
-		val |= (*data_buf << BYTE_OFFSET(offset));
-
-		/* nvram data is returned as an array of bytes;
-		 * convert it back to cpu order */
-		val = be32_to_cpu(val);
-
-		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
-					     cmd_flags);
-	}
-
-	/* disable access to nvram interface */
-	bnx2x_disable_nvram_access(bp);
-	bnx2x_release_nvram_lock(bp);
-
-	return rc;
-}
-
-static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
-			     int buf_size)
-{
-	int rc;
-	u32 cmd_flags;
-	u32 val;
-	u32 written_so_far;
-
-	if (buf_size == 1)	/* ethtool */
-		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
-
-	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
-		DP(BNX2X_MSG_NVM,
-		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
-		   offset, buf_size);
-		return -EINVAL;
-	}
-
-	if (offset + buf_size > bp->common.flash_size) {
-		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
-				  " buf_size (0x%x) > flash_size (0x%x)\n",
-		   offset, buf_size, bp->common.flash_size);
-		return -EINVAL;
-	}
-
-	/* request access to nvram interface */
-	rc = bnx2x_acquire_nvram_lock(bp);
-	if (rc)
-		return rc;
-
-	/* enable access to nvram interface */
-	bnx2x_enable_nvram_access(bp);
-
-	written_so_far = 0;
-	cmd_flags = MCPR_NVM_COMMAND_FIRST;
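-	/* raise FIRST/LAST at NVRAM page boundaries and on the final dword */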
-	while ((written_so_far < buf_size) && (rc == 0)) {
-		if (written_so_far == (buf_size - sizeof(u32)))
-			cmd_flags |= MCPR_NVM_COMMAND_LAST;
-		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
-			cmd_flags |= MCPR_NVM_COMMAND_LAST;
-		else if ((offset % NVRAM_PAGE_SIZE) == 0)
-			cmd_flags |= MCPR_NVM_COMMAND_FIRST;
-
-		memcpy(&val, data_buf, 4);
-
-		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
-
-		/* advance to the next dword */
-		offset += sizeof(u32);
-		data_buf += sizeof(u32);
-		written_so_far += sizeof(u32);
-		cmd_flags = 0;
-	}
-
-	/* disable access to nvram interface */
-	bnx2x_disable_nvram_access(bp);
-	bnx2x_release_nvram_lock(bp);
-
-	return rc;
-}
-
-static int bnx2x_set_eeprom(struct net_device *dev,
-			    struct ethtool_eeprom *eeprom, u8 *eebuf)
-{
-	struct bnx2x *bp = netdev_priv(dev);
-	int port = BP_PORT(bp);
-	int rc = 0;
-
-	if (!netif_running(dev))
-		return -EAGAIN;
-
-	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
-	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
-	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
-	   eeprom->len, eeprom->len);
-
-	/* parameters already validated in ethtool_set_eeprom */
-
-	/* PHY eeprom can be accessed only by the PMF */
-	if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
-	    !bp->port.pmf)
-		return -EINVAL;
-
-	if (eeprom->magic == 0x50485950) {
-		/* 'PHYP' (0x50485950): prepare phy for FW upgrade */
-		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
-
-		bnx2x_acquire_phy_lock(bp);
-		rc |= bnx2x_link_reset(&bp->link_params,
-				       &bp->link_vars, 0);
-		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
-					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
-			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
-				       MISC_REGISTERS_GPIO_HIGH, port);
-		bnx2x_release_phy_lock(bp);
-		bnx2x_link_report(bp);
-
-	} else if (eeprom->magic == 0x50485952) {
-		/* 'PHYR' (0x50485952): re-init link after FW upgrade */
-		if (bp->state == BNX2X_STATE_OPEN) {
-			bnx2x_acquire_phy_lock(bp);
-			rc |= bnx2x_link_reset(&bp->link_params,
-					       &bp->link_vars, 1);
-
-			rc |= bnx2x_phy_init(&bp->link_params,
-					     &bp->link_vars);
-			bnx2x_release_phy_lock(bp);
-			bnx2x_calc_fc_adv(bp);
-		}
-	} else if (eeprom->magic == 0x50485943) {
-		/* 'PHYC' (0x50485943): PHY FW upgrade completed */
-		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
-				       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
-			u8 ext_phy_addr =
-			     XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
-
-			/* DSP Remove Download Mode */
-			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
-				       MISC_REGISTERS_GPIO_LOW, port);
-
-			bnx2x_acquire_phy_lock(bp);
-
-			bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
-
-			/* wait 0.5 sec to allow it to run */
-			msleep(500);
-			bnx2x_ext_phy_hw_reset(bp, port);
-			msleep(500);
-			bnx2x_release_phy_lock(bp);
-		}
-	} else
-		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
-
-	return rc;
-}
-
-static int bnx2x_get_coalesce(struct net_device *dev,
-			      struct ethtool_coalesce *coal)
-{
-	struct bnx2x *bp = netdev_priv(dev);
-
-	memset(coal, 0, sizeof(struct ethtool_coalesce));
-
-	coal->rx_coalesce_usecs = bp->rx_ticks;
-	coal->tx_coalesce_usecs = bp->tx_ticks;
-
-	return 0;
-}
-
-static int bnx2x_set_coalesce(struct net_device *dev,
-			      struct ethtool_coalesce *coal)
-{
-	struct bnx2x *bp = netdev_priv(dev);
-
-	bp->rx_ticks = (u16)coal->rx_coalesce_usecs;
-	if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
-		bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;
-
-	bp->tx_ticks = (u16)coal->tx_coalesce_usecs;
-	if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
-		bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;
-
-	if (netif_running(dev))
-		bnx2x_update_coalesce(bp);
-
-	return 0;
-}
-
-static void bnx2x_get_ringparam(struct net_device *dev,
-				struct ethtool_ringparam *ering)
-{
-	struct bnx2x *bp = netdev_priv(dev);
-
-	ering->rx_max_pending = MAX_RX_AVAIL;
-	ering->rx_mini_max_pending = 0;
-	ering->rx_jumbo_max_pending = 0;
-
-	ering->rx_pending = bp->rx_ring_size;
-	ering->rx_mini_pending = 0;
-	ering->rx_jumbo_pending = 0;
-
-	ering->tx_max_pending = MAX_TX_AVAIL;
-	ering->tx_pending = bp->tx_ring_size;
-}
-
-static int bnx2x_set_ringparam(struct net_device *dev,
-			       struct ethtool_ringparam *ering)
-{
-	struct bnx2x *bp = netdev_priv(dev);
-	int rc = 0;
-
-	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
-		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
-		return -EAGAIN;
-	}
-
-	if ((ering->rx_pending > MAX_RX_AVAIL) ||
-	    (ering->tx_pending > MAX_TX_AVAIL) ||
-	    (ering->tx_pending <= MAX_SKB_FRAGS + 4))
-		return -EINVAL;
-
-	bp->rx_ring_size = ering->rx_pending;
-	bp->tx_ring_size = ering->tx_pending;
-
-	if (netif_running(dev)) {
-		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
-		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
-	}
-
-	return rc;
-}
-
-static void bnx2x_get_pauseparam(struct net_device *dev,
-				 struct ethtool_pauseparam *epause)
-{
-	struct bnx2x *bp = netdev_priv(dev);
-
-	epause->autoneg = (bp->link_params.req_flow_ctrl ==
-			   BNX2X_FLOW_CTRL_AUTO) &&
-			  (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
-
-	epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
-			    BNX2X_FLOW_CTRL_RX);
-	epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
-			    BNX2X_FLOW_CTRL_TX);
-
-	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
-	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
-	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
-}
-
-static int bnx2x_set_pauseparam(struct net_device *dev,
-				struct ethtool_pauseparam *epause)
-{
-	struct bnx2x *bp = netdev_priv(dev);
-
-	if (IS_E1HMF(bp))
-		return 0;
-
-	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
-	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
-	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
-
-	bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
-
-	if (epause->rx_pause)
-		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
-
-	if (epause->tx_pause)
-		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
-
-	if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
-		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
-
-	if (epause->autoneg) {
-		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
-			DP(NETIF_MSG_LINK, "autoneg not supported\n");
-			return -EINVAL;
-		}
-
-		if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
-			bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
-	}
-
-	DP(NETIF_MSG_LINK,
-	   "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
-
-	if (netif_running(dev)) {
-		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
-		bnx2x_link_set(bp);
-	}
-
-	return 0;
-}
-
-static int bnx2x_set_flags(struct net_device *dev, u32 data)
-{
-	struct bnx2x *bp = netdev_priv(dev);
-	int changed = 0;
-	int rc = 0;
-
-	if (data & ~(ETH_FLAG_LRO | ETH_FLAG_RXHASH))
-		return -EINVAL;
-
-	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
-		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
-		return -EAGAIN;
-	}
-
-	/* TPA requires Rx CSUM offloading */
-	if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
-		if (!disable_tpa) {
-			if (!(dev->features & NETIF_F_LRO)) {
-				dev->features |= NETIF_F_LRO;
-				bp->flags |= TPA_ENABLE_FLAG;
-				changed = 1;
-			}
-		} else
-			rc = -EINVAL;
-	} else if (dev->features & NETIF_F_LRO) {
-		dev->features &= ~NETIF_F_LRO;
-		bp->flags &= ~TPA_ENABLE_FLAG;
-		changed = 1;
-	}
-
-	if (data & ETH_FLAG_RXHASH)
-		dev->features |= NETIF_F_RXHASH;
-	else
-		dev->features &= ~NETIF_F_RXHASH;
-
-	if (changed && netif_running(dev)) {
-		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
-		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
-	}
-
-	return rc;
-}
-
-static u32 bnx2x_get_rx_csum(struct net_device *dev)
-{
-	struct bnx2x *bp = netdev_priv(dev);
-
-	return bp->rx_csum;
-}
-
-static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
-{
-	struct bnx2x *bp = netdev_priv(dev);
-	int rc = 0;
-
-	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
-		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
-		return -EAGAIN;
-	}
-
-	bp->rx_csum = data;
-
-	/* Disable TPA when Rx CSUM is disabled; otherwise all
-	   TPA'ed packets will be discarded due to a wrong TCP CSUM */
-	if (!data) {
-		u32 flags = ethtool_op_get_flags(dev);
-
-		rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
-	}
-
-	return rc;
-}
-
-static int bnx2x_set_tso(struct net_device *dev, u32 data)
-{
-	if (data) {
-		dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
-		dev->features |= NETIF_F_TSO6;
-	} else {
-		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
-		dev->features &= ~NETIF_F_TSO6;
-	}
-
-	return 0;
-}
-
-static const struct {
-	char string[ETH_GSTRING_LEN];
-} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
-	{ "register_test (offline)" },
-	{ "memory_test (offline)" },
-	{ "loopback_test (offline)" },
-	{ "nvram_test (online)" },
-	{ "interrupt_test (online)" },
-	{ "link_test (online)" },
-	{ "idle check (online)" }
-};
-
-static int bnx2x_test_registers(struct bnx2x *bp)
-{
-	int idx, i, rc = -ENODEV;
-	u32 wr_val = 0;
-	int port = BP_PORT(bp);
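-	/* register pattern test table: offset1 is the per-port stride,
-	   mask covers the writable bits */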
-	static const struct {
-		u32 offset0;
-		u32 offset1;
-		u32 mask;
-	} reg_tbl[] = {
-/* 0 */		{ BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
-		{ DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
-		{ HC_REG_AGG_INT_0,                    4, 0x000003ff },
-		{ PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
-		{ PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
-		{ PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
-		{ PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
-		{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
-		{ PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
-		{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
-/* 10 */	{ PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
-		{ QM_REG_CONNNUM_0,                    4, 0x000fffff },
-		{ TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
-		{ SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
-		{ SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
-		{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
-		{ XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
-		{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
-		{ NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
-		{ NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
-/* 20 */	{ NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
-		{ NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
-		{ NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
-		{ NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
-		{ NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
-		{ NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
-		{ NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
-		{ NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
-		{ NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
-		{ NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
-/* 30 */	{ NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
-		{ NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
-		{ NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
-		{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
-		{ NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
-		{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
-		{ NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },
-
-		{ 0xffffffff, 0, 0x00000000 }
-	};
-
-	if (!netif_running(bp->dev))
-		return rc;
-
-	/* Run the test twice:
-	   first by writing 0x00000000, then by writing 0xffffffff */
-	for (idx = 0; idx < 2; idx++) {
-
-		switch (idx) {
-		case 0:
-			wr_val = 0;
-			break;
-		case 1:
-			wr_val = 0xffffffff;
-			break;
-		}
-
-		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
-			u32 offset, mask, save_val, val;
-
-			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
-			mask = reg_tbl[i].mask;
-
-			save_val = REG_RD(bp, offset);
-
-			REG_WR(bp, offset, (wr_val & mask));
-			val = REG_RD(bp, offset);
-
-			/* Restore the original register's value */
-			REG_WR(bp, offset, save_val);
-
-			/* verify value is as expected */
-			if ((val & mask) != (wr_val & mask)) {
-				DP(NETIF_MSG_PROBE,
-				   "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n",
-				   offset, val, wr_val, mask);
-				goto test_reg_exit;
-			}
-		}
-	}
-
-	rc = 0;
-
-test_reg_exit:
-	return rc;
-}
-
-static int bnx2x_test_memory(struct bnx2x *bp)
-{
-	int i, j, rc = -ENODEV;
-	u32 val;
-	static const struct {
-		u32 offset;
-		int size;
-	} mem_tbl[] = {
-		{ CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
-		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
-		{ CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
-		{ DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
-		{ TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
-		{ UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
-		{ XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },
-
-		{ 0xffffffff, 0 }
-	};
-	static const struct {
-		char *name;
-		u32 offset;
-		u32 e1_mask;
-		u32 e1h_mask;
-	} prty_tbl[] = {
-		{ "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
-		{ "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
-		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
-		{ "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
-		{ "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
-		{ "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },
-
-		{ NULL, 0xffffffff, 0, 0 }
-	};
-
-	if (!netif_running(bp->dev))
-		return rc;
-
-	/* Go through all the memories */
-	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
-		for (j = 0; j < mem_tbl[i].size; j++)
-			REG_RD(bp, mem_tbl[i].offset + j*4);
-
-	/* Check the parity status */
-	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
-		val = REG_RD(bp, prty_tbl[i].offset);
-		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
-		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
-			DP(NETIF_MSG_HW,
-			   "%s is 0x%x\n", prty_tbl[i].name, val);
-			goto test_mem_exit;
-		}
-	}
-
-	rc = 0;
-
-test_mem_exit:
-	return rc;
-}
-
-static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
-{
-	int cnt = 1000;
-
-	if (link_up)
-		while (bnx2x_link_test(bp) && cnt--)
-			msleep(10);
-}
-
-static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
-{
-	unsigned int pkt_size, num_pkts, i;
-	struct sk_buff *skb;
-	unsigned char *packet;
-	struct bnx2x_fastpath *fp_rx = &bp->fp[0];
-	struct bnx2x_fastpath *fp_tx = &bp->fp[0];
-	u16 tx_start_idx, tx_idx;
-	u16 rx_start_idx, rx_idx;
-	u16 pkt_prod, bd_prod;
-	struct sw_tx_bd *tx_buf;
-	struct eth_tx_start_bd *tx_start_bd;
-	struct eth_tx_parse_bd *pbd = NULL;
-	dma_addr_t mapping;
-	union eth_rx_cqe *cqe;
-	u8 cqe_fp_flags;
-	struct sw_rx_bd *rx_buf;
-	u16 len;
-	int rc = -ENODEV;
-
-	/* check the loopback mode */
-	switch (loopback_mode) {
-	case BNX2X_PHY_LOOPBACK:
-		if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
-			return -EINVAL;
-		break;
-	case BNX2X_MAC_LOOPBACK:
-		bp->link_params.loopback_mode = LOOPBACK_BMAC;
-		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	/* prepare the loopback packet */
-	pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
-		     bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
-	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
-	if (!skb) {
-		rc = -ENOMEM;
-		goto test_loopback_exit;
-	}
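-	/* build a frame addressed to our own MAC with a recognizable
-	   payload pattern */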
-	packet = skb_put(skb, pkt_size);
-	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
-	memset(packet + ETH_ALEN, 0, ETH_ALEN);
-	memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
-	for (i = ETH_HLEN; i < pkt_size; i++)
-		packet[i] = (unsigned char) (i & 0xff);
-
-	/* send the loopback packet */
-	num_pkts = 0;
-	tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
-	rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
-
-	pkt_prod = fp_tx->tx_pkt_prod++;
-	tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
-	tx_buf->first_bd = fp_tx->tx_bd_prod;
-	tx_buf->skb = skb;
-	tx_buf->flags = 0;
-
-	bd_prod = TX_BD(fp_tx->tx_bd_prod);
-	tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
-	mapping = dma_map_single(&bp->pdev->dev, skb->data,
-				 skb_headlen(skb), DMA_TO_DEVICE);
-	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
-	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
-	tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
-	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
-	tx_start_bd->vlan = cpu_to_le16(pkt_prod);
-	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
-	tx_start_bd->general_data = ((UNICAST_ADDRESS <<
-				ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
-
-	/* turn on parsing and get a BD */
-	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
-	pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
-
-	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
-
-	wmb();
-
-	fp_tx->tx_db.data.prod += 2;
-	barrier();
-	DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);
-
-	mmiowb();
-
-	num_pkts++;
-	fp_tx->tx_bd_prod += 2; /* start + pbd */
-
-	udelay(100);
-
-	tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
-	if (tx_idx != tx_start_idx + num_pkts)
-		goto test_loopback_exit;
-
-	rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
-	if (rx_idx != rx_start_idx + num_pkts)
-		goto test_loopback_exit;
-
-	cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
-	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
-	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
-		goto test_loopback_rx_exit;
-
-	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
-	if (len != pkt_size)
-		goto test_loopback_rx_exit;
-
-	rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
-	skb = rx_buf->skb;
-	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
-	for (i = ETH_HLEN; i < pkt_size; i++)
-		if (*(skb->data + i) != (unsigned char) (i & 0xff))
-			goto test_loopback_rx_exit;
-
-	rc = 0;
-
-test_loopback_rx_exit:
-
-	fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
-	fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
-	fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
-	fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
-
-	/* Update producers */
-	bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
-			     fp_rx->rx_sge_prod);
-
-test_loopback_exit:
-	bp->link_params.loopback_mode = LOOPBACK_NONE;
-
-	return rc;
-}
-
-static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
-{
-	int rc = 0, res;
-
-	if (BP_NOMCP(bp))
-		return rc;
-
-	if (!netif_running(bp->dev))
-		return BNX2X_LOOPBACK_FAILED;
-
-	bnx2x_netif_stop(bp, 1);
-	bnx2x_acquire_phy_lock(bp);
-
-	res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
-	if (res) {
-		DP(NETIF_MSG_PROBE, "  PHY loopback failed  (res %d)\n", res);
-		rc |= BNX2X_PHY_LOOPBACK_FAILED;
-	}
-
-	res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
-	if (res) {
-		DP(NETIF_MSG_PROBE, "  MAC loopback failed  (res %d)\n", res);
-		rc |= BNX2X_MAC_LOOPBACK_FAILED;
-	}
-
-	bnx2x_release_phy_lock(bp);
-	bnx2x_netif_start(bp);
-
-	return rc;
-}
-
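-/* CRC32 over a block that includes its own checksum leaves this residual */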
-#define CRC32_RESIDUAL			0xdebb20e3
-
-static int bnx2x_test_nvram(struct bnx2x *bp)
-{
-	static const struct {
-		int offset;
-		int size;
-	} nvram_tbl[] = {
-		{     0,  0x14 }, /* bootstrap */
-		{  0x14,  0xec }, /* dir */
-		{ 0x100, 0x350 }, /* manuf_info */
-		{ 0x450,  0xf0 }, /* feature_info */
-		{ 0x640,  0x64 }, /* upgrade_key_info */
-		{ 0x6a4,  0x64 },
-		{ 0x708,  0x70 }, /* manuf_key_info */
-		{ 0x778,  0x70 },
-		{     0,     0 }
-	};
-	__be32 buf[0x350 / 4];
-	u8 *data = (u8 *)buf;
-	int i, rc;
-	u32 magic, crc;
-
-	if (BP_NOMCP(bp))
-		return 0;
-
-	rc = bnx2x_nvram_read(bp, 0, data, 4);
-	if (rc) {
-		DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
-		goto test_nvram_exit;
-	}
-
-	magic = be32_to_cpu(buf[0]);
-	if (magic != 0x669955aa) {
-		DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
-		rc = -ENODEV;
-		goto test_nvram_exit;
-	}
-
-	for (i = 0; nvram_tbl[i].size; i++) {
-
-		rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
-				      nvram_tbl[i].size);
-		if (rc) {
-			DP(NETIF_MSG_PROBE,
-			   "nvram_tbl[%d] read data (rc %d)\n", i, rc);
-			goto test_nvram_exit;
-		}
-
-		crc = ether_crc_le(nvram_tbl[i].size, data);
-		if (crc != CRC32_RESIDUAL) {
-			DP(NETIF_MSG_PROBE,
-			   "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
-			rc = -ENODEV;
-			goto test_nvram_exit;
-		}
-	}
-
-test_nvram_exit:
-	return rc;
-}
-
-static int bnx2x_test_intr(struct bnx2x *bp)
-{
-	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
-	int i, rc;
-
-	if (!netif_running(bp->dev))
-		return -ENODEV;
-
-	config->hdr.length = 0;
-	if (CHIP_IS_E1(bp))
-		/* use last unicast entries */
-		config->hdr.offset = (BP_PORT(bp) ? 63 : 31);
-	else
-		config->hdr.offset = BP_FUNC(bp);
-	config->hdr.client_id = bp->fp->cl_id;
-	config->hdr.reserved1 = 0;
-
-	bp->set_mac_pending++;
-	smp_wmb();
-	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
-			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
-			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
-	if (rc == 0) {
-		for (i = 0; i < 10; i++) {
-			if (!bp->set_mac_pending)
-				break;
-			smp_rmb();
-			msleep_interruptible(10);
-		}
-		if (i == 10)
-			rc = -ENODEV;
-	}
-
-	return rc;
-}
-
-static void bnx2x_self_test(struct net_device *dev,
-			    struct ethtool_test *etest, u64 *buf)
-{
-	struct bnx2x *bp = netdev_priv(dev);
-
-	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
-		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
-		etest->flags |= ETH_TEST_FL_FAILED;
-		return;
-	}
-
-	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
-
-	if (!netif_running(dev))
-		return;
-
-	/* offline tests are not supported in MF mode */
-	if (IS_E1HMF(bp))
-		etest->flags &= ~ETH_TEST_FL_OFFLINE;
-
-	if (etest->flags & ETH_TEST_FL_OFFLINE) {
-		int port = BP_PORT(bp);
-		u32 val;
-		u8 link_up;
-
-		/* save current value of input enable for TX port IF */
-		val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
-		/* disable input for TX port IF */
-		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
-
-		link_up = (bnx2x_link_test(bp) == 0);
-		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
-		bnx2x_nic_load(bp, LOAD_DIAG);
-		/* wait until link state is restored */
-		bnx2x_wait_for_link(bp, link_up);
-
-		if (bnx2x_test_registers(bp) != 0) {
-			buf[0] = 1;
-			etest->flags |= ETH_TEST_FL_FAILED;
-		}
-		if (bnx2x_test_memory(bp) != 0) {
-			buf[1] = 1;
-			etest->flags |= ETH_TEST_FL_FAILED;
-		}
-		buf[2] = bnx2x_test_loopback(bp, link_up);
-		if (buf[2] != 0)
-			etest->flags |= ETH_TEST_FL_FAILED;
-
-		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
-
-		/* restore input for TX port IF */
-		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
-
-		bnx2x_nic_load(bp, LOAD_NORMAL);
-		/* wait until link state is restored */
-		bnx2x_wait_for_link(bp, link_up);
-	}
-	if (bnx2x_test_nvram(bp) != 0) {
-		buf[3] = 1;
-		etest->flags |= ETH_TEST_FL_FAILED;
-	}
-	if (bnx2x_test_intr(bp) != 0) {
-		buf[4] = 1;
-		etest->flags |= ETH_TEST_FL_FAILED;
-	}
-	if (bp->port.pmf)
-		if (bnx2x_link_test(bp) != 0) {
-			buf[5] = 1;
-			etest->flags |= ETH_TEST_FL_FAILED;
-		}
-
-#ifdef BNX2X_EXTRA_DEBUG
-	bnx2x_panic_dump(bp);
-#endif
-}
-
-static const struct {
-	long offset;
-	int size;
-	u8 string[ETH_GSTRING_LEN];
-} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
-/* 1 */	{ Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
-	{ Q_STATS_OFFSET32(error_bytes_received_hi),
-						8, "[%d]: rx_error_bytes" },
-	{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
-						8, "[%d]: rx_ucast_packets" },
-	{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
-						8, "[%d]: rx_mcast_packets" },
-	{ Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
-						8, "[%d]: rx_bcast_packets" },
-	{ Q_STATS_OFFSET32(no_buff_discard_hi),	8, "[%d]: rx_discards" },
-	{ Q_STATS_OFFSET32(rx_err_discard_pkt),
-					 4, "[%d]: rx_phy_ip_err_discards"},
-	{ Q_STATS_OFFSET32(rx_skb_alloc_failed),
-					 4, "[%d]: rx_skb_alloc_discard" },
-	{ Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
-
-/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi),	8, "[%d]: tx_bytes" },
-	{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
-						8, "[%d]: tx_ucast_packets" },
-	{ Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
-						8, "[%d]: tx_mcast_packets" },
-	{ Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
-						8, "[%d]: tx_bcast_packets" }
-};
-
-static const struct {
-	long offset;
-	int size;
-	u32 flags;
-#define STATS_FLAGS_PORT		1
-#define STATS_FLAGS_FUNC		2
-#define STATS_FLAGS_BOTH		(STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
-	u8 string[ETH_GSTRING_LEN];
-} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
-/* 1 */	{ STATS_OFFSET32(total_bytes_received_hi),
-				8, STATS_FLAGS_BOTH, "rx_bytes" },
-	{ STATS_OFFSET32(error_bytes_received_hi),
-				8, STATS_FLAGS_BOTH, "rx_error_bytes" },
-	{ STATS_OFFSET32(total_unicast_packets_received_hi),
-				8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
-	{ STATS_OFFSET32(total_multicast_packets_received_hi),
-				8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
-	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
-				8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
-	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
-				8, STATS_FLAGS_PORT, "rx_crc_errors" },
-	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
-				8, STATS_FLAGS_PORT, "rx_align_errors" },
-	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
-				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
-	{ STATS_OFFSET32(etherstatsoverrsizepkts_hi),
-				8, STATS_FLAGS_PORT, "rx_oversize_packets" },
-/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
-				8, STATS_FLAGS_PORT, "rx_fragments" },
-	{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
-				8, STATS_FLAGS_PORT, "rx_jabbers" },
-	{ STATS_OFFSET32(no_buff_discard_hi),
-				8, STATS_FLAGS_BOTH, "rx_discards" },
-	{ STATS_OFFSET32(mac_filter_discard),
-				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
-	{ STATS_OFFSET32(xxoverflow_discard),
-				4, STATS_FLAGS_PORT, "rx_fw_discards" },
-	{ STATS_OFFSET32(brb_drop_hi),
-				8, STATS_FLAGS_PORT, "rx_brb_discard" },
-	{ STATS_OFFSET32(brb_truncate_hi),
-				8, STATS_FLAGS_PORT, "rx_brb_truncate" },
-	{ STATS_OFFSET32(pause_frames_received_hi),
-				8, STATS_FLAGS_PORT, "rx_pause_frames" },
-	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
-				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
-	{ STATS_OFFSET32(nig_timer_max),
-			4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
-/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
-				4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
-	{ STATS_OFFSET32(rx_skb_alloc_failed),
-				4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
-	{ STATS_OFFSET32(hw_csum_err),
-				4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
-
-	{ STATS_OFFSET32(total_bytes_transmitted_hi),
-				8, STATS_FLAGS_BOTH, "tx_bytes" },
-	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
-				8, STATS_FLAGS_PORT, "tx_error_bytes" },
-	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
-				8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
-	{ STATS_OFFSET32(total_multicast_packets_transmitted_hi),
-				8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
-	{ STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
-				8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
-	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
-				8, STATS_FLAGS_PORT, "tx_mac_errors" },
-	{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
-				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
-/* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
-				8, STATS_FLAGS_PORT, "tx_single_collisions" },
-	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
-				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
-	{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
-				8, STATS_FLAGS_PORT, "tx_deferred" },
-	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
-				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
-	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
-				8, STATS_FLAGS_PORT, "tx_late_collisions" },
-	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
-				8, STATS_FLAGS_PORT, "tx_total_collisions" },
-	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
-				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
-	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
-			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
-	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
-			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
-	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
-			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
-/* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
-			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
-	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
-			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
-	{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
-			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
-	{ STATS_OFFSET32(pause_frames_sent_hi),
-				8, STATS_FLAGS_PORT, "tx_pause_frames" }
-};
-
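-/* a stat is maintained per port, per function, or both */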
-#define IS_PORT_STAT(i) \
-	((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
-#define IS_FUNC_STAT(i)		(bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
-#define IS_E1HMF_MODE_STAT(bp) \
-			(IS_E1HMF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))
-
-static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
-{
-	struct bnx2x *bp = netdev_priv(dev);
-	int i, num_stats;
-
-	switch (stringset) {
-	case ETH_SS_STATS:
-		if (is_multi(bp)) {
-			num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
-			if (!IS_E1HMF_MODE_STAT(bp))
-				num_stats += BNX2X_NUM_STATS;
-		} else {
-			if (IS_E1HMF_MODE_STAT(bp)) {
-				num_stats = 0;
-				for (i = 0; i < BNX2X_NUM_STATS; i++)
-					if (IS_FUNC_STAT(i))
-						num_stats++;
-			} else
-				num_stats = BNX2X_NUM_STATS;
-		}
-		return num_stats;
-
-	case ETH_SS_TEST:
-		return BNX2X_NUM_TESTS;
-
-	default:
-		return -EINVAL;
-	}
-}
-
-static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
-{
-	struct bnx2x *bp = netdev_priv(dev);
-	int i, j, k;
-
-	switch (stringset) {
-	case ETH_SS_STATS:
-		if (is_multi(bp)) {
-			k = 0;
-			for_each_queue(bp, i) {
-				for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
-					sprintf(buf + (k + j)*ETH_GSTRING_LEN,
-						bnx2x_q_stats_arr[j].string, i);
-				k += BNX2X_NUM_Q_STATS;
-			}
-			if (IS_E1HMF_MODE_STAT(bp))
-				break;
-			for (j = 0; j < BNX2X_NUM_STATS; j++)
-				strcpy(buf + (k + j)*ETH_GSTRING_LEN,
-				       bnx2x_stats_arr[j].string);
-		} else {
-			for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
-				if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
-					continue;
-				strcpy(buf + j*ETH_GSTRING_LEN,
-				       bnx2x_stats_arr[i].string);
-				j++;
-			}
-		}
-		break;
-
-	case ETH_SS_TEST:
-		memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
-		break;
-	}
-}
-
-static void bnx2x_get_ethtool_stats(struct net_device *dev,
-				    struct ethtool_stats *stats, u64 *buf)
-{
-	struct bnx2x *bp = netdev_priv(dev);
-	u32 *hw_stats, *offset;
-	int i, j, k;
-
-	if (is_multi(bp)) {
-		k = 0;
-		for_each_queue(bp, i) {
-			hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
-			for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
-				if (bnx2x_q_stats_arr[j].size == 0) {
-					/* skip this counter */
-					buf[k + j] = 0;
-					continue;
-				}
-				offset = (hw_stats +
-					  bnx2x_q_stats_arr[j].offset);
-				if (bnx2x_q_stats_arr[j].size == 4) {
-					/* 4-byte counter */
-					buf[k + j] = (u64) *offset;
-					continue;
-				}
-				/* 8-byte counter */
-				buf[k + j] = HILO_U64(*offset, *(offset + 1));
-			}
-			k += BNX2X_NUM_Q_STATS;
-		}
-		if (IS_E1HMF_MODE_STAT(bp))
-			return;
-		hw_stats = (u32 *)&bp->eth_stats;
-		for (j = 0; j < BNX2X_NUM_STATS; j++) {
-			if (bnx2x_stats_arr[j].size == 0) {
-				/* skip this counter */
-				buf[k + j] = 0;
-				continue;
-			}
-			offset = (hw_stats + bnx2x_stats_arr[j].offset);
-			if (bnx2x_stats_arr[j].size == 4) {
-				/* 4-byte counter */
-				buf[k + j] = (u64) *offset;
-				continue;
-			}
-			/* 8-byte counter */
-			buf[k + j] = HILO_U64(*offset, *(offset + 1));
-		}
-	} else {
-		hw_stats = (u32 *)&bp->eth_stats;
-		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
-			if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
-				continue;
-			if (bnx2x_stats_arr[i].size == 0) {
-				/* skip this counter */
-				buf[j] = 0;
-				j++;
-				continue;
-			}
-			offset = (hw_stats + bnx2x_stats_arr[i].offset);
-			if (bnx2x_stats_arr[i].size == 4) {
-				/* 4-byte counter */
-				buf[j] = (u64) *offset;
-				j++;
-				continue;
-			}
-			/* 8-byte counter */
-			buf[j] = HILO_U64(*offset, *(offset + 1));
-			j++;
-		}
-	}
-}
-
-static int bnx2x_phys_id(struct net_device *dev, u32 data)
-{
-	struct bnx2x *bp = netdev_priv(dev);
-	int i;
-
-	if (!netif_running(dev))
-		return 0;
-
-	if (!bp->port.pmf)
-		return 0;
-
-	if (data == 0)
-		data = 2;
-
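-	/* alternate LED on/off every 500 ms for "data" cycles */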
-	for (i = 0; i < (data * 2); i++) {
-		if ((i % 2) == 0)
-			bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
-				      SPEED_1000);
-		else
-			bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);
-
-		msleep_interruptible(500);
-		if (signal_pending(current))
-			break;
-	}
-
-	if (bp->link_vars.link_up)
-		bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
-			      bp->link_vars.line_speed);
-
-	return 0;
-}
-
-static const struct ethtool_ops bnx2x_ethtool_ops = {
-	.get_settings		= bnx2x_get_settings,
-	.set_settings		= bnx2x_set_settings,
-	.get_drvinfo		= bnx2x_get_drvinfo,
-	.get_regs_len		= bnx2x_get_regs_len,
-	.get_regs		= bnx2x_get_regs,
-	.get_wol		= bnx2x_get_wol,
-	.set_wol		= bnx2x_set_wol,
-	.get_msglevel		= bnx2x_get_msglevel,
-	.set_msglevel		= bnx2x_set_msglevel,
-	.nway_reset		= bnx2x_nway_reset,
-	.get_link		= bnx2x_get_link,
-	.get_eeprom_len		= bnx2x_get_eeprom_len,
-	.get_eeprom		= bnx2x_get_eeprom,
-	.set_eeprom		= bnx2x_set_eeprom,
-	.get_coalesce		= bnx2x_get_coalesce,
-	.set_coalesce		= bnx2x_set_coalesce,
-	.get_ringparam		= bnx2x_get_ringparam,
-	.set_ringparam		= bnx2x_set_ringparam,
-	.get_pauseparam		= bnx2x_get_pauseparam,
-	.set_pauseparam		= bnx2x_set_pauseparam,
-	.get_rx_csum		= bnx2x_get_rx_csum,
-	.set_rx_csum		= bnx2x_set_rx_csum,
-	.get_tx_csum		= ethtool_op_get_tx_csum,
-	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
-	.set_flags		= bnx2x_set_flags,
-	.get_flags		= ethtool_op_get_flags,
-	.get_sg			= ethtool_op_get_sg,
-	.set_sg			= ethtool_op_set_sg,
-	.get_tso		= ethtool_op_get_tso,
-	.set_tso		= bnx2x_set_tso,
-	.self_test		= bnx2x_self_test,
-	.get_sset_count		= bnx2x_get_sset_count,
-	.get_strings		= bnx2x_get_strings,
-	.phys_id		= bnx2x_phys_id,
-	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
-};
-
-/* end of ethtool_ops */
-
-/****************************************************************************
-* General service functions
-****************************************************************************/
-
-static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
-{
-	u16 pmcsr;
-
-	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
-
-	switch (state) {
-	case PCI_D0:
-		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
-				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
-				       PCI_PM_CTRL_PME_STATUS));
-
-		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
-			/* delay required during transition out of D3hot */
-			msleep(20);
-		break;
-
-	case PCI_D3hot:
-		/* If there are other clients above, don't
-		   shut down the power */
-		if (atomic_read(&bp->pdev->enable_cnt) != 1)
-			return 0;
-		/* Don't shut down the power for emulation and FPGA */
-		if (CHIP_REV_IS_SLOW(bp))
-			return 0;
-
-		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
-		pmcsr |= 3;
-
-		if (bp->wol)
-			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
-
-		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
-				      pmcsr);
-
-		/* No more memory access after this point until
-		* device is brought back to D0.
-		*/
-		break;
-
-	default:
-		return -EINVAL;
-	}
-	return 0;
-}
-
-static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
-{
-	u16 rx_cons_sb;
-
-	/* Tell compiler that status block fields can change */
-	barrier();
-	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
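-	/* skip the "next page" element at the end of each RCQ page */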
-	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
-		rx_cons_sb++;
-	return (fp->rx_comp_cons != rx_cons_sb);
-}
-
-/*
- * net_device service functions
- */
-
-static int bnx2x_poll(struct napi_struct *napi, int budget)
-{
-	int work_done = 0;
-	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
-						 napi);
-	struct bnx2x *bp = fp->bp;
-
-	while (1) {
-#ifdef BNX2X_STOP_ON_ERROR
-		if (unlikely(bp->panic)) {
-			napi_complete(napi);
-			return 0;
-		}
-#endif
-
-		if (bnx2x_has_tx_work(fp))
-			bnx2x_tx_int(fp);
-
-		if (bnx2x_has_rx_work(fp)) {
-			work_done += bnx2x_rx_int(fp, budget - work_done);
-
-			/* must not complete if we consumed full budget */
-			if (work_done >= budget)
-				break;
-		}
-
-		/* Fall out from the NAPI loop if needed */
-		if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
-			bnx2x_update_fpsb_idx(fp);
-		/* bnx2x_has_rx_work() reads the status block, thus we need
-		 * to ensure that status block indices have actually been read
-		 * (bnx2x_update_fpsb_idx) prior to this check
-		 * (bnx2x_has_rx_work) so that we won't write the "newer"
-		 * value of the status block to IGU (if there was a DMA right
-		 * after bnx2x_has_rx_work and if there is no rmb, the memory
-		 * reading (bnx2x_update_fpsb_idx) may be postponed to right
-		 * before bnx2x_ack_sb). In this case there will never be
-		 * another interrupt until there is another update of the
-		 * status block, while there is still unhandled work.
-		 */
-			rmb();
-
-			if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
-				napi_complete(napi);
-				/* Re-enable interrupts */
-				bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
-					     le16_to_cpu(fp->fp_c_idx),
-					     IGU_INT_NOP, 1);
-				bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
-					     le16_to_cpu(fp->fp_u_idx),
-					     IGU_INT_ENABLE, 1);
-				break;
-			}
-		}
-	}
-
-	return work_done;
-}
-
-
-/* We split the first BD into a headers BD and a data BD
- * to ease the pain of our fellow microcode engineers;
- * we use one mapping for both BDs.
- * So far this has only been observed to happen
- * in Other Operating Systems(TM).
- */
-static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
-				   struct bnx2x_fastpath *fp,
-				   struct sw_tx_bd *tx_buf,
-				   struct eth_tx_start_bd **tx_bd, u16 hlen,
-				   u16 bd_prod, int nbd)
-{
-	struct eth_tx_start_bd *h_tx_bd = *tx_bd;
-	struct eth_tx_bd *d_tx_bd;
-	dma_addr_t mapping;
-	int old_len = le16_to_cpu(h_tx_bd->nbytes);
-
-	/* first fix first BD */
-	h_tx_bd->nbd = cpu_to_le16(nbd);
-	h_tx_bd->nbytes = cpu_to_le16(hlen);
-
-	DP(NETIF_MSG_TX_QUEUED,	"TSO split header size is %d "
-	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
-	   h_tx_bd->addr_lo, h_tx_bd->nbd);
-
-	/* now get a new data BD
-	 * (after the pbd) and fill it */
-	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
-	d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
-
-	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
-			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
-
-	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
-	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
-	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
-
-	/* this marks the BD as one that has no individual mapping */
-	tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
-
-	DP(NETIF_MSG_TX_QUEUED,
-	   "TSO split data size is %d (%x:%x)\n",
-	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
-
-	/* update tx_bd */
-	*tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
-
-	return bd_prod;
-}
-
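-/* fold the checksum of the |fix| bytes next to the transport header out of
- * (fix > 0) or into (fix < 0) the running csum, then byte-swap for the NIC */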
-static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
-{
-	if (fix > 0)
-		csum = (u16) ~csum_fold(csum_sub(csum,
-				csum_partial(t_header - fix, fix, 0)));
-
-	else if (fix < 0)
-		csum = (u16) ~csum_fold(csum_add(csum,
-				csum_partial(t_header, -fix, 0)));
-
-	return swab16(csum);
-}
-
-static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
-{
-	u32 rc;
-
-	if (skb->ip_summed != CHECKSUM_PARTIAL)
-		rc = XMIT_PLAIN;
-
-	else {
-		if (skb->protocol == htons(ETH_P_IPV6)) {
-			rc = XMIT_CSUM_V6;
-			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
-				rc |= XMIT_CSUM_TCP;
-
-		} else {
-			rc = XMIT_CSUM_V4;
-			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
-				rc |= XMIT_CSUM_TCP;
-		}
-	}
-
-	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
-		rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
-
-	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
-		rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
-
-	return rc;
-}
-
-#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
-/* check if packet requires linearization (packet is too fragmented)
-   no need to check fragmentation if page size > 8K (there will be no
-   violation to FW restrictions) */
-static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
-			     u32 xmit_type)
-{
-	int to_copy = 0;
-	int hlen = 0;
-	int first_bd_sz = 0;
-
-	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
-	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
-
-		if (xmit_type & XMIT_GSO) {
-			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
-			/* Check if LSO packet needs to be copied:
-			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
-			int wnd_size = MAX_FETCH_BD - 3;
-			/* Number of windows to check */
-			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
-			int wnd_idx = 0;
-			int frag_idx = 0;
-			u32 wnd_sum = 0;
-
-			/* Headers length */
-			hlen = (int)(skb_transport_header(skb) - skb->data) +
-				tcp_hdrlen(skb);
-
-			/* Amount of data (w/o headers) on linear part of SKB*/
-			first_bd_sz = skb_headlen(skb) - hlen;
-
-			wnd_sum  = first_bd_sz;
-
-			/* Calculate the first sum - it's special */
-			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
-				wnd_sum +=
-					skb_shinfo(skb)->frags[frag_idx].size;
-
-			/* If there was data on linear skb data - check it */
-			if (first_bd_sz > 0) {
-				if (unlikely(wnd_sum < lso_mss)) {
-					to_copy = 1;
-					goto exit_lbl;
-				}
-
-				wnd_sum -= first_bd_sz;
-			}
-
-			/* Others are easier: run through the frag list and
-			   check all windows */
-			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
-				wnd_sum +=
-			  skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
-
-				if (unlikely(wnd_sum < lso_mss)) {
-					to_copy = 1;
-					break;
-				}
-				wnd_sum -=
-					skb_shinfo(skb)->frags[wnd_idx].size;
-			}
-		} else {
-			/* a non-LSO packet that is too fragmented must
-			   always be linearized */
-			to_copy = 1;
-		}
-	}
-
-exit_lbl:
-	if (unlikely(to_copy))
-		DP(NETIF_MSG_TX_QUEUED,
-		   "Linearization IS REQUIRED for %s packet. "
-		   "num_frags %d  hlen %d  first_bd_sz %d\n",
-		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
-		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
-
-	return to_copy;
-}
-#endif
-
-/* called with netif_tx_lock
- * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
- * netif_wake_queue()
- */
-static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
-	struct bnx2x *bp = netdev_priv(dev);
-	struct bnx2x_fastpath *fp;
-	struct netdev_queue *txq;
-	struct sw_tx_bd *tx_buf;
-	struct eth_tx_start_bd *tx_start_bd;
-	struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
-	struct eth_tx_parse_bd *pbd = NULL;
-	u16 pkt_prod, bd_prod;
-	int nbd, fp_index;
-	dma_addr_t mapping;
-	u32 xmit_type = bnx2x_xmit_type(bp, skb);
-	int i;
-	u8 hlen = 0;
-	__le16 pkt_size = 0;
-	struct ethhdr *eth;
-	u8 mac_type = UNICAST_ADDRESS;
-
-#ifdef BNX2X_STOP_ON_ERROR
-	if (unlikely(bp->panic))
-		return NETDEV_TX_BUSY;
-#endif
-
-	fp_index = skb_get_queue_mapping(skb);
-	txq = netdev_get_tx_queue(dev, fp_index);
-
-	fp = &bp->fp[fp_index];
-
-	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
-		fp->eth_q_stats.driver_xoff++;
-		netif_tx_stop_queue(txq);
-		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
-		return NETDEV_TX_BUSY;
-	}
-
-	DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
-	   "  gso type %x  xmit_type %x\n",
-	   skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
-	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
-
-	eth = (struct ethhdr *)skb->data;
-
-	/* set flag according to packet type (UNICAST_ADDRESS is default)*/
-	if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
-		if (is_broadcast_ether_addr(eth->h_dest))
-			mac_type = BROADCAST_ADDRESS;
-		else
-			mac_type = MULTICAST_ADDRESS;
-	}
-
-#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
-	/* First, check if we need to linearize the skb (due to FW
-	   restrictions). No need to check fragmentation if page size > 8K
-	   (there will be no violation of FW restrictions) */
-	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
-		/* Statistics of linearization */
-		bp->lin_cnt++;
-		if (skb_linearize(skb) != 0) {
-			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
-			   "silently dropping this SKB\n");
-			dev_kfree_skb_any(skb);
-			return NETDEV_TX_OK;
-		}
-	}
-#endif
-
-	/*
-	Please read carefully. First we use one BD which we mark as start,
-	then we have a parsing info BD (used for TSO or xsum),
-	and only then we have the rest of the TSO BDs.
-	(don't forget to mark the last one as last,
-	and to unmap only AFTER you write to the BD ...)
-	And above all, all pbd sizes are in words - NOT DWORDS!
-	*/
-
-	pkt_prod = fp->tx_pkt_prod++;
-	bd_prod = TX_BD(fp->tx_bd_prod);
-
-	/* get a tx_buf and first BD */
-	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
-	tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
-
-	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
-	tx_start_bd->general_data =  (mac_type <<
-					ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
-	/* header nbd */
-	tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
-
-	/* remember the first BD of the packet */
-	tx_buf->first_bd = fp->tx_bd_prod;
-	tx_buf->skb = skb;
-	tx_buf->flags = 0;
-
-	DP(NETIF_MSG_TX_QUEUED,
-	   "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
-	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
-
-#ifdef BCM_VLAN
-	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
-	    (bp->flags & HW_VLAN_TX_FLAG)) {
-		tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
-		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
-	} else
-#endif
-		tx_start_bd->vlan = cpu_to_le16(pkt_prod);
-
-	/* turn on parsing and get a BD */
-	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
-	pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
-
-	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
-
-	if (xmit_type & XMIT_CSUM) {
-		hlen = (skb_network_header(skb) - skb->data) / 2;
-
-		/* for now NS flag is not used in Linux */
-		pbd->global_data =
-			(hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
-				 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
-
-		pbd->ip_hlen = (skb_transport_header(skb) -
-				skb_network_header(skb)) / 2;
-
-		hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
-
-		pbd->total_hlen = cpu_to_le16(hlen);
-		hlen = hlen*2;
-
-		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
-
-		if (xmit_type & XMIT_CSUM_V4)
-			tx_start_bd->bd_flags.as_bitfield |=
-						ETH_TX_BD_FLAGS_IP_CSUM;
-		else
-			tx_start_bd->bd_flags.as_bitfield |=
-						ETH_TX_BD_FLAGS_IPV6;
-
-		if (xmit_type & XMIT_CSUM_TCP) {
-			pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
-
-		} else {
-			s8 fix = SKB_CS_OFF(skb); /* signed! */
-
-			pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
-
-			DP(NETIF_MSG_TX_QUEUED,
-			   "hlen %d  fix %d  csum before fix %x\n",
-			   le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
-
-			/* HW bug: fixup the CSUM */
-			pbd->tcp_pseudo_csum =
-				bnx2x_csum_fix(skb_transport_header(skb),
-					       SKB_CS(skb), fix);
-
-			DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
-			   pbd->tcp_pseudo_csum);
-		}
-	}
-
-	mapping = dma_map_single(&bp->pdev->dev, skb->data,
-				 skb_headlen(skb), DMA_TO_DEVICE);
-
-	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
-	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
-	nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
-	tx_start_bd->nbd = cpu_to_le16(nbd);
-	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
-	pkt_size = tx_start_bd->nbytes;
-
-	DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
-	   "  nbytes %d  flags %x  vlan %x\n",
-	   tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
-	   le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
-	   tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
-
-	if (xmit_type & XMIT_GSO) {
-
-		DP(NETIF_MSG_TX_QUEUED,
-		   "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
-		   skb->len, hlen, skb_headlen(skb),
-		   skb_shinfo(skb)->gso_size);
-
-		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
-
-		if (unlikely(skb_headlen(skb) > hlen))
-			bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
-						 hlen, bd_prod, ++nbd);
-
-		pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
-		pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
-		pbd->tcp_flags = pbd_tcp_flags(skb);
-
-		if (xmit_type & XMIT_GSO_V4) {
-			pbd->ip_id = swab16(ip_hdr(skb)->id);
-			pbd->tcp_pseudo_csum =
-				swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
-							  ip_hdr(skb)->daddr,
-							  0, IPPROTO_TCP, 0));
-
-		} else
-			pbd->tcp_pseudo_csum =
-				swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
-							&ipv6_hdr(skb)->daddr,
-							0, IPPROTO_TCP, 0));
-
-		pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
-	}
-	tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
-
-	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-
-		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
-		tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
-		if (total_pkt_bd == NULL)
-			total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
-
-		mapping = dma_map_page(&bp->pdev->dev, frag->page,
-				       frag->page_offset,
-				       frag->size, DMA_TO_DEVICE);
-
-		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
-		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
-		tx_data_bd->nbytes = cpu_to_le16(frag->size);
-		le16_add_cpu(&pkt_size, frag->size);
-
-		DP(NETIF_MSG_TX_QUEUED,
-		   "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
-		   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
-		   le16_to_cpu(tx_data_bd->nbytes));
-	}
-
-	DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
-
-	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
-
-	/* now send a tx doorbell, counting the next BD
-	 * if the packet contains or ends with it
-	 */
-	if (TX_BD_POFF(bd_prod) < nbd)
-		nbd++;
-
-	if (total_pkt_bd != NULL)
-		total_pkt_bd->total_pkt_bytes = pkt_size;
-
-	if (pbd)
-		DP(NETIF_MSG_TX_QUEUED,
-		   "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
-		   "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
-		   pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
-		   pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
-		   pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
-
-	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
-
-	/*
-	 * Make sure that the BD data is updated before updating the producer
-	 * since FW might read the BD right after the producer is updated.
-	 * This is only applicable for weak-ordered memory model archs such
-	 * as IA-64. The following barrier is also mandatory since the FW
-	 * assumes packets always have BDs.
-	 */
-	wmb();
-
-	fp->tx_db.data.prod += nbd;
-	barrier();
-	DOORBELL(bp, fp->index, fp->tx_db.raw);
-
-	mmiowb();
-
-	fp->tx_bd_prod += nbd;
-
-	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
-		netif_tx_stop_queue(txq);
-
-		/* paired memory barrier is in bnx2x_tx_int(), we have to keep
-		 * ordering of set_bit() in netif_tx_stop_queue() and read of
-		 * fp->tx_bd_cons */
-		smp_mb();
-
-		fp->eth_q_stats.driver_xoff++;
-		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
-			netif_tx_wake_queue(txq);
-	}
-	fp->tx_pkt++;
-
-	return NETDEV_TX_OK;
-}
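
The producer bookkeeping above relies on the usual power-of-two ring idiom:
indices grow freely and are masked on array access, so wraparound needs no
special casing. A minimal sketch under those assumptions, using a
hypothetical 16-entry ring (the driver's TX_BD/NEXT_TX_IDX macros and
bnx2x_tx_avail() accounting differ in detail):

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE	16			/* must be a power of two */
#define RING_MASK	(RING_SIZE - 1)

/* Free slots between a free-running producer and consumer; the
 * indices only ever grow and are masked on array access. */
static uint16_t ring_avail(uint16_t prod, uint16_t cons)
{
	return RING_SIZE - (uint16_t)(prod - cons);
}

int main(void)
{
	uint16_t prod = 0xfffe, cons = 0xfff4;	/* about to wrap */

	printf("slot %u, avail %u\n", prod & RING_MASK,
	       ring_avail(prod, cons));
	prod += 3;				/* wraps past 0xffff */
	printf("slot %u, avail %u\n", prod & RING_MASK,
	       ring_avail(prod, cons));
	return 0;
}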
-
-/* called with rtnl_lock */
-static int bnx2x_open(struct net_device *dev)
-{
-	struct bnx2x *bp = netdev_priv(dev);
-
-	netif_carrier_off(dev);
-
-	bnx2x_set_power_state(bp, PCI_D0);
-
-	if (!bnx2x_reset_is_done(bp)) {
-		do {
-			/* Reset the MCP mailbox sequence if there is an
-			 * ongoing recovery
-			 */
-			bp->fw_seq = 0;
-
-			/* If this is the first function to load and "reset
-			 * done" is still not cleared, the recovery may be
-			 * incomplete. We don't check the attention state here
-			 * because it may have already been cleared by a
-			 * "common" reset, but we shall proceed with "process
-			 * kill" anyway.
-			 */
-			if ((bnx2x_get_load_cnt(bp) == 0) &&
-				bnx2x_trylock_hw_lock(bp,
-				HW_LOCK_RESOURCE_RESERVED_08) &&
-				(!bnx2x_leader_reset(bp))) {
-				DP(NETIF_MSG_HW, "Recovered in open\n");
-				break;
-			}
-
-			bnx2x_set_power_state(bp, PCI_D3hot);
-
-			printk(KERN_ERR "%s: Recovery flow hasn't been properly"
-			" completed yet. Try again later. If you still see this"
-			" message after a few retries then a power cycle is"
-			" required.\n", bp->dev->name);
-
-			return -EAGAIN;
-		} while (0);
-	}
-
-	bp->recovery_state = BNX2X_RECOVERY_DONE;
-
-	return bnx2x_nic_load(bp, LOAD_OPEN);
-}
-
-/* called with rtnl_lock */
-static int bnx2x_close(struct net_device *dev)
-{
-	struct bnx2x *bp = netdev_priv(dev);
-
-	/* Unload the driver, release IRQs */
-	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
-	bnx2x_set_power_state(bp, PCI_D3hot);
-
-	return 0;
-}
-
-/* called with netif_tx_lock from dev_mcast.c */
-static void bnx2x_set_rx_mode(struct net_device *dev)
-{
-	struct bnx2x *bp = netdev_priv(dev);
-	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
-	int port = BP_PORT(bp);
-
-	if (bp->state != BNX2X_STATE_OPEN) {
-		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
-		return;
-	}
-
-	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
-
-	if (dev->flags & IFF_PROMISC)
-		rx_mode = BNX2X_RX_MODE_PROMISC;
-
-	else if ((dev->flags & IFF_ALLMULTI) ||
-		 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
-		  CHIP_IS_E1(bp)))
-		rx_mode = BNX2X_RX_MODE_ALLMULTI;
-
-	else { /* some multicasts */
-		if (CHIP_IS_E1(bp)) {
-			int i, old, offset;
-			struct netdev_hw_addr *ha;
-			struct mac_configuration_cmd *config =
-						bnx2x_sp(bp, mcast_config);
-
-			i = 0;
-			netdev_for_each_mc_addr(ha, dev) {
-				config->config_table[i].
-					cam_entry.msb_mac_addr =
-					swab16(*(u16 *)&ha->addr[0]);
-				config->config_table[i].
-					cam_entry.middle_mac_addr =
-					swab16(*(u16 *)&ha->addr[2]);
-				config->config_table[i].
-					cam_entry.lsb_mac_addr =
-					swab16(*(u16 *)&ha->addr[4]);
-				config->config_table[i].cam_entry.flags =
-							cpu_to_le16(port);
-				config->config_table[i].
-					target_table_entry.flags = 0;
-				config->config_table[i].target_table_entry.
-					clients_bit_vector =
-						cpu_to_le32(1 << BP_L_ID(bp));
-				config->config_table[i].
-					target_table_entry.vlan_id = 0;
-
-				DP(NETIF_MSG_IFUP,
-				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
-				   config->config_table[i].
-						cam_entry.msb_mac_addr,
-				   config->config_table[i].
-						cam_entry.middle_mac_addr,
-				   config->config_table[i].
-						cam_entry.lsb_mac_addr);
-				i++;
-			}
-			old = config->hdr.length;
-			if (old > i) {
-				for (; i < old; i++) {
-					if (CAM_IS_INVALID(config->
-							   config_table[i])) {
-						/* already invalidated */
-						break;
-					}
-					/* invalidate */
-					CAM_INVALIDATE(config->
-						       config_table[i]);
-				}
-			}
-
-			if (CHIP_REV_IS_SLOW(bp))
-				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
-			else
-				offset = BNX2X_MAX_MULTICAST*(1 + port);
-
-			config->hdr.length = i;
-			config->hdr.offset = offset;
-			config->hdr.client_id = bp->fp->cl_id;
-			config->hdr.reserved1 = 0;
-
-			bp->set_mac_pending++;
-			smp_wmb();
-
-			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
-				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
-				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
-				      0);
-		} else { /* E1H */
-			/* Accept one or more multicasts */
-			struct netdev_hw_addr *ha;
-			u32 mc_filter[MC_HASH_SIZE];
-			u32 crc, bit, regidx;
-			int i;
-
-			memset(mc_filter, 0, 4 * MC_HASH_SIZE);
-
-			netdev_for_each_mc_addr(ha, dev) {
-				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
-				   ha->addr);
-
-				crc = crc32c_le(0, ha->addr, ETH_ALEN);
-				bit = (crc >> 24) & 0xff;
-				regidx = bit >> 5;
-				bit &= 0x1f;
-				mc_filter[regidx] |= (1 << bit);
-			}
-
-			for (i = 0; i < MC_HASH_SIZE; i++)
-				REG_WR(bp, MC_HASH_OFFSET(bp, i),
-				       mc_filter[i]);
-		}
-	}
-
-	bp->rx_mode = rx_mode;
-	bnx2x_set_storm_rx_mode(bp);
-}
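
The E1H branch above hashes each multicast MAC with CRC32C and uses the top
byte of the result to select one bit out of eight 32-bit filter registers.
A standalone sketch of that bin selection, using a plain bitwise CRC32C as a
stand-in for the kernel's crc32c_le() (initial/final inversion conventions
may differ from the kernel helper):

#include <stdint.h>
#include <stdio.h>

/* Bitwise CRC32C (Castagnoli), reflected polynomial 0x82f63b78 -- a
 * software stand-in for the kernel's crc32c_le(). */
static uint32_t crc32c_sw(uint32_t crc, const uint8_t *buf, int len)
{
	int i, j;

	crc = ~crc;
	for (i = 0; i < len; i++) {
		crc ^= buf[i];
		for (j = 0; j < 8; j++)
			crc = (crc >> 1) ^ (0x82f63b78 & -(crc & 1));
	}
	return ~crc;
}

int main(void)
{
	const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x01, 0x02, 0x03 };
	uint32_t crc = crc32c_sw(0, mac, 6);
	uint32_t bit = (crc >> 24) & 0xff;	/* top byte: 1 of 256 bins */
	uint32_t regidx = bit >> 5;		/* which 32-bit register */

	bit &= 0x1f;				/* bit within that register */
	printf("reg %u, bit %u\n", regidx, bit);
	return 0;
}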
-
-/* called with rtnl_lock */
-static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
-{
-	struct sockaddr *addr = p;
-	struct bnx2x *bp = netdev_priv(dev);
-
-	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
-		return -EINVAL;
-
-	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
-	if (netif_running(dev)) {
-		if (CHIP_IS_E1(bp))
-			bnx2x_set_eth_mac_addr_e1(bp, 1);
-		else
-			bnx2x_set_eth_mac_addr_e1h(bp, 1);
-	}
-
-	return 0;
-}
-
-/* called with rtnl_lock */
-static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
-			   int devad, u16 addr)
-{
-	struct bnx2x *bp = netdev_priv(netdev);
-	u16 value;
-	int rc;
-	u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
-
-	DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
-	   prtad, devad, addr);
-
-	if (prtad != bp->mdio.prtad) {
-		DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
-		   prtad, bp->mdio.prtad);
-		return -EINVAL;
-	}
-
-	/* The HW expects different devad if CL22 is used */
-	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
-
-	bnx2x_acquire_phy_lock(bp);
-	rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
-			     devad, addr, &value);
-	bnx2x_release_phy_lock(bp);
-	DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
-
-	if (!rc)
-		rc = value;
-	return rc;
-}
-
-/* called with rtnl_lock */
-static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
-			    u16 addr, u16 value)
-{
-	struct bnx2x *bp = netdev_priv(netdev);
-	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
-	int rc;
-
-	DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
-			   " value 0x%x\n", prtad, devad, addr, value);
-
-	if (prtad != bp->mdio.prtad) {
-		DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
-		   prtad, bp->mdio.prtad);
-		return -EINVAL;
-	}
-
-	/* The HW expects different devad if CL22 is used */
-	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
-
-	bnx2x_acquire_phy_lock(bp);
-	rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
-			      devad, addr, value);
-	bnx2x_release_phy_lock(bp);
-	return rc;
-}
-
-/* called with rtnl_lock */
-static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
-	struct bnx2x *bp = netdev_priv(dev);
-	struct mii_ioctl_data *mdio = if_mii(ifr);
-
-	DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
-	   mdio->phy_id, mdio->reg_num, mdio->val_in);
-
-	if (!netif_running(dev))
-		return -EAGAIN;
-
-	return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
-}
-
-/* called with rtnl_lock */
-static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
-{
-	struct bnx2x *bp = netdev_priv(dev);
-	int rc = 0;
-
-	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
-		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
-		return -EAGAIN;
-	}
-
-	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
-	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
-		return -EINVAL;
-
-	/* This does not race with packet allocation
-	 * because the actual alloc size is
-	 * only updated as part of load
-	 */
-	dev->mtu = new_mtu;
-
-	if (netif_running(dev)) {
-		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
-		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
-	}
-
-	return rc;
-}
-
-static void bnx2x_tx_timeout(struct net_device *dev)
-{
-	struct bnx2x *bp = netdev_priv(dev);
-
-#ifdef BNX2X_STOP_ON_ERROR
-	if (!bp->panic)
-		bnx2x_panic();
-#endif
-	/* This allows the netif to be shutdown gracefully before resetting */
-	schedule_delayed_work(&bp->reset_task, 0);
-}
-
-#ifdef BCM_VLAN
-/* called with rtnl_lock */
-static void bnx2x_vlan_rx_register(struct net_device *dev,
-				   struct vlan_group *vlgrp)
-{
-	struct bnx2x *bp = netdev_priv(dev);
-
-	bp->vlgrp = vlgrp;
-
-	/* Set flags according to the required capabilities */
-	bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
-
-	if (dev->features & NETIF_F_HW_VLAN_TX)
-		bp->flags |= HW_VLAN_TX_FLAG;
-
-	if (dev->features & NETIF_F_HW_VLAN_RX)
-		bp->flags |= HW_VLAN_RX_FLAG;
-
-	if (netif_running(dev))
-		bnx2x_set_client_config(bp);
-}
-
-#endif
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void poll_bnx2x(struct net_device *dev)
-{
-	struct bnx2x *bp = netdev_priv(dev);
-
-	disable_irq(bp->pdev->irq);
-	bnx2x_interrupt(bp->pdev->irq, dev);
-	enable_irq(bp->pdev->irq);
-}
-#endif
-
-static const struct net_device_ops bnx2x_netdev_ops = {
-	.ndo_open		= bnx2x_open,
-	.ndo_stop		= bnx2x_close,
-	.ndo_start_xmit		= bnx2x_start_xmit,
-	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
-	.ndo_set_mac_address	= bnx2x_change_mac_addr,
-	.ndo_validate_addr	= eth_validate_addr,
-	.ndo_do_ioctl		= bnx2x_ioctl,
-	.ndo_change_mtu		= bnx2x_change_mtu,
-	.ndo_tx_timeout		= bnx2x_tx_timeout,
-#ifdef BCM_VLAN
-	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
-#endif
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller	= poll_bnx2x,
-#endif
-};
-
-static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
-				    struct net_device *dev)
-{
-	struct bnx2x *bp;
-	int rc;
-
-	SET_NETDEV_DEV(dev, &pdev->dev);
-	bp = netdev_priv(dev);
-
-	bp->dev = dev;
-	bp->pdev = pdev;
-	bp->flags = 0;
-	bp->func = PCI_FUNC(pdev->devfn);
-
-	rc = pci_enable_device(pdev);
-	if (rc) {
-		dev_err(&bp->pdev->dev,
-			"Cannot enable PCI device, aborting\n");
-		goto err_out;
-	}
-
-	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
-		dev_err(&bp->pdev->dev,
-			"Cannot find PCI device base address, aborting\n");
-		rc = -ENODEV;
-		goto err_out_disable;
-	}
-
-	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
-		dev_err(&bp->pdev->dev, "Cannot find second PCI device"
-		       " base address, aborting\n");
-		rc = -ENODEV;
-		goto err_out_disable;
-	}
-
-	if (atomic_read(&pdev->enable_cnt) == 1) {
-		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
-		if (rc) {
-			dev_err(&bp->pdev->dev,
-				"Cannot obtain PCI resources, aborting\n");
-			goto err_out_disable;
-		}
-
-		pci_set_master(pdev);
-		pci_save_state(pdev);
-	}
-
-	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
-	if (bp->pm_cap == 0) {
-		dev_err(&bp->pdev->dev,
-			"Cannot find power management capability, aborting\n");
-		rc = -EIO;
-		goto err_out_release;
-	}
-
-	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
-	if (bp->pcie_cap == 0) {
-		dev_err(&bp->pdev->dev,
-			"Cannot find PCI Express capability, aborting\n");
-		rc = -EIO;
-		goto err_out_release;
-	}
-
-	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
-		bp->flags |= USING_DAC_FLAG;
-		if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
-			dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
-			       " failed, aborting\n");
-			rc = -EIO;
-			goto err_out_release;
-		}
-
-	} else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
-		dev_err(&bp->pdev->dev,
-			"System does not support DMA, aborting\n");
-		rc = -EIO;
-		goto err_out_release;
-	}
-
-	dev->mem_start = pci_resource_start(pdev, 0);
-	dev->base_addr = dev->mem_start;
-	dev->mem_end = pci_resource_end(pdev, 0);
-
-	dev->irq = pdev->irq;
-
-	bp->regview = pci_ioremap_bar(pdev, 0);
-	if (!bp->regview) {
-		dev_err(&bp->pdev->dev,
-			"Cannot map register space, aborting\n");
-		rc = -ENOMEM;
-		goto err_out_release;
-	}
-
-	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
-					min_t(u64, BNX2X_DB_SIZE,
-					      pci_resource_len(pdev, 2)));
-	if (!bp->doorbells) {
-		dev_err(&bp->pdev->dev,
-			"Cannot map doorbell space, aborting\n");
-		rc = -ENOMEM;
-		goto err_out_unmap;
-	}
-
-	bnx2x_set_power_state(bp, PCI_D0);
-
-	/* clean indirect addresses */
-	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
-			       PCICFG_VENDOR_ID_OFFSET);
-	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
-	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
-	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
-	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
-
-	/* Reset the load counter */
-	bnx2x_clear_load_cnt(bp);
-
-	dev->watchdog_timeo = TX_TIMEOUT;
-
-	dev->netdev_ops = &bnx2x_netdev_ops;
-	dev->ethtool_ops = &bnx2x_ethtool_ops;
-	dev->features |= NETIF_F_SG;
-	dev->features |= NETIF_F_HW_CSUM;
-	if (bp->flags & USING_DAC_FLAG)
-		dev->features |= NETIF_F_HIGHDMA;
-	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
-	dev->features |= NETIF_F_TSO6;
-#ifdef BCM_VLAN
-	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
-	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
-
-	dev->vlan_features |= NETIF_F_SG;
-	dev->vlan_features |= NETIF_F_HW_CSUM;
-	if (bp->flags & USING_DAC_FLAG)
-		dev->vlan_features |= NETIF_F_HIGHDMA;
-	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
-	dev->vlan_features |= NETIF_F_TSO6;
-#endif
-
-	/* get_port_hwinfo() will set prtad and mmds properly */
-	bp->mdio.prtad = MDIO_PRTAD_NONE;
-	bp->mdio.mmds = 0;
-	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
-	bp->mdio.dev = dev;
-	bp->mdio.mdio_read = bnx2x_mdio_read;
-	bp->mdio.mdio_write = bnx2x_mdio_write;
-
-	return 0;
-
-err_out_unmap:
-	if (bp->regview) {
-		iounmap(bp->regview);
-		bp->regview = NULL;
-	}
-	if (bp->doorbells) {
-		iounmap(bp->doorbells);
-		bp->doorbells = NULL;
-	}
-
-err_out_release:
-	if (atomic_read(&pdev->enable_cnt) == 1)
-		pci_release_regions(pdev);
-
-err_out_disable:
-	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
-
-err_out:
-	return rc;
-}
-
-static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
-						 int *width, int *speed)
-{
-	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
-
-	*width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
-
-	/* return value of 1=2.5GHz 2=5GHz */
-	*speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
-}
-
-static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
-{
-	const struct firmware *firmware = bp->firmware;
-	struct bnx2x_fw_file_hdr *fw_hdr;
-	struct bnx2x_fw_file_section *sections;
-	u32 offset, len, num_ops;
-	u16 *ops_offsets;
-	int i;
-	const u8 *fw_ver;
-
-	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
-		return -EINVAL;
-
-	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
-	sections = (struct bnx2x_fw_file_section *)fw_hdr;
-
-	/* Make sure none of the offsets and sizes make us read beyond
-	 * the end of the firmware data */
-	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
-		offset = be32_to_cpu(sections[i].offset);
-		len = be32_to_cpu(sections[i].len);
-		if (offset + len > firmware->size) {
-			dev_err(&bp->pdev->dev,
-				"Section %d length is out of bounds\n", i);
-			return -EINVAL;
-		}
-	}
-
-	/* Likewise for the init_ops offsets */
-	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
-	ops_offsets = (u16 *)(firmware->data + offset);
-	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
-
-	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
-		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
-			dev_err(&bp->pdev->dev,
-				"Section offset %d is out of bounds\n", i);
-			return -EINVAL;
-		}
-	}
-
-	/* Check FW version */
-	offset = be32_to_cpu(fw_hdr->fw_version.offset);
-	fw_ver = firmware->data + offset;
-	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
-	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
-	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
-	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
-		dev_err(&bp->pdev->dev,
-			"Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
-		       fw_ver[0], fw_ver[1], fw_ver[2],
-		       fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
-		       BCM_5710_FW_MINOR_VERSION,
-		       BCM_5710_FW_REVISION_VERSION,
-		       BCM_5710_FW_ENGINEERING_VERSION);
-		return -EINVAL;
-	}
-
-	return 0;
-}
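
A note on section validation of this kind: with 32-bit offset and length
fields, "offset + len" can wrap in u32 arithmetic, so a defensive variant
compares without forming the sum. A minimal sketch with a hypothetical
section type (not the driver's bnx2x_fw_file_section):

#include <stdint.h>
#include <stdio.h>

struct section {
	uint32_t offset;
	uint32_t len;
};

/* Reject a section whose [offset, offset + len) range is not fully
 * inside a blob of 'size' bytes, without tripping over u32 wraparound. */
static int section_in_bounds(const struct section *s, uint32_t size)
{
	if (s->offset > size || s->len > size - s->offset)
		return 0;
	return 1;
}

int main(void)
{
	struct section ok = { 16, 64 }, bad = { 0xfffffff0u, 32 };

	printf("ok:  %d\n", section_in_bounds(&ok, 4096));
	printf("bad: %d\n", section_in_bounds(&bad, 4096));
	return 0;
}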
-
-static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
-{
-	const __be32 *source = (const __be32 *)_source;
-	u32 *target = (u32 *)_target;
-	u32 i;
-
-	for (i = 0; i < n/4; i++)
-		target[i] = be32_to_cpu(source[i]);
-}
-
-/*
-   Ops array is stored in the following format:
-   {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
- */
-static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
-{
-	const __be32 *source = (const __be32 *)_source;
-	struct raw_op *target = (struct raw_op *)_target;
-	u32 i, j, tmp;
-
-	for (i = 0, j = 0; i < n/8; i++, j += 2) {
-		tmp = be32_to_cpu(source[j]);
-		target[i].op = (tmp >> 24) & 0xff;
-		target[i].offset = tmp & 0xffffff;
-		target[i].raw_data = be32_to_cpu(source[j + 1]);
-	}
-}
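
A standalone sketch of the unpacking described above, decoding one 8-byte
big-endian entry into op, 24-bit offset and raw data; the struct here is
illustrative, not the driver's raw_op:

#include <stdint.h>
#include <stdio.h>

struct raw_op_sw {		/* illustrative, not the driver's raw_op */
	uint8_t op;
	uint32_t offset;	/* 24 significant bits */
	uint32_t raw_data;
};

static uint32_t be32_load(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | p[3];
}

/* Unpack one 8-byte entry: the first word carries op in the top byte
 * and a 24-bit offset below it, the second word is the raw data. */
static void prep_op(const uint8_t *src, struct raw_op_sw *dst)
{
	uint32_t tmp = be32_load(src);

	dst->op = (tmp >> 24) & 0xff;
	dst->offset = tmp & 0xffffff;
	dst->raw_data = be32_load(src + 4);
}

int main(void)
{
	const uint8_t entry[8] = { 0x05, 0x00, 0x12, 0x34,
				   0xde, 0xad, 0xbe, 0xef };
	struct raw_op_sw op;

	prep_op(entry, &op);
	printf("op %u offset 0x%06x data 0x%08x\n",
	       op.op, op.offset, op.raw_data);
	return 0;
}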
-
-static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
-{
-	const __be16 *source = (const __be16 *)_source;
-	u16 *target = (u16 *)_target;
-	u32 i;
-
-	for (i = 0; i < n/2; i++)
-		target[i] = be16_to_cpu(source[i]);
-}
-
-#define BNX2X_ALLOC_AND_SET(arr, lbl, func)				\
-do {									\
-	u32 len = be32_to_cpu(fw_hdr->arr.len);				\
-	bp->arr = kmalloc(len, GFP_KERNEL);				\
-	if (!bp->arr) {							\
-		pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
-		goto lbl;						\
-	}								\
-	func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset),	\
-	     (u8 *)bp->arr, len);					\
-} while (0)
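
The macro above bundles allocate, convert and error-goto into a single
statement. A small userspace sketch of the same do { } while (0) idiom,
with hypothetical names (ALLOC_AND_FILL is not a kernel macro):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Multi-statement macro wrapped in do { } while (0) so it behaves as
 * one statement after if/else -- the same idiom as BNX2X_ALLOC_AND_SET.
 * 'lbl' is an error label defined in the caller. */
#define ALLOC_AND_FILL(ptr, len, fill, lbl)			\
do {								\
	(ptr) = malloc(len);					\
	if (!(ptr)) {						\
		fprintf(stderr, "alloc of %zu failed\n",	\
			(size_t)(len));				\
		goto lbl;					\
	}							\
	memset((ptr), (fill), (len));				\
} while (0)

int main(void)
{
	char *buf;

	ALLOC_AND_FILL(buf, 32, 'x', err);
	printf("buf[0] = %c\n", buf[0]);
	free(buf);
	return 0;
err:
	return 1;
}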
-
-static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
-{
-	const char *fw_file_name;
-	struct bnx2x_fw_file_hdr *fw_hdr;
-	int rc;
-
-	if (CHIP_IS_E1(bp))
-		fw_file_name = FW_FILE_NAME_E1;
-	else if (CHIP_IS_E1H(bp))
-		fw_file_name = FW_FILE_NAME_E1H;
-	else {
-		dev_err(dev, "Unsupported chip revision\n");
-		return -EINVAL;
-	}
-
-	dev_info(dev, "Loading %s\n", fw_file_name);
-
-	rc = request_firmware(&bp->firmware, fw_file_name, dev);
-	if (rc) {
-		dev_err(dev, "Can't load firmware file %s\n", fw_file_name);
-		goto request_firmware_exit;
-	}
-
-	rc = bnx2x_check_firmware(bp);
-	if (rc) {
-		dev_err(dev, "Corrupt firmware file %s\n", fw_file_name);
-		goto request_firmware_exit;
-	}
-
-	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
-
-	/* Initialize the pointers to the init arrays */
-	/* Blob */
-	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
-
-	/* Opcodes */
-	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
-
-	/* Offsets */
-	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
-			    be16_to_cpu_n);
-
-	/* STORMs firmware */
-	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
-			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
-	INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
-			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
-	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
-			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
-	INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
-			be32_to_cpu(fw_hdr->usem_pram_data.offset);
-	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
-			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
-	INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
-			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
-	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
-			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
-	INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
-			be32_to_cpu(fw_hdr->csem_pram_data.offset);
-
-	return 0;
-
-init_offsets_alloc_err:
-	kfree(bp->init_ops);
-init_ops_alloc_err:
-	kfree(bp->init_data);
-request_firmware_exit:
-	release_firmware(bp->firmware);
-
-	return rc;
-}
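
The error path above is the kernel's stacked-goto unwind idiom: each failure
jumps to a label that releases only what was acquired before it, in reverse
order. A minimal userspace sketch, assuming three hypothetical buffers:

#include <stdio.h>
#include <stdlib.h>

static void *buf_a, *buf_b, *buf_c;

/* Stacked-goto cleanup, mirroring the loader above: each failure
 * jumps to a label that frees only what was acquired before it. */
static int init_buffers(void)
{
	buf_a = malloc(64);
	if (!buf_a)
		goto err;
	buf_b = malloc(64);
	if (!buf_b)
		goto free_a;
	buf_c = malloc(64);
	if (!buf_c)
		goto free_b;

	return 0;		/* success: all three stay allocated */

free_b:
	free(buf_b);
free_a:
	free(buf_a);
err:
	return -1;
}

int main(void)
{
	printf("init: %d\n", init_buffers());
	return 0;
}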
-
-
-static int __devinit bnx2x_init_one(struct pci_dev *pdev,
-				    const struct pci_device_id *ent)
-{
-	struct net_device *dev = NULL;
-	struct bnx2x *bp;
-	int pcie_width, pcie_speed;
-	int rc;
-
-	/* dev zeroed in alloc_etherdev_mq */
-	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
-	if (!dev) {
-		dev_err(&pdev->dev, "Cannot allocate net device\n");
-		return -ENOMEM;
-	}
-
-	bp = netdev_priv(dev);
-	bp->msg_enable = debug;
-
-	pci_set_drvdata(pdev, dev);
-
-	rc = bnx2x_init_dev(pdev, dev);
-	if (rc < 0) {
-		free_netdev(dev);
-		return rc;
-	}
-
-	rc = bnx2x_init_bp(bp);
-	if (rc)
-		goto init_one_exit;
-
-	/* Set init arrays */
-	rc = bnx2x_init_firmware(bp, &pdev->dev);
-	if (rc) {
-		dev_err(&pdev->dev, "Error loading firmware\n");
-		goto init_one_exit;
-	}
-
-	rc = register_netdev(dev);
-	if (rc) {
-		dev_err(&pdev->dev, "Cannot register net device\n");
-		goto init_one_exit;
-	}
-
-	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
-	netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
-	       " IRQ %d, ", board_info[ent->driver_data].name,
-	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
-	       pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
-	       dev->base_addr, bp->pdev->irq);
-	pr_cont("node addr %pM\n", dev->dev_addr);
-
-	return 0;
-
-init_one_exit:
-	if (bp->regview)
-		iounmap(bp->regview);
-
-	if (bp->doorbells)
-		iounmap(bp->doorbells);
-
-	free_netdev(dev);
-
-	if (atomic_read(&pdev->enable_cnt) == 1)
-		pci_release_regions(pdev);
-
-	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
-
-	return rc;
-}
-
-static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
-{
-	struct net_device *dev = pci_get_drvdata(pdev);
-	struct bnx2x *bp;
-
-	if (!dev) {
-		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
-		return;
-	}
-	bp = netdev_priv(dev);
-
-	unregister_netdev(dev);
-
-	/* Make sure RESET task is not scheduled before continuing */
-	cancel_delayed_work_sync(&bp->reset_task);
-
-	kfree(bp->init_ops_offsets);
-	kfree(bp->init_ops);
-	kfree(bp->init_data);
-	release_firmware(bp->firmware);
-
-	if (bp->regview)
-		iounmap(bp->regview);
-
-	if (bp->doorbells)
-		iounmap(bp->doorbells);
-
-	free_netdev(dev);
-
-	if (atomic_read(&pdev->enable_cnt) == 1)
-		pci_release_regions(pdev);
-
-	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
-}
-
-static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
-{
-	struct net_device *dev = pci_get_drvdata(pdev);
-	struct bnx2x *bp;
-
-	if (!dev) {
-		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
-		return -ENODEV;
-	}
-	bp = netdev_priv(dev);
-
-	rtnl_lock();
-
-	pci_save_state(pdev);
-
-	if (!netif_running(dev)) {
-		rtnl_unlock();
-		return 0;
-	}
-
-	netif_device_detach(dev);
-
-	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
-
-	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
-
-	rtnl_unlock();
-
-	return 0;
-}
-
-static int bnx2x_resume(struct pci_dev *pdev)
-{
-	struct net_device *dev = pci_get_drvdata(pdev);
-	struct bnx2x *bp;
-	int rc;
-
-	if (!dev) {
-		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
-		return -ENODEV;
-	}
-	bp = netdev_priv(dev);
-
-	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
-		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
-		return -EAGAIN;
-	}
-
-	rtnl_lock();
-
-	pci_restore_state(pdev);
-
-	if (!netif_running(dev)) {
-		rtnl_unlock();
-		return 0;
-	}
-
-	bnx2x_set_power_state(bp, PCI_D0);
-	netif_device_attach(dev);
-
-	rc = bnx2x_nic_load(bp, LOAD_OPEN);
-
-	rtnl_unlock();
-
-	return rc;
-}
-
-static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
-{
-	int i;
-
-	bp->state = BNX2X_STATE_ERROR;
-
-	bp->rx_mode = BNX2X_RX_MODE_NONE;
-
-	bnx2x_netif_stop(bp, 0);
-	netif_carrier_off(bp->dev);
-
-	del_timer_sync(&bp->timer);
-	bp->stats_state = STATS_STATE_DISABLED;
-	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
-
-	/* Release IRQs */
-	bnx2x_free_irq(bp, false);
-
-	if (CHIP_IS_E1(bp)) {
-		struct mac_configuration_cmd *config =
-						bnx2x_sp(bp, mcast_config);
-
-		for (i = 0; i < config->hdr.length; i++)
-			CAM_INVALIDATE(config->config_table[i]);
-	}
-
-	/* Free SKBs, SGEs, TPA pool and driver internals */
-	bnx2x_free_skbs(bp);
-	for_each_queue(bp, i)
-		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
-	for_each_queue(bp, i)
-		netif_napi_del(&bnx2x_fp(bp, i, napi));
-	bnx2x_free_mem(bp);
-
-	bp->state = BNX2X_STATE_CLOSED;
-
-	return 0;
-}
-
-static void bnx2x_eeh_recover(struct bnx2x *bp)
-{
-	u32 val;
-
-	mutex_init(&bp->port.phy_mutex);
-
-	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
-	bp->link_params.shmem_base = bp->common.shmem_base;
-	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
-
-	if (!bp->common.shmem_base ||
-	    (bp->common.shmem_base < 0xA0000) ||
-	    (bp->common.shmem_base >= 0xC0000)) {
-		BNX2X_DEV_INFO("MCP not active\n");
-		bp->flags |= NO_MCP_FLAG;
-		return;
-	}
-
-	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
-	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
-		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
-		BNX2X_ERR("BAD MCP validity signature\n");
-
-	if (!BP_NOMCP(bp)) {
-		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
-			      & DRV_MSG_SEQ_NUMBER_MASK);
-		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
-	}
-}
-
-/**
- * bnx2x_io_error_detected - called when PCI error is detected
- * @pdev: Pointer to PCI device
- * @state: The current pci connection state
- *
- * This function is called after a PCI bus error affecting
- * this device has been detected.
- */
-static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
-						pci_channel_state_t state)
-{
-	struct net_device *dev = pci_get_drvdata(pdev);
-	struct bnx2x *bp = netdev_priv(dev);
-
-	rtnl_lock();
-
-	netif_device_detach(dev);
-
-	if (state == pci_channel_io_perm_failure) {
-		rtnl_unlock();
-		return PCI_ERS_RESULT_DISCONNECT;
-	}
-
-	if (netif_running(dev))
-		bnx2x_eeh_nic_unload(bp);
-
-	pci_disable_device(pdev);
-
-	rtnl_unlock();
-
-	/* Request a slot reset */
-	return PCI_ERS_RESULT_NEED_RESET;
-}
-
-/**
- * bnx2x_io_slot_reset - called after the PCI bus has been reset
- * @pdev: Pointer to PCI device
- *
- * Restart the card from scratch, as if from a cold-boot.
- */
-static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
-{
-	struct net_device *dev = pci_get_drvdata(pdev);
-	struct bnx2x *bp = netdev_priv(dev);
-
-	rtnl_lock();
-
-	if (pci_enable_device(pdev)) {
-		dev_err(&pdev->dev,
-			"Cannot re-enable PCI device after reset\n");
-		rtnl_unlock();
-		return PCI_ERS_RESULT_DISCONNECT;
-	}
-
-	pci_set_master(pdev);
-	pci_restore_state(pdev);
-
-	if (netif_running(dev))
-		bnx2x_set_power_state(bp, PCI_D0);
-
-	rtnl_unlock();
-
-	return PCI_ERS_RESULT_RECOVERED;
-}
-
-/**
- * bnx2x_io_resume - called when traffic can start flowing again
- * @pdev: Pointer to PCI device
- *
- * This callback is called when the error recovery driver tells us that
- * it's OK to resume normal operation.
- */
-static void bnx2x_io_resume(struct pci_dev *pdev)
-{
-	struct net_device *dev = pci_get_drvdata(pdev);
-	struct bnx2x *bp = netdev_priv(dev);
-
-	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
-		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
-		return;
-	}
-
-	rtnl_lock();
-
-	bnx2x_eeh_recover(bp);
-
-	if (netif_running(dev))
-		bnx2x_nic_load(bp, LOAD_NORMAL);
-
-	netif_device_attach(dev);
-
-	rtnl_unlock();
-}
-
-static struct pci_error_handlers bnx2x_err_handler = {
-	.error_detected = bnx2x_io_error_detected,
-	.slot_reset     = bnx2x_io_slot_reset,
-	.resume         = bnx2x_io_resume,
-};
-
-static struct pci_driver bnx2x_pci_driver = {
-	.name        = DRV_MODULE_NAME,
-	.id_table    = bnx2x_pci_tbl,
-	.probe       = bnx2x_init_one,
-	.remove      = __devexit_p(bnx2x_remove_one),
-	.suspend     = bnx2x_suspend,
-	.resume      = bnx2x_resume,
-	.err_handler = &bnx2x_err_handler,
-};
-
-static int __init bnx2x_init(void)
-{
-	int ret;
-
-	pr_info("%s", version);
-
-	bnx2x_wq = create_singlethread_workqueue("bnx2x");
-	if (bnx2x_wq == NULL) {
-		pr_err("Cannot create workqueue\n");
-		return -ENOMEM;
-	}
-
-	ret = pci_register_driver(&bnx2x_pci_driver);
-	if (ret) {
-		pr_err("Cannot register driver\n");
-		destroy_workqueue(bnx2x_wq);
-	}
-	return ret;
-}
-
-static void __exit bnx2x_cleanup(void)
-{
-	pci_unregister_driver(&bnx2x_pci_driver);
-
-	destroy_workqueue(bnx2x_wq);
-}
-
-module_init(bnx2x_init);
-module_exit(bnx2x_cleanup);
-
-#ifdef BCM_CNIC
-
-/* count denotes the number of new completions we have seen */
-static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
-{
-	struct eth_spe *spe;
-
-#ifdef BNX2X_STOP_ON_ERROR
-	if (unlikely(bp->panic))
-		return;
-#endif
-
-	spin_lock_bh(&bp->spq_lock);
-	bp->cnic_spq_pending -= count;
-
-	for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
-	     bp->cnic_spq_pending++) {
-
-		if (!bp->cnic_kwq_pending)
-			break;
-
-		spe = bnx2x_sp_get_next(bp);
-		*spe = *bp->cnic_kwq_cons;
-
-		bp->cnic_kwq_pending--;
-
-		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
-		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
-
-		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
-			bp->cnic_kwq_cons = bp->cnic_kwq;
-		else
-			bp->cnic_kwq_cons++;
-	}
-	bnx2x_sp_prod_update(bp);
-	spin_unlock_bh(&bp->spq_lock);
-}
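
The consumer above wraps by comparing against a precomputed last-element
pointer rather than masking an index. A minimal sketch of that pattern with
a hypothetical fixed-size queue (depth and ownership rules here are
illustrative only):

#include <stdio.h>

#define QUEUE_DEPTH	4

static int queue[QUEUE_DEPTH];
static int *q_cons = queue;
static int *q_last = queue + QUEUE_DEPTH - 1;

/* Advance the consumer pointer, wrapping from the last element back
 * to the base -- the same shape as cnic_kwq_cons/cnic_kwq_last. */
static void consumer_advance(void)
{
	if (q_cons == q_last)
		q_cons = queue;
	else
		q_cons++;
}

int main(void)
{
	int i;

	for (i = 0; i < QUEUE_DEPTH; i++)
		queue[i] = i * 10;

	for (i = 0; i < 6; i++) {	/* walks past the wrap point */
		printf("consume %d\n", *q_cons);
		consumer_advance();
	}
	return 0;
}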
-
-static int bnx2x_cnic_sp_queue(struct net_device *dev,
-			       struct kwqe_16 *kwqes[], u32 count)
-{
-	struct bnx2x *bp = netdev_priv(dev);
-	int i;
-
-#ifdef BNX2X_STOP_ON_ERROR
-	if (unlikely(bp->panic))
-		return -EIO;
-#endif
-
-	spin_lock_bh(&bp->spq_lock);
-
-	for (i = 0; i < count; i++) {
-		struct eth_spe *spe = (struct eth_spe *)kwqes[i];
-
-		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
-			break;
-
-		*bp->cnic_kwq_prod = *spe;
-
-		bp->cnic_kwq_pending++;
-
-		DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
-		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
-		   spe->data.mac_config_addr.hi,
-		   spe->data.mac_config_addr.lo,
-		   bp->cnic_kwq_pending);
-
-		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
-			bp->cnic_kwq_prod = bp->cnic_kwq;
-		else
-			bp->cnic_kwq_prod++;
-	}
-
-	spin_unlock_bh(&bp->spq_lock);
-
-	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
-		bnx2x_cnic_sp_post(bp, 0);
-
-	return i;
-}
-
-static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
-{
-	struct cnic_ops *c_ops;
-	int rc = 0;
-
-	mutex_lock(&bp->cnic_mutex);
-	c_ops = bp->cnic_ops;
-	if (c_ops)
-		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
-	mutex_unlock(&bp->cnic_mutex);
-
-	return rc;
-}
-
-static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
-{
-	struct cnic_ops *c_ops;
-	int rc = 0;
-
-	rcu_read_lock();
-	c_ops = rcu_dereference(bp->cnic_ops);
-	if (c_ops)
-		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
-	rcu_read_unlock();
-
-	return rc;
-}
-
-/*
- * for commands that have no data
- */
-static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
-{
-	struct cnic_ctl_info ctl = {0};
-
-	ctl.cmd = cmd;
-
-	return bnx2x_cnic_ctl_send(bp, &ctl);
-}
-
-static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
-{
-	struct cnic_ctl_info ctl;
-
-	/* first we tell CNIC and only then we count this as a completion */
-	ctl.cmd = CNIC_CTL_COMPLETION_CMD;
-	ctl.data.comp.cid = cid;
-
-	bnx2x_cnic_ctl_send_bh(bp, &ctl);
-	bnx2x_cnic_sp_post(bp, 1);
-}
-
-static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
-{
-	struct bnx2x *bp = netdev_priv(dev);
-	int rc = 0;
-
-	switch (ctl->cmd) {
-	case DRV_CTL_CTXTBL_WR_CMD: {
-		u32 index = ctl->data.io.offset;
-		dma_addr_t addr = ctl->data.io.dma_addr;
-
-		bnx2x_ilt_wr(bp, index, addr);
-		break;
-	}
-
-	case DRV_CTL_COMPLETION_CMD: {
-		int count = ctl->data.comp.comp_count;
-
-		bnx2x_cnic_sp_post(bp, count);
-		break;
-	}
-
-	/* rtnl_lock is held.  */
-	case DRV_CTL_START_L2_CMD: {
-		u32 cli = ctl->data.ring.client_id;
-
-		bp->rx_mode_cl_mask |= (1 << cli);
-		bnx2x_set_storm_rx_mode(bp);
-		break;
-	}
-
-	/* rtnl_lock is held.  */
-	case DRV_CTL_STOP_L2_CMD: {
-		u32 cli = ctl->data.ring.client_id;
-
-		bp->rx_mode_cl_mask &= ~(1 << cli);
-		bnx2x_set_storm_rx_mode(bp);
-		break;
-	}
-
-	default:
-		BNX2X_ERR("unknown command %x\n", ctl->cmd);
-		rc = -EINVAL;
-	}
-
-	return rc;
-}
-
-static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
-{
-	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
-
-	if (bp->flags & USING_MSIX_FLAG) {
-		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
-		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
-		cp->irq_arr[0].vector = bp->msix_table[1].vector;
-	} else {
-		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
-		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
-	}
-	cp->irq_arr[0].status_blk = bp->cnic_sb;
-	cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
-	cp->irq_arr[1].status_blk = bp->def_status_blk;
-	cp->irq_arr[1].status_blk_num = DEF_SB_ID;
-
-	cp->num_irq = 2;
-}
-
-static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
-			       void *data)
-{
-	struct bnx2x *bp = netdev_priv(dev);
-	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
-
-	if (ops == NULL)
-		return -EINVAL;
-
-	if (atomic_read(&bp->intr_sem) != 0)
-		return -EBUSY;
-
-	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
-	if (!bp->cnic_kwq)
-		return -ENOMEM;
-
-	bp->cnic_kwq_cons = bp->cnic_kwq;
-	bp->cnic_kwq_prod = bp->cnic_kwq;
-	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
-
-	bp->cnic_spq_pending = 0;
-	bp->cnic_kwq_pending = 0;
-
-	bp->cnic_data = data;
-
-	cp->num_irq = 0;
-	cp->drv_state = CNIC_DRV_STATE_REGD;
-
-	bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));
-
-	bnx2x_setup_cnic_irq_info(bp);
-	bnx2x_set_iscsi_eth_mac_addr(bp, 1);
-	bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
-	rcu_assign_pointer(bp->cnic_ops, ops);
-
-	return 0;
-}
-
-static int bnx2x_unregister_cnic(struct net_device *dev)
-{
-	struct bnx2x *bp = netdev_priv(dev);
-	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
-
-	mutex_lock(&bp->cnic_mutex);
-	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
-		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
-		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
-	}
-	cp->drv_state = 0;
-	rcu_assign_pointer(bp->cnic_ops, NULL);
-	mutex_unlock(&bp->cnic_mutex);
-	synchronize_rcu();
-	kfree(bp->cnic_kwq);
-	bp->cnic_kwq = NULL;
-
-	return 0;
-}
-
-struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
-{
-	struct bnx2x *bp = netdev_priv(dev);
-	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
-
-	cp->drv_owner = THIS_MODULE;
-	cp->chip_id = CHIP_ID(bp);
-	cp->pdev = bp->pdev;
-	cp->io_base = bp->regview;
-	cp->io_base2 = bp->doorbells;
-	cp->max_kwqe_pending = 8;
-	cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
-	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
-	cp->ctx_tbl_len = CNIC_ILT_LINES;
-	cp->starting_cid = BCM_CNIC_CID_START;
-	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
-	cp->drv_ctl = bnx2x_drv_ctl;
-	cp->drv_register_cnic = bnx2x_register_cnic;
-	cp->drv_unregister_cnic = bnx2x_unregister_cnic;
-
-	return cp;
-}
-EXPORT_SYMBOL(bnx2x_cnic_probe);
-
-#endif /* BCM_CNIC */
-
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 3662d6e..c746b33 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -682,7 +682,7 @@
 			client_info->ntt = 0;
 		}
 
-		if (!list_empty(&bond->vlan_list)) {
+		if (bond->vlgrp) {
 			if (!vlan_get_tag(skb, &client_info->vlan_id))
 				client_info->tag = 1;
 		}
@@ -815,7 +815,7 @@
 
 	/*initialize packet type*/
 	pk_type->type = cpu_to_be16(ETH_P_ARP);
-	pk_type->dev = NULL;
+	pk_type->dev = bond->dev;
 	pk_type->func = rlb_arp_recv;
 
 	/* register to receive ARPs */
@@ -904,7 +904,7 @@
 		skb->priority = TC_PRIO_CONTROL;
 		skb->dev = slave->dev;
 
-		if (!list_empty(&bond->vlan_list)) {
+		if (bond->vlgrp) {
 			struct vlan_entry *vlan;
 
 			vlan = bond_next_vlan(bond,
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 20f45cb..2cc4cfc 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -424,6 +424,7 @@
 {
 	unsigned short uninitialized_var(vlan_id);
 
+	/* Test vlan_list not vlgrp to catch and handle 802.1p tags */
 	if (!list_empty(&bond->vlan_list) &&
 	    !(slave_dev->features & NETIF_F_HW_VLAN_TX) &&
 	    vlan_get_tag(skb, &vlan_id) == 0) {
@@ -487,7 +488,9 @@
 	struct slave *slave;
 	int i;
 
+	write_lock(&bond->lock);
 	bond->vlgrp = grp;
+	write_unlock(&bond->lock);
 
 	bond_for_each_slave(bond, slave, i) {
 		struct net_device *slave_dev = slave->dev;
@@ -567,10 +570,8 @@
 	struct vlan_entry *vlan;
 	const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
 
-	write_lock_bh(&bond->lock);
-
-	if (list_empty(&bond->vlan_list))
-		goto out;
+	if (!bond->vlgrp)
+		return;
 
 	if ((slave_dev->features & NETIF_F_HW_VLAN_RX) &&
 	    slave_ops->ndo_vlan_rx_register)
@@ -578,13 +579,10 @@
 
 	if (!(slave_dev->features & NETIF_F_HW_VLAN_FILTER) ||
 	    !(slave_ops->ndo_vlan_rx_add_vid))
-		goto out;
+		return;
 
 	list_for_each_entry(vlan, &bond->vlan_list, vlan_list)
 		slave_ops->ndo_vlan_rx_add_vid(slave_dev, vlan->vlan_id);
-
-out:
-	write_unlock_bh(&bond->lock);
 }
 
 static void bond_del_vlans_from_slave(struct bonding *bond,
@@ -594,16 +592,16 @@
 	struct vlan_entry *vlan;
 	struct net_device *vlan_dev;
 
-	write_lock_bh(&bond->lock);
-
-	if (list_empty(&bond->vlan_list))
-		goto out;
+	if (!bond->vlgrp)
+		return;
 
 	if (!(slave_dev->features & NETIF_F_HW_VLAN_FILTER) ||
 	    !(slave_ops->ndo_vlan_rx_kill_vid))
 		goto unreg;
 
 	list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
+		if (!vlan->vlan_id)
+			continue;
 		/* Save and then restore vlan_dev in the grp array,
 		 * since the slave's driver might clear it.
 		 */
@@ -616,9 +614,6 @@
 	if ((slave_dev->features & NETIF_F_HW_VLAN_RX) &&
 	    slave_ops->ndo_vlan_rx_register)
 		slave_ops->ndo_vlan_rx_register(slave_dev, NULL);
-
-out:
-	write_unlock_bh(&bond->lock);
 }
 
 /*------------------------------- Link status -------------------------------*/
@@ -1443,7 +1438,7 @@
 	/* no need to lock since we're protected by rtnl_lock */
 	if (slave_dev->features & NETIF_F_VLAN_CHALLENGED) {
 		pr_debug("%s: NETIF_F_VLAN_CHALLENGED\n", slave_dev->name);
-		if (!list_empty(&bond->vlan_list)) {
+		if (bond->vlgrp) {
 			pr_err("%s: Error: cannot enslave VLAN challenged slave %s on VLAN enabled bond %s\n",
 			       bond_dev->name, slave_dev->name, bond_dev->name);
 			return -EPERM;
@@ -1942,7 +1937,7 @@
 		 */
 		memset(bond_dev->dev_addr, 0, bond_dev->addr_len);
 
-		if (list_empty(&bond->vlan_list)) {
+		if (!bond->vlgrp) {
 			bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
 		} else {
 			pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n",
@@ -2134,9 +2129,9 @@
 	 */
 	memset(bond_dev->dev_addr, 0, bond_dev->addr_len);
 
-	if (list_empty(&bond->vlan_list))
+	if (!bond->vlgrp) {
 		bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
-	else {
+	} else {
 		pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n",
 			   bond_dev->name, bond_dev->name);
 		pr_warning("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs'.\n",
@@ -2569,7 +2564,7 @@
 		if (!targets[i])
 			break;
 		pr_debug("basa: target %x\n", targets[i]);
-		if (list_empty(&bond->vlan_list)) {
+		if (!bond->vlgrp) {
 			pr_debug("basa: empty vlan: arp_send\n");
 			bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
 				      bond->master_ip, 0);
@@ -2658,6 +2653,9 @@
 				bond->master_ip, 0);
 	}
 
+	if (!bond->vlgrp)
+		return;
+
 	list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
 		vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id);
 		if (vlan->vlan_ip) {
@@ -3590,6 +3588,8 @@
 		}
 
 		list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
+			if (!bond->vlgrp)
+				continue;
 			vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id);
 			if (vlan_dev == event_dev) {
 				switch (event) {
@@ -4686,6 +4686,7 @@
 static void bond_uninit(struct net_device *bond_dev)
 {
 	struct bonding *bond = netdev_priv(bond_dev);
+	struct vlan_entry *vlan, *tmp;
 
 	bond_netpoll_cleanup(bond_dev);
 
@@ -4699,6 +4700,11 @@
 	bond_remove_proc_entry(bond);
 
 	__hw_addr_flush(&bond->mc_list);
+
+	list_for_each_entry_safe(vlan, tmp, &bond->vlan_list, vlan_list) {
+		list_del(&vlan->vlan_list);
+		kfree(vlan);
+	}
 }
 
 /*------------------------- Module initialization ---------------------------*/
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 1a99764..c311aed 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -313,19 +313,26 @@
 		       bond->dev->name, (int)strlen(buf) - 1, buf);
 		ret = -EINVAL;
 		goto out;
-	} else {
-		if (bond->params.mode == BOND_MODE_8023AD)
-			bond_unset_master_3ad_flags(bond);
-
-		if (bond->params.mode == BOND_MODE_ALB)
-			bond_unset_master_alb_flags(bond);
-
-		bond->params.mode = new_value;
-		bond_set_mode_ops(bond, bond->params.mode);
-		pr_info("%s: setting mode to %s (%d).\n",
-			bond->dev->name, bond_mode_tbl[new_value].modename,
-		       new_value);
 	}
+	if ((new_value == BOND_MODE_ALB ||
+	     new_value == BOND_MODE_TLB) &&
+	    bond->params.arp_interval) {
+		pr_err("%s: %s mode is incompatible with arp monitoring.\n",
+		       bond->dev->name, bond_mode_tbl[new_value].modename);
+		ret = -EINVAL;
+		goto out;
+	}
+	if (bond->params.mode == BOND_MODE_8023AD)
+		bond_unset_master_3ad_flags(bond);
+
+	if (bond->params.mode == BOND_MODE_ALB)
+		bond_unset_master_alb_flags(bond);
+
+	bond->params.mode = new_value;
+	bond_set_mode_ops(bond, bond->params.mode);
+	pr_info("%s: setting mode to %s (%d).\n",
+		bond->dev->name, bond_mode_tbl[new_value].modename,
+		new_value);
 out:
 	return ret;
 }
@@ -510,7 +517,13 @@
 		ret = -EINVAL;
 		goto out;
 	}
-
+	if (bond->params.mode == BOND_MODE_ALB ||
+	    bond->params.mode == BOND_MODE_TLB) {
+		pr_info("%s: ARP monitoring cannot be used with ALB/TLB. Only MII monitoring is supported on %s.\n",
+			bond->dev->name, bond->dev->name);
+		ret = -EINVAL;
+		goto out;
+	}
 	pr_info("%s: Setting ARP monitoring interval to %d.\n",
 		bond->dev->name, new_value);
 	bond->params.arp_interval = new_value;
diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c
index 6c94803..f5058ff 100644
--- a/drivers/net/caif/caif_spi.c
+++ b/drivers/net/caif/caif_spi.c
@@ -165,6 +165,9 @@
 	len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
 			"Next RX len: %d\n", cfspi->rx_npck_len);
 
+	if (len > DEBUGFS_BUF_SIZE)
+		len = DEBUGFS_BUF_SIZE;
+
 	size = simple_read_from_buffer(user_buf, count, ppos, buf, len);
 	kfree(buf);
 
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 2c5227c..9d9e453 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -73,6 +73,15 @@
 	  This driver can also be built as a module. If so, the module will be
 	  called janz-ican3.ko.
 
+config HAVE_CAN_FLEXCAN
+	bool
+
+config CAN_FLEXCAN
+	tristate "Support for Freescale FLEXCAN based chips"
+	depends on CAN_DEV && HAVE_CAN_FLEXCAN
+	---help---
+	  Say Y here if you want support for Freescale FlexCAN.
+
 source "drivers/net/can/mscan/Kconfig"
 
 source "drivers/net/can/sja1000/Kconfig"
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 9047cd0..0057537 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -16,5 +16,6 @@
 obj-$(CONFIG_CAN_MCP251X)	+= mcp251x.o
 obj-$(CONFIG_CAN_BFIN)		+= bfin_can.o
 obj-$(CONFIG_CAN_JANZ_ICAN3)	+= janz-ican3.o
+obj-$(CONFIG_CAN_FLEXCAN)	+= flexcan.o
 
 ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
new file mode 100644
index 0000000..ef443a0
--- /dev/null
+++ b/drivers/net/can/flexcan.c
@@ -0,0 +1,1030 @@
+/*
+ * flexcan.c - FLEXCAN CAN controller driver
+ *
+ * Copyright (c) 2005-2006 Varma Electronics Oy
+ * Copyright (c) 2009 Sascha Hauer, Pengutronix
+ * Copyright (c) 2010 Marc Kleine-Budde, Pengutronix
+ *
+ * Based on code originally by Andrey Volkov <avolkov@varma-el.com>
+ *
+ * LICENCE:
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/netdevice.h>
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+#include <linux/can/platform/flexcan.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <mach/clock.h>
+
+#define DRV_NAME			"flexcan"
+
+/* 8 for RX fifo and 2 for error handling */
+#define FLEXCAN_NAPI_WEIGHT		(8 + 2)
+
+/* FLEXCAN module configuration register (CANMCR) bits */
+#define FLEXCAN_MCR_MDIS		BIT(31)
+#define FLEXCAN_MCR_FRZ			BIT(30)
+#define FLEXCAN_MCR_FEN			BIT(29)
+#define FLEXCAN_MCR_HALT		BIT(28)
+#define FLEXCAN_MCR_NOT_RDY		BIT(27)
+#define FLEXCAN_MCR_WAK_MSK		BIT(26)
+#define FLEXCAN_MCR_SOFTRST		BIT(25)
+#define FLEXCAN_MCR_FRZ_ACK		BIT(24)
+#define FLEXCAN_MCR_SUPV		BIT(23)
+#define FLEXCAN_MCR_SLF_WAK		BIT(22)
+#define FLEXCAN_MCR_WRN_EN		BIT(21)
+#define FLEXCAN_MCR_LPM_ACK		BIT(20)
+#define FLEXCAN_MCR_WAK_SRC		BIT(19)
+#define FLEXCAN_MCR_DOZE		BIT(18)
+#define FLEXCAN_MCR_SRX_DIS		BIT(17)
+#define FLEXCAN_MCR_BCC			BIT(16)
+#define FLEXCAN_MCR_LPRIO_EN		BIT(13)
+#define FLEXCAN_MCR_AEN			BIT(12)
+#define FLEXCAN_MCR_MAXMB(x)		((x) & 0xf)
+#define FLEXCAN_MCR_IDAM_A		(0 << 8)
+#define FLEXCAN_MCR_IDAM_B		(1 << 8)
+#define FLEXCAN_MCR_IDAM_C		(2 << 8)
+#define FLEXCAN_MCR_IDAM_D		(3 << 8)
+
+/* FLEXCAN control register (CANCTRL) bits */
+#define FLEXCAN_CTRL_PRESDIV(x)		(((x) & 0xff) << 24)
+#define FLEXCAN_CTRL_RJW(x)		(((x) & 0x03) << 22)
+#define FLEXCAN_CTRL_PSEG1(x)		(((x) & 0x07) << 19)
+#define FLEXCAN_CTRL_PSEG2(x)		(((x) & 0x07) << 16)
+#define FLEXCAN_CTRL_BOFF_MSK		BIT(15)
+#define FLEXCAN_CTRL_ERR_MSK		BIT(14)
+#define FLEXCAN_CTRL_CLK_SRC		BIT(13)
+#define FLEXCAN_CTRL_LPB		BIT(12)
+#define FLEXCAN_CTRL_TWRN_MSK		BIT(11)
+#define FLEXCAN_CTRL_RWRN_MSK		BIT(10)
+#define FLEXCAN_CTRL_SMP		BIT(7)
+#define FLEXCAN_CTRL_BOFF_REC		BIT(6)
+#define FLEXCAN_CTRL_TSYN		BIT(5)
+#define FLEXCAN_CTRL_LBUF		BIT(4)
+#define FLEXCAN_CTRL_LOM		BIT(3)
+#define FLEXCAN_CTRL_PROPSEG(x)		((x) & 0x07)
+#define FLEXCAN_CTRL_ERR_BUS		(FLEXCAN_CTRL_ERR_MSK)
+#define FLEXCAN_CTRL_ERR_STATE \
+	(FLEXCAN_CTRL_TWRN_MSK | FLEXCAN_CTRL_RWRN_MSK | \
+	 FLEXCAN_CTRL_BOFF_MSK)
+#define FLEXCAN_CTRL_ERR_ALL \
+	(FLEXCAN_CTRL_ERR_BUS | FLEXCAN_CTRL_ERR_STATE)
+
+/* FLEXCAN error and status register (ESR) bits */
+#define FLEXCAN_ESR_TWRN_INT		BIT(17)
+#define FLEXCAN_ESR_RWRN_INT		BIT(16)
+#define FLEXCAN_ESR_BIT1_ERR		BIT(15)
+#define FLEXCAN_ESR_BIT0_ERR		BIT(14)
+#define FLEXCAN_ESR_ACK_ERR		BIT(13)
+#define FLEXCAN_ESR_CRC_ERR		BIT(12)
+#define FLEXCAN_ESR_FRM_ERR		BIT(11)
+#define FLEXCAN_ESR_STF_ERR		BIT(10)
+#define FLEXCAN_ESR_TX_WRN		BIT(9)
+#define FLEXCAN_ESR_RX_WRN		BIT(8)
+#define FLEXCAN_ESR_IDLE		BIT(7)
+#define FLEXCAN_ESR_TXRX		BIT(6)
+#define FLEXCAN_EST_FLT_CONF_SHIFT	(4)
+#define FLEXCAN_ESR_FLT_CONF_MASK	(0x3 << FLEXCAN_EST_FLT_CONF_SHIFT)
+#define FLEXCAN_ESR_FLT_CONF_ACTIVE	(0x0 << FLEXCAN_EST_FLT_CONF_SHIFT)
+#define FLEXCAN_ESR_FLT_CONF_PASSIVE	(0x1 << FLEXCAN_EST_FLT_CONF_SHIFT)
+#define FLEXCAN_ESR_BOFF_INT		BIT(2)
+#define FLEXCAN_ESR_ERR_INT		BIT(1)
+#define FLEXCAN_ESR_WAK_INT		BIT(0)
+#define FLEXCAN_ESR_ERR_BUS \
+	(FLEXCAN_ESR_BIT1_ERR | FLEXCAN_ESR_BIT0_ERR | \
+	 FLEXCAN_ESR_ACK_ERR | FLEXCAN_ESR_CRC_ERR | \
+	 FLEXCAN_ESR_FRM_ERR | FLEXCAN_ESR_STF_ERR)
+#define FLEXCAN_ESR_ERR_STATE \
+	(FLEXCAN_ESR_TWRN_INT | FLEXCAN_ESR_RWRN_INT | FLEXCAN_ESR_BOFF_INT)
+#define FLEXCAN_ESR_ERR_ALL \
+	(FLEXCAN_ESR_ERR_BUS | FLEXCAN_ESR_ERR_STATE)
+
+/* FLEXCAN interrupt flag register (IFLAG) bits */
+#define FLEXCAN_TX_BUF_ID		8
+#define FLEXCAN_IFLAG_BUF(x)		BIT(x)
+#define FLEXCAN_IFLAG_RX_FIFO_OVERFLOW	BIT(7)
+#define FLEXCAN_IFLAG_RX_FIFO_WARN	BIT(6)
+#define FLEXCAN_IFLAG_RX_FIFO_AVAILABLE	BIT(5)
+#define FLEXCAN_IFLAG_DEFAULT \
+	(FLEXCAN_IFLAG_RX_FIFO_OVERFLOW | FLEXCAN_IFLAG_RX_FIFO_AVAILABLE | \
+	 FLEXCAN_IFLAG_BUF(FLEXCAN_TX_BUF_ID))
+
+/* FLEXCAN message buffers */
+#define FLEXCAN_MB_CNT_CODE(x)		(((x) & 0xf) << 24)
+#define FLEXCAN_MB_CNT_SRR		BIT(22)
+#define FLEXCAN_MB_CNT_IDE		BIT(21)
+#define FLEXCAN_MB_CNT_RTR		BIT(20)
+#define FLEXCAN_MB_CNT_LENGTH(x)	(((x) & 0xf) << 16)
+#define FLEXCAN_MB_CNT_TIMESTAMP(x)	((x) & 0xffff)
+
+#define FLEXCAN_MB_CODE_MASK		(0xf0ffffff)
+
+/* Structure of the message buffer */
+struct flexcan_mb {
+	u32 can_ctrl;
+	u32 can_id;
+	u32 data[2];
+};
+
+/* Structure of the hardware registers */
+struct flexcan_regs {
+	u32 mcr;		/* 0x00 */
+	u32 ctrl;		/* 0x04 */
+	u32 timer;		/* 0x08 */
+	u32 _reserved1;		/* 0x0c */
+	u32 rxgmask;		/* 0x10 */
+	u32 rx14mask;		/* 0x14 */
+	u32 rx15mask;		/* 0x18 */
+	u32 ecr;		/* 0x1c */
+	u32 esr;		/* 0x20 */
+	u32 imask2;		/* 0x24 */
+	u32 imask1;		/* 0x28 */
+	u32 iflag2;		/* 0x2c */
+	u32 iflag1;		/* 0x30 */
+	u32 _reserved2[19];
+	struct flexcan_mb cantxfg[64];
+};
+
+struct flexcan_priv {
+	struct can_priv can;
+	struct net_device *dev;
+	struct napi_struct napi;
+
+	void __iomem *base;
+	u32 reg_esr;
+	u32 reg_ctrl_default;
+
+	struct clk *clk;
+	struct flexcan_platform_data *pdata;
+};
+
+static struct can_bittiming_const flexcan_bittiming_const = {
+	.name = DRV_NAME,
+	.tseg1_min = 4,
+	.tseg1_max = 16,
+	.tseg2_min = 2,
+	.tseg2_max = 8,
+	.sjw_max = 4,
+	.brp_min = 1,
+	.brp_max = 256,
+	.brp_inc = 1,
+};
+
+/*
+ * Switch transceiver on or off
+ */
+static void flexcan_transceiver_switch(const struct flexcan_priv *priv, int on)
+{
+	if (priv->pdata && priv->pdata->transceiver_switch)
+		priv->pdata->transceiver_switch(on);
+}
+
+static inline int flexcan_has_and_handle_berr(const struct flexcan_priv *priv,
+					      u32 reg_esr)
+{
+	return (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) &&
+		(reg_esr & FLEXCAN_ESR_ERR_BUS);
+}
+
+static inline void flexcan_chip_enable(struct flexcan_priv *priv)
+{
+	struct flexcan_regs __iomem *regs = priv->base;
+	u32 reg;
+
+	reg = readl(&regs->mcr);
+	reg &= ~FLEXCAN_MCR_MDIS;
+	writel(reg, &regs->mcr);
+
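+	/* allow some time for the controller to exit disable mode */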
+	udelay(10);
+}
+
+static inline void flexcan_chip_disable(struct flexcan_priv *priv)
+{
+	struct flexcan_regs __iomem *regs = priv->base;
+	u32 reg;
+
+	reg = readl(&regs->mcr);
+	reg |= FLEXCAN_MCR_MDIS;
+	writel(reg, &regs->mcr);
+}
+
+static int flexcan_get_berr_counter(const struct net_device *dev,
+				    struct can_berr_counter *bec)
+{
+	const struct flexcan_priv *priv = netdev_priv(dev);
+	struct flexcan_regs __iomem *regs = priv->base;
+	u32 reg = readl(&regs->ecr);
+
+	bec->txerr = (reg >> 0) & 0xff;
+	bec->rxerr = (reg >> 8) & 0xff;
+
+	return 0;
+}
+
+static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	const struct flexcan_priv *priv = netdev_priv(dev);
+	struct net_device_stats *stats = &dev->stats;
+	struct flexcan_regs __iomem *regs = priv->base;
+	struct can_frame *cf = (struct can_frame *)skb->data;
+	u32 can_id;
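+	/* MB code 0xc requests transmission of the frame */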
+	u32 ctrl = FLEXCAN_MB_CNT_CODE(0xc) | (cf->can_dlc << 16);
+
+	if (can_dropped_invalid_skb(dev, skb))
+		return NETDEV_TX_OK;
+
+	netif_stop_queue(dev);
+
+	if (cf->can_id & CAN_EFF_FLAG) {
+		can_id = cf->can_id & CAN_EFF_MASK;
+		ctrl |= FLEXCAN_MB_CNT_IDE | FLEXCAN_MB_CNT_SRR;
+	} else {
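+		/* standard frame IDs are stored left-justified in bits 28..18 */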
+		can_id = (cf->can_id & CAN_SFF_MASK) << 18;
+	}
+
+	if (cf->can_id & CAN_RTR_FLAG)
+		ctrl |= FLEXCAN_MB_CNT_RTR;
+
+	if (cf->can_dlc > 0) {
+		u32 data = be32_to_cpup((__be32 *)&cf->data[0]);
+		writel(data, &regs->cantxfg[FLEXCAN_TX_BUF_ID].data[0]);
+	}
+	if (cf->can_dlc > 3) {
+		u32 data = be32_to_cpup((__be32 *)&cf->data[4]);
+		writel(data, &regs->cantxfg[FLEXCAN_TX_BUF_ID].data[1]);
+	}
+
+	writel(can_id, &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_id);
+	writel(ctrl, &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl);
+
+	kfree_skb(skb);
+
+	/* tx_packets is incremented in flexcan_irq */
+	stats->tx_bytes += cf->can_dlc;
+
+	return NETDEV_TX_OK;
+}
+
+static void do_bus_err(struct net_device *dev,
+		       struct can_frame *cf, u32 reg_esr)
+{
+	struct flexcan_priv *priv = netdev_priv(dev);
+	int rx_errors = 0, tx_errors = 0;
+
+	cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+
+	if (reg_esr & FLEXCAN_ESR_BIT1_ERR) {
+		dev_dbg(dev->dev.parent, "BIT1_ERR irq\n");
+		cf->data[2] |= CAN_ERR_PROT_BIT1;
+		tx_errors = 1;
+	}
+	if (reg_esr & FLEXCAN_ESR_BIT0_ERR) {
+		dev_dbg(dev->dev.parent, "BIT0_ERR irq\n");
+		cf->data[2] |= CAN_ERR_PROT_BIT0;
+		tx_errors = 1;
+	}
+	if (reg_esr & FLEXCAN_ESR_ACK_ERR) {
+		dev_dbg(dev->dev.parent, "ACK_ERR irq\n");
+		cf->can_id |= CAN_ERR_ACK;
+		cf->data[3] |= CAN_ERR_PROT_LOC_ACK;
+		tx_errors = 1;
+	}
+	if (reg_esr & FLEXCAN_ESR_CRC_ERR) {
+		dev_dbg(dev->dev.parent, "CRC_ERR irq\n");
+		cf->data[2] |= CAN_ERR_PROT_BIT;
+		cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;
+		rx_errors = 1;
+	}
+	if (reg_esr & FLEXCAN_ESR_FRM_ERR) {
+		dev_dbg(dev->dev.parent, "FRM_ERR irq\n");
+		cf->data[2] |= CAN_ERR_PROT_FORM;
+		rx_errors = 1;
+	}
+	if (reg_esr & FLEXCAN_ESR_STF_ERR) {
+		dev_dbg(dev->dev.parent, "STF_ERR irq\n");
+		cf->data[2] |= CAN_ERR_PROT_STUFF;
+		rx_errors = 1;
+	}
+
+	priv->can.can_stats.bus_error++;
+	if (rx_errors)
+		dev->stats.rx_errors++;
+	if (tx_errors)
+		dev->stats.tx_errors++;
+}
+
+static int flexcan_poll_bus_err(struct net_device *dev, u32 reg_esr)
+{
+	struct sk_buff *skb;
+	struct can_frame *cf;
+
+	skb = alloc_can_err_skb(dev, &cf);
+	if (unlikely(!skb))
+		return 0;
+
+	do_bus_err(dev, cf, reg_esr);
+	netif_receive_skb(skb);
+
+	dev->stats.rx_packets++;
+	dev->stats.rx_bytes += cf->can_dlc;
+
+	return 1;
+}
+
+static void do_state(struct net_device *dev,
+		     struct can_frame *cf, enum can_state new_state)
+{
+	struct flexcan_priv *priv = netdev_priv(dev);
+	struct can_berr_counter bec;
+
+	flexcan_get_berr_counter(dev, &bec);
+
+	switch (priv->can.state) {
+	case CAN_STATE_ERROR_ACTIVE:
+		/*
+		 * from: ERROR_ACTIVE
+		 * to  : ERROR_WARNING, ERROR_PASSIVE, BUS_OFF
+		 * =>  : there was a warning int
+		 */
+		if (new_state >= CAN_STATE_ERROR_WARNING &&
+		    new_state <= CAN_STATE_BUS_OFF) {
+			dev_dbg(dev->dev.parent, "Error Warning IRQ\n");
+			priv->can.can_stats.error_warning++;
+
+			cf->can_id |= CAN_ERR_CRTL;
+			cf->data[1] = (bec.txerr > bec.rxerr) ?
+				CAN_ERR_CRTL_TX_WARNING :
+				CAN_ERR_CRTL_RX_WARNING;
+		}
+	case CAN_STATE_ERROR_WARNING:	/* fallthrough */
+		/*
+		 * from: ERROR_ACTIVE, ERROR_WARNING
+		 * to  : ERROR_PASSIVE, BUS_OFF
+		 * =>  : error passive int
+		 */
+		if (new_state >= CAN_STATE_ERROR_PASSIVE &&
+		    new_state <= CAN_STATE_BUS_OFF) {
+			dev_dbg(dev->dev.parent, "Error Passive IRQ\n");
+			priv->can.can_stats.error_passive++;
+
+			cf->can_id |= CAN_ERR_CRTL;
+			cf->data[1] = (bec.txerr > bec.rxerr) ?
+				CAN_ERR_CRTL_TX_PASSIVE :
+				CAN_ERR_CRTL_RX_PASSIVE;
+		}
+		break;
+	case CAN_STATE_BUS_OFF:
+		dev_err(dev->dev.parent,
+			"BUG! hardware recovered automatically from BUS_OFF\n");
+		break;
+	default:
+		break;
+	}
+
+	/* process state changes depending on the new state */
+	switch (new_state) {
+	case CAN_STATE_ERROR_ACTIVE:
+		dev_dbg(dev->dev.parent, "Error Active\n");
+		cf->can_id |= CAN_ERR_PROT;
+		cf->data[2] = CAN_ERR_PROT_ACTIVE;
+		break;
+	case CAN_STATE_BUS_OFF:
+		cf->can_id |= CAN_ERR_BUSOFF;
+		can_bus_off(dev);
+		break;
+	default:
+		break;
+	}
+}
+
+static int flexcan_poll_state(struct net_device *dev, u32 reg_esr)
+{
+	struct flexcan_priv *priv = netdev_priv(dev);
+	struct sk_buff *skb;
+	struct can_frame *cf;
+	enum can_state new_state;
+	int flt;
+
+	flt = reg_esr & FLEXCAN_ESR_FLT_CONF_MASK;
+	if (likely(flt == FLEXCAN_ESR_FLT_CONF_ACTIVE)) {
+		if (likely(!(reg_esr & (FLEXCAN_ESR_TX_WRN |
+					FLEXCAN_ESR_RX_WRN))))
+			new_state = CAN_STATE_ERROR_ACTIVE;
+		else
+			new_state = CAN_STATE_ERROR_WARNING;
+	} else if (unlikely(flt == FLEXCAN_ESR_FLT_CONF_PASSIVE))
+		new_state = CAN_STATE_ERROR_PASSIVE;
+	else
+		new_state = CAN_STATE_BUS_OFF;
+
+	/* state hasn't changed */
+	if (likely(new_state == priv->can.state))
+		return 0;
+
+	skb = alloc_can_err_skb(dev, &cf);
+	if (unlikely(!skb))
+		return 0;
+
+	do_state(dev, cf, new_state);
+	priv->can.state = new_state;
+	netif_receive_skb(skb);
+
+	dev->stats.rx_packets++;
+	dev->stats.rx_bytes += cf->can_dlc;
+
+	return 1;
+}
+
+static void flexcan_read_fifo(const struct net_device *dev,
+			      struct can_frame *cf)
+{
+	const struct flexcan_priv *priv = netdev_priv(dev);
+	struct flexcan_regs __iomem *regs = priv->base;
+	struct flexcan_mb __iomem *mb = &regs->cantxfg[0];
+	u32 reg_ctrl, reg_id;
+
+	reg_ctrl = readl(&mb->can_ctrl);
+	reg_id = readl(&mb->can_id);
+	if (reg_ctrl & FLEXCAN_MB_CNT_IDE)
+		cf->can_id = ((reg_id >> 0) & CAN_EFF_MASK) | CAN_EFF_FLAG;
+	else
+		cf->can_id = (reg_id >> 18) & CAN_SFF_MASK;
+
+	if (reg_ctrl & FLEXCAN_MB_CNT_RTR)
+		cf->can_id |= CAN_RTR_FLAG;
+	cf->can_dlc = get_can_dlc((reg_ctrl >> 16) & 0xf);
+
+	*(__be32 *)(cf->data + 0) = cpu_to_be32(readl(&mb->data[0]));
+	*(__be32 *)(cf->data + 4) = cpu_to_be32(readl(&mb->data[1]));
+
+	/* mark as read */
+	writel(FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, &regs->iflag1);
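+	/* reading the free-running timer releases the internal mailbox lock */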
+	readl(&regs->timer);
+}
+
+static int flexcan_read_frame(struct net_device *dev)
+{
+	struct net_device_stats *stats = &dev->stats;
+	struct can_frame *cf;
+	struct sk_buff *skb;
+
+	skb = alloc_can_skb(dev, &cf);
+	if (unlikely(!skb)) {
+		stats->rx_dropped++;
+		return 0;
+	}
+
+	flexcan_read_fifo(dev, cf);
+	netif_receive_skb(skb);
+
+	stats->rx_packets++;
+	stats->rx_bytes += cf->can_dlc;
+
+	return 1;
+}
+
+static int flexcan_poll(struct napi_struct *napi, int quota)
+{
+	struct net_device *dev = napi->dev;
+	const struct flexcan_priv *priv = netdev_priv(dev);
+	struct flexcan_regs __iomem *regs = priv->base;
+	u32 reg_iflag1, reg_esr;
+	int work_done = 0;
+
+	/*
+	 * The error bits are cleared on read,
+	 * use saved value from irq handler.
+	 */
+	reg_esr = readl(&regs->esr) | priv->reg_esr;
+
+	/* handle state changes */
+	work_done += flexcan_poll_state(dev, reg_esr);
+
+	/* handle RX-FIFO */
+	reg_iflag1 = readl(&regs->iflag1);
+	while (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE &&
+	       work_done < quota) {
+		work_done += flexcan_read_frame(dev);
+		reg_iflag1 = readl(&regs->iflag1);
+	}
+
+	/* report bus errors */
+	if (flexcan_has_and_handle_berr(priv, reg_esr) && work_done < quota)
+		work_done += flexcan_poll_bus_err(dev, reg_esr);
+
+	if (work_done < quota) {
+		napi_complete(napi);
+		/* enable IRQs */
+		writel(FLEXCAN_IFLAG_DEFAULT, &regs->imask1);
+		writel(priv->reg_ctrl_default, &regs->ctrl);
+	}
+
+	return work_done;
+}
+
+static irqreturn_t flexcan_irq(int irq, void *dev_id)
+{
+	struct net_device *dev = dev_id;
+	struct net_device_stats *stats = &dev->stats;
+	struct flexcan_priv *priv = netdev_priv(dev);
+	struct flexcan_regs __iomem *regs = priv->base;
+	u32 reg_iflag1, reg_esr;
+
+	reg_iflag1 = readl(&regs->iflag1);
+	reg_esr = readl(&regs->esr);
+	writel(FLEXCAN_ESR_ERR_INT, &regs->esr);	/* ACK err IRQ */
+
+	/*
+	 * schedule NAPI in case of:
+	 * - rx IRQ
+	 * - state change IRQ
+	 * - bus error IRQ and bus error reporting is activated
+	 */
+	if ((reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE) ||
+	    (reg_esr & FLEXCAN_ESR_ERR_STATE) ||
+	    flexcan_has_and_handle_berr(priv, reg_esr)) {
+		/*
+		 * The error bits are cleared on read,
+		 * save them for later use.
+		 */
+		priv->reg_esr = reg_esr & FLEXCAN_ESR_ERR_BUS;
+		writel(FLEXCAN_IFLAG_DEFAULT & ~FLEXCAN_IFLAG_RX_FIFO_AVAILABLE,
+		       &regs->imask1);
+		writel(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL,
+		       &regs->ctrl);
+		napi_schedule(&priv->napi);
+	}
+
+	/* FIFO overflow */
+	if (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_OVERFLOW) {
+		writel(FLEXCAN_IFLAG_RX_FIFO_OVERFLOW, &regs->iflag1);
+		dev->stats.rx_over_errors++;
+		dev->stats.rx_errors++;
+	}
+
+	/* transmission complete interrupt */
+	if (reg_iflag1 & (1 << FLEXCAN_TX_BUF_ID)) {
+		/* tx_bytes is incremented in flexcan_start_xmit */
+		stats->tx_packets++;
+		writel((1 << FLEXCAN_TX_BUF_ID), &regs->iflag1);
+		netif_wake_queue(dev);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void flexcan_set_bittiming(struct net_device *dev)
+{
+	const struct flexcan_priv *priv = netdev_priv(dev);
+	const struct can_bittiming *bt = &priv->can.bittiming;
+	struct flexcan_regs __iomem *regs = priv->base;
+	u32 reg;
+
+	reg = readl(&regs->ctrl);
+	reg &= ~(FLEXCAN_CTRL_PRESDIV(0xff) |
+		 FLEXCAN_CTRL_RJW(0x3) |
+		 FLEXCAN_CTRL_PSEG1(0x7) |
+		 FLEXCAN_CTRL_PSEG2(0x7) |
+		 FLEXCAN_CTRL_PROPSEG(0x7) |
+		 FLEXCAN_CTRL_LPB |
+		 FLEXCAN_CTRL_SMP |
+		 FLEXCAN_CTRL_LOM);
+
+	reg |= FLEXCAN_CTRL_PRESDIV(bt->brp - 1) |
+		FLEXCAN_CTRL_PSEG1(bt->phase_seg1 - 1) |
+		FLEXCAN_CTRL_PSEG2(bt->phase_seg2 - 1) |
+		FLEXCAN_CTRL_RJW(bt->sjw - 1) |
+		FLEXCAN_CTRL_PROPSEG(bt->prop_seg - 1);
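+
+	/*
+	 * Example, assuming a 24 MHz peripheral clock: 500 kbit/s with 16
+	 * time quanta per bit gives brp = 3, so PRESDIV is written as 2;
+	 * the segment fields are stored minus one in the same way.
+	 */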
+
+	if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
+		reg |= FLEXCAN_CTRL_LPB;
+	if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+		reg |= FLEXCAN_CTRL_LOM;
+	if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
+		reg |= FLEXCAN_CTRL_SMP;
+
+	dev_info(dev->dev.parent, "writing ctrl=0x%08x\n", reg);
+	writel(reg, &regs->ctrl);
+
+	/* print chip status */
+	dev_dbg(dev->dev.parent, "%s: mcr=0x%08x ctrl=0x%08x\n", __func__,
+		readl(&regs->mcr), readl(&regs->ctrl));
+}
+
+/*
+ * flexcan_chip_start
+ *
+ * this function is entered with clocks enabled
+ *
+ */
+static int flexcan_chip_start(struct net_device *dev)
+{
+	struct flexcan_priv *priv = netdev_priv(dev);
+	struct flexcan_regs __iomem *regs = priv->base;
+	unsigned int i;
+	int err;
+	u32 reg_mcr, reg_ctrl;
+
+	/* enable module */
+	flexcan_chip_enable(priv);
+
+	/* soft reset */
+	writel(FLEXCAN_MCR_SOFTRST, &regs->mcr);
+	udelay(10);
+
+	reg_mcr = readl(&regs->mcr);
+	if (reg_mcr & FLEXCAN_MCR_SOFTRST) {
+		dev_err(dev->dev.parent,
+			"Failed to softreset can module (mcr=0x%08x)\n",
+			reg_mcr);
+		err = -ENODEV;
+		goto out;
+	}
+
+	flexcan_set_bittiming(dev);
+
+	/*
+	 * MCR
+	 *
+	 * enable freeze
+	 * enable fifo
+	 * halt now
+	 * only supervisor access
+	 * enable warning int
+	 * choose format C
+	 *
+	 */
+	reg_mcr = readl(&regs->mcr);
+	reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_FEN | FLEXCAN_MCR_HALT |
+		FLEXCAN_MCR_SUPV | FLEXCAN_MCR_WRN_EN |
+		FLEXCAN_MCR_IDAM_C;
+	dev_dbg(dev->dev.parent, "%s: writing mcr=0x%08x", __func__, reg_mcr);
+	writel(reg_mcr, &regs->mcr);
+
+	/*
+	 * CTRL
+	 *
+	 * disable timer sync feature
+	 *
+	 * disable auto busoff recovery
+	 * transmit lowest buffer first
+	 *
+	 * enable tx and rx warning interrupt
+	 * enable bus off interrupt
+	 * (== FLEXCAN_CTRL_ERR_STATE)
+	 *
+	 * _note_: we enable the "error interrupt"
+	 * (FLEXCAN_CTRL_ERR_MSK), too. Otherwise we don't get any
+	 * warning or bus passive interrupts.
+	 */
+	reg_ctrl = readl(&regs->ctrl);
+	reg_ctrl &= ~FLEXCAN_CTRL_TSYN;
+	reg_ctrl |= FLEXCAN_CTRL_BOFF_REC | FLEXCAN_CTRL_LBUF |
+		FLEXCAN_CTRL_ERR_STATE | FLEXCAN_CTRL_ERR_MSK;
+
+	/* save for later use */
+	priv->reg_ctrl_default = reg_ctrl;
+	dev_dbg(dev->dev.parent, "%s: writing ctrl=0x%08x", __func__, reg_ctrl);
+	writel(reg_ctrl, &regs->ctrl);
+
+	for (i = 0; i < ARRAY_SIZE(regs->cantxfg); i++) {
+		writel(0, &regs->cantxfg[i].can_ctrl);
+		writel(0, &regs->cantxfg[i].can_id);
+		writel(0, &regs->cantxfg[i].data[0]);
+		writel(0, &regs->cantxfg[i].data[1]);
+
+		/* put MB into rx queue */
+		writel(FLEXCAN_MB_CNT_CODE(0x4), &regs->cantxfg[i].can_ctrl);
+	}
+
+	/* acceptance mask/acceptance code (accept everything) */
+	writel(0x0, &regs->rxgmask);
+	writel(0x0, &regs->rx14mask);
+	writel(0x0, &regs->rx15mask);
+
+	flexcan_transceiver_switch(priv, 1);
+
+	/* synchronize with the can bus */
+	reg_mcr = readl(&regs->mcr);
+	reg_mcr &= ~FLEXCAN_MCR_HALT;
+	writel(reg_mcr, &regs->mcr);
+
+	priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+	/* enable FIFO interrupts */
+	writel(FLEXCAN_IFLAG_DEFAULT, &regs->imask1);
+
+	/* print chip status */
+	dev_dbg(dev->dev.parent, "%s: reading mcr=0x%08x ctrl=0x%08x\n",
+		__func__, readl(&regs->mcr), readl(&regs->ctrl));
+
+	return 0;
+
+ out:
+	flexcan_chip_disable(priv);
+	return err;
+}
+
+/*
+ * flexcan_chip_stop
+ *
+ * this function is entered with clocks enabled
+ *
+ */
+static void flexcan_chip_stop(struct net_device *dev)
+{
+	struct flexcan_priv *priv = netdev_priv(dev);
+	struct flexcan_regs __iomem *regs = priv->base;
+	u32 reg;
+
+	/* Disable all interrupts */
+	writel(0, &regs->imask1);
+
+	/* Disable + halt module */
+	reg = readl(&regs->mcr);
+	reg |= FLEXCAN_MCR_MDIS | FLEXCAN_MCR_HALT;
+	writel(reg, &regs->mcr);
+
+	flexcan_transceiver_switch(priv, 0);
+	priv->can.state = CAN_STATE_STOPPED;
+
+	return;
+}
+
+static int flexcan_open(struct net_device *dev)
+{
+	struct flexcan_priv *priv = netdev_priv(dev);
+	int err;
+
+	clk_enable(priv->clk);
+
+	err = open_candev(dev);
+	if (err)
+		goto out;
+
+	err = request_irq(dev->irq, flexcan_irq, IRQF_SHARED, dev->name, dev);
+	if (err)
+		goto out_close;
+
+	/* start chip and queuing */
+	err = flexcan_chip_start(dev);
+	if (err)
+		goto out_close;
+	napi_enable(&priv->napi);
+	netif_start_queue(dev);
+
+	return 0;
+
+ out_close:
+	close_candev(dev);
+ out:
+	clk_disable(priv->clk);
+
+	return err;
+}
+
+static int flexcan_close(struct net_device *dev)
+{
+	struct flexcan_priv *priv = netdev_priv(dev);
+
+	netif_stop_queue(dev);
+	napi_disable(&priv->napi);
+	flexcan_chip_stop(dev);
+
+	free_irq(dev->irq, dev);
+	clk_disable(priv->clk);
+
+	close_candev(dev);
+
+	return 0;
+}
+
+static int flexcan_set_mode(struct net_device *dev, enum can_mode mode)
+{
+	int err;
+
+	switch (mode) {
+	case CAN_MODE_START:
+		err = flexcan_chip_start(dev);
+		if (err)
+			return err;
+
+		netif_wake_queue(dev);
+		break;
+
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+static const struct net_device_ops flexcan_netdev_ops = {
+	.ndo_open	= flexcan_open,
+	.ndo_stop	= flexcan_close,
+	.ndo_start_xmit	= flexcan_start_xmit,
+};
+
+static int __devinit register_flexcandev(struct net_device *dev)
+{
+	struct flexcan_priv *priv = netdev_priv(dev);
+	struct flexcan_regs __iomem *regs = priv->base;
+	u32 reg;
+	int err;
+
+	clk_enable(priv->clk);
+
+	/* select "bus clock", chip must be disabled */
+	flexcan_chip_disable(priv);
+	reg = readl(&regs->ctrl);
+	reg |= FLEXCAN_CTRL_CLK_SRC;
+	writel(reg, &regs->ctrl);
+
+	flexcan_chip_enable(priv);
+
+	/* set freeze, halt and activate FIFO, restrict register access */
+	reg = readl(&regs->mcr);
+	reg |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT |
+		FLEXCAN_MCR_FEN | FLEXCAN_MCR_SUPV;
+	writel(reg, &regs->mcr);
+
+	/*
+	 * Currently we only support newer versions of this core
+	 * featuring an RX FIFO. Older cores found on some ColdFire
+	 * derivatives are not yet supported.
+	 */
+	reg = readl(&regs->mcr);
+	if (!(reg & FLEXCAN_MCR_FEN)) {
+		dev_err(dev->dev.parent,
+			"Could not enable RX FIFO, unsupported core\n");
+		err = -ENODEV;
+		goto out;
+	}
+
+	err = register_candev(dev);
+
+ out:
+	/* disable core and turn off clocks */
+	flexcan_chip_disable(priv);
+	clk_disable(priv->clk);
+
+	return err;
+}
+
+static void __devexit unregister_flexcandev(struct net_device *dev)
+{
+	unregister_candev(dev);
+}
+
+static int __devinit flexcan_probe(struct platform_device *pdev)
+{
+	struct net_device *dev;
+	struct flexcan_priv *priv;
+	struct resource *mem;
+	struct clk *clk;
+	void __iomem *base;
+	resource_size_t mem_size;
+	int err, irq;
+
+	clk = clk_get(&pdev->dev, NULL);
+	if (IS_ERR(clk)) {
+		dev_err(&pdev->dev, "no clock defined\n");
+		err = PTR_ERR(clk);
+		goto failed_clock;
+	}
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	irq = platform_get_irq(pdev, 0);
+	if (!mem || irq <= 0) {
+		err = -ENODEV;
+		goto failed_get;
+	}
+
+	mem_size = resource_size(mem);
+	if (!request_mem_region(mem->start, mem_size, pdev->name)) {
+		err = -EBUSY;
+		goto failed_req;
+	}
+
+	base = ioremap(mem->start, mem_size);
+	if (!base) {
+		err = -ENOMEM;
+		goto failed_map;
+	}
+
+	dev = alloc_candev(sizeof(struct flexcan_priv), 0);
+	if (!dev) {
+		err = -ENOMEM;
+		goto failed_alloc;
+	}
+
+	dev->netdev_ops = &flexcan_netdev_ops;
+	dev->irq = irq;
+	dev->flags |= IFF_ECHO; /* we support local echo in hardware */
+
+	priv = netdev_priv(dev);
+	priv->can.clock.freq = clk_get_rate(clk);
+	priv->can.bittiming_const = &flexcan_bittiming_const;
+	priv->can.do_set_mode = flexcan_set_mode;
+	priv->can.do_get_berr_counter = flexcan_get_berr_counter;
+	priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
+		CAN_CTRLMODE_LISTENONLY	| CAN_CTRLMODE_3_SAMPLES |
+		CAN_CTRLMODE_BERR_REPORTING;
+	priv->base = base;
+	priv->dev = dev;
+	priv->clk = clk;
+	priv->pdata = pdev->dev.platform_data;
+
+	netif_napi_add(dev, &priv->napi, flexcan_poll, FLEXCAN_NAPI_WEIGHT);
+
+	dev_set_drvdata(&pdev->dev, dev);
+	SET_NETDEV_DEV(dev, &pdev->dev);
+
+	err = register_flexcandev(dev);
+	if (err) {
+		dev_err(&pdev->dev, "registering netdev failed\n");
+		goto failed_register;
+	}
+
+	dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%d)\n",
+		 priv->base, dev->irq);
+
+	return 0;
+
+ failed_register:
+	free_candev(dev);
+ failed_alloc:
+	iounmap(base);
+ failed_map:
+	release_mem_region(mem->start, mem_size);
+ failed_req:
+	clk_put(clk);
+ failed_get:
+ failed_clock:
+	return err;
+}
+
+static int __devexit flexcan_remove(struct platform_device *pdev)
+{
+	struct net_device *dev = platform_get_drvdata(pdev);
+	struct flexcan_priv *priv = netdev_priv(dev);
+	struct resource *mem;
+
+	unregister_flexcandev(dev);
+	platform_set_drvdata(pdev, NULL);
+	free_candev(dev);
+	iounmap(priv->base);
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	release_mem_region(mem->start, resource_size(mem));
+
+	clk_put(priv->clk);
+
+	return 0;
+}
+
+static struct platform_driver flexcan_driver = {
+	.driver.name = DRV_NAME,
+	.probe = flexcan_probe,
+	.remove = __devexit_p(flexcan_remove),
+};
+
+static int __init flexcan_init(void)
+{
+	pr_info("%s netdevice driver\n", DRV_NAME);
+	return platform_driver_register(&flexcan_driver);
+}
+
+static void __exit flexcan_exit(void)
+{
+	platform_driver_unregister(&flexcan_driver);
+	pr_info("%s: driver removed\n", DRV_NAME);
+}
+
+module_init(flexcan_init);
+module_exit(flexcan_exit);
+
+MODULE_AUTHOR("Sascha Hauer <kernel@pengutronix.de>, "
+	      "Marc Kleine-Budde <kernel@pengutronix.de>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CAN port driver for flexcan based chip");
diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
index 97ff6fe..0452549 100644
--- a/drivers/net/can/usb/Kconfig
+++ b/drivers/net/can/usb/Kconfig
@@ -7,4 +7,10 @@
 	  This driver is for the one channel CPC-USB/ARM7 CAN/USB interface
 	  from EMS Dr. Thomas Wuensche (http://www.ems-wuensche.de).
 
+config CAN_ESD_USB2
+	tristate "ESD USB/2 CAN/USB interface"
+	---help---
+	  This driver supports the CAN-USB/2 interface
+	  from esd electronic system design gmbh (http://www.esd.eu).
+
 endmenu
diff --git a/drivers/net/can/usb/Makefile b/drivers/net/can/usb/Makefile
index 0afd51d..fce3cf1 100644
--- a/drivers/net/can/usb/Makefile
+++ b/drivers/net/can/usb/Makefile
@@ -3,5 +3,6 @@
 #
 
 obj-$(CONFIG_CAN_EMS_USB) += ems_usb.o
+obj-$(CONFIG_CAN_ESD_USB2) += esd_usb2.o
 
 ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
new file mode 100644
index 0000000..05a5275
--- /dev/null
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -0,0 +1,1132 @@
+/*
+ * CAN driver for esd CAN-USB/2
+ *
+ * Copyright (C) 2010 Matthias Fuchs <matthias.fuchs@esd.eu>, esd gmbh
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#include <linux/init.h>
+#include <linux/signal.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/usb.h>
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+
+MODULE_AUTHOR("Matthias Fuchs <matthias.fuchs@esd.eu>");
+MODULE_DESCRIPTION("CAN driver for esd CAN-USB/2 interfaces");
+MODULE_LICENSE("GPL v2");
+
+/* Define these values to match your devices */
+#define USB_ESDGMBH_VENDOR_ID	0x0ab4
+#define USB_CANUSB2_PRODUCT_ID	0x0010
+
+#define ESD_USB2_CAN_CLOCK	60000000
+#define ESD_USB2_MAX_NETS	2
+
+/* USB2 commands */
+#define CMD_VERSION		1 /* also used for VERSION_REPLY */
+#define CMD_CAN_RX		2 /* device to host only */
+#define CMD_CAN_TX		3 /* also used for TX_DONE */
+#define CMD_SETBAUD		4 /* also used for SETBAUD_REPLY */
+#define CMD_TS			5 /* also used for TS_REPLY */
+#define CMD_IDADD		6 /* also used for IDADD_REPLY */
+
+/* esd CAN message flags - dlc field */
+#define ESD_RTR			0x10
+
+/* esd CAN message flags - id field */
+#define ESD_EXTID		0x20000000
+#define ESD_EVENT		0x40000000
+#define ESD_IDMASK		0x1fffffff
+
+/* esd CAN event ids used by this driver */
+#define ESD_EV_CAN_ERROR_EXT	2
+
+/* baudrate message flags */
+#define ESD_USB2_UBR		0x80000000
+#define ESD_USB2_LOM		0x40000000
+#define ESD_USB2_NO_BAUDRATE	0x7fffffff
+#define ESD_USB2_TSEG1_MIN	1
+#define ESD_USB2_TSEG1_MAX	16
+#define ESD_USB2_TSEG1_SHIFT	16
+#define ESD_USB2_TSEG2_MIN	1
+#define ESD_USB2_TSEG2_MAX	8
+#define ESD_USB2_TSEG2_SHIFT	20
+#define ESD_USB2_SJW_MAX	4
+#define ESD_USB2_SJW_SHIFT	14
+#define ESD_USB2_BRP_MIN	1
+#define ESD_USB2_BRP_MAX	1024
+#define ESD_USB2_BRP_INC	1
+#define ESD_USB2_3_SAMPLES	0x00800000
+
+/* esd IDADD message */
+#define ESD_ID_ENABLE		0x80
+#define ESD_MAX_ID_SEGMENT	64
+
+/* SJA1000 ECC register (emulated by usb2 firmware) */
+#define SJA1000_ECC_SEG		0x1F
+#define SJA1000_ECC_DIR		0x20
+#define SJA1000_ECC_ERR		0x06
+#define SJA1000_ECC_BIT		0x00
+#define SJA1000_ECC_FORM	0x40
+#define SJA1000_ECC_STUFF	0x80
+#define SJA1000_ECC_MASK	0xc0
+
+/* esd bus state event codes */
+#define ESD_BUSSTATE_MASK	0xc0
+#define ESD_BUSSTATE_WARN	0x40
+#define ESD_BUSSTATE_ERRPASSIVE	0x80
+#define ESD_BUSSTATE_BUSOFF	0xc0
+
+#define RX_BUFFER_SIZE		1024
+#define MAX_RX_URBS		4
+#define MAX_TX_URBS		16 /* must be a power of 2 */
+
+struct header_msg {
+	u8 len; /* len is always the total message length in 32bit words */
+	u8 cmd;
+	u8 rsvd[2];
+};
+
+struct version_msg {
+	u8 len;
+	u8 cmd;
+	u8 rsvd;
+	u8 flags;
+	__le32 drv_version;
+};
+
+struct version_reply_msg {
+	u8 len;
+	u8 cmd;
+	u8 nets;
+	u8 features;
+	__le32 version;
+	u8 name[16];
+	__le32 rsvd;
+	__le32 ts;
+};
+
+struct rx_msg {
+	u8 len;
+	u8 cmd;
+	u8 net;
+	u8 dlc;
+	__le32 ts;
+	__le32 id; /* upper 3 bits contain flags */
+	u8 data[8];
+};
+
+struct tx_msg {
+	u8 len;
+	u8 cmd;
+	u8 net;
+	u8 dlc;
+	__le32 hnd;
+	__le32 id; /* upper 3 bits contain flags */
+	u8 data[8];
+};
+
+struct tx_done_msg {
+	u8 len;
+	u8 cmd;
+	u8 net;
+	u8 status;
+	__le32 hnd;
+	__le32 ts;
+};
+
+struct id_filter_msg {
+	u8 len;
+	u8 cmd;
+	u8 net;
+	u8 option;
+	__le32 mask[ESD_MAX_ID_SEGMENT + 1];
+};
+
+struct set_baudrate_msg {
+	u8 len;
+	u8 cmd;
+	u8 net;
+	u8 rsvd;
+	__le32 baud;
+};
+
+/* Main message type exchanged between the driver and the device */
+struct __attribute__ ((packed)) esd_usb2_msg {
+	union {
+		struct header_msg hdr;
+		struct version_msg version;
+		struct version_reply_msg version_reply;
+		struct rx_msg rx;
+		struct tx_msg tx;
+		struct tx_done_msg txdone;
+		struct set_baudrate_msg setbaud;
+		struct id_filter_msg filter;
+	} msg;
+};
+
+static struct usb_device_id esd_usb2_table[] = {
+	{USB_DEVICE(USB_ESDGMBH_VENDOR_ID, USB_CANUSB2_PRODUCT_ID)},
+	{}
+};
+MODULE_DEVICE_TABLE(usb, esd_usb2_table);
+
+struct esd_usb2_net_priv;
+
+struct esd_tx_urb_context {
+	struct esd_usb2_net_priv *priv;
+	u32 echo_index;
+	int dlc;
+};
+
+struct esd_usb2 {
+	struct usb_device *udev;
+	struct esd_usb2_net_priv *nets[ESD_USB2_MAX_NETS];
+
+	struct usb_anchor rx_submitted;
+
+	int net_count;
+	u32 version;
+	int rxinitdone;
+};
+
+struct esd_usb2_net_priv {
+	struct can_priv can; /* must be the first member */
+
+	atomic_t active_tx_jobs;
+	struct usb_anchor tx_submitted;
+	struct esd_tx_urb_context tx_contexts[MAX_TX_URBS];
+
+	int open_time;
+	struct esd_usb2 *usb2;
+	struct net_device *netdev;
+	int index;
+	u8 old_state;
+	struct can_berr_counter bec;
+};
+
+static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv,
+			      struct esd_usb2_msg *msg)
+{
+	struct net_device_stats *stats = &priv->netdev->stats;
+	struct can_frame *cf;
+	struct sk_buff *skb;
+	u32 id = le32_to_cpu(msg->msg.rx.id) & ESD_IDMASK;
+
+	if (id == ESD_EV_CAN_ERROR_EXT) {
+		u8 state = msg->msg.rx.data[0];
+		u8 ecc = msg->msg.rx.data[1];
+		u8 txerr = msg->msg.rx.data[2];
+		u8 rxerr = msg->msg.rx.data[3];
+
+		skb = alloc_can_err_skb(priv->netdev, &cf);
+		if (skb == NULL) {
+			stats->rx_dropped++;
+			return;
+		}
+
+		if (state != priv->old_state) {
+			priv->old_state = state;
+
+			switch (state & ESD_BUSSTATE_MASK) {
+			case ESD_BUSSTATE_BUSOFF:
+				priv->can.state = CAN_STATE_BUS_OFF;
+				cf->can_id |= CAN_ERR_BUSOFF;
+				can_bus_off(priv->netdev);
+				break;
+			case ESD_BUSSTATE_WARN:
+				priv->can.state = CAN_STATE_ERROR_WARNING;
+				priv->can.can_stats.error_warning++;
+				break;
+			case ESD_BUSSTATE_ERRPASSIVE:
+				priv->can.state = CAN_STATE_ERROR_PASSIVE;
+				priv->can.can_stats.error_passive++;
+				break;
+			default:
+				priv->can.state = CAN_STATE_ERROR_ACTIVE;
+				break;
+			}
+		} else {
+			priv->can.can_stats.bus_error++;
+			stats->rx_errors++;
+
+			cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+
+			switch (ecc & SJA1000_ECC_MASK) {
+			case SJA1000_ECC_BIT:
+				cf->data[2] |= CAN_ERR_PROT_BIT;
+				break;
+			case SJA1000_ECC_FORM:
+				cf->data[2] |= CAN_ERR_PROT_FORM;
+				break;
+			case SJA1000_ECC_STUFF:
+				cf->data[2] |= CAN_ERR_PROT_STUFF;
+				break;
+			default:
+				cf->data[2] |= CAN_ERR_PROT_UNSPEC;
+				cf->data[3] = ecc & SJA1000_ECC_SEG;
+				break;
+			}
+
+			/* Error occurred during transmission? */
+			if (!(ecc & SJA1000_ECC_DIR))
+				cf->data[2] |= CAN_ERR_PROT_TX;
+
+			if (priv->can.state == CAN_STATE_ERROR_WARNING ||
+			    priv->can.state == CAN_STATE_ERROR_PASSIVE) {
+				cf->data[1] = (txerr > rxerr) ?
+					CAN_ERR_CRTL_TX_PASSIVE :
+					CAN_ERR_CRTL_RX_PASSIVE;
+			}
+			cf->data[6] = txerr;
+			cf->data[7] = rxerr;
+		}
+
+		netif_rx(skb);
+
+		priv->bec.txerr = txerr;
+		priv->bec.rxerr = rxerr;
+
+		stats->rx_packets++;
+		stats->rx_bytes += cf->can_dlc;
+	}
+}
+
+static void esd_usb2_rx_can_msg(struct esd_usb2_net_priv *priv,
+				struct esd_usb2_msg *msg)
+{
+	struct net_device_stats *stats = &priv->netdev->stats;
+	struct can_frame *cf;
+	struct sk_buff *skb;
+	int i;
+	u32 id;
+
+	if (!netif_device_present(priv->netdev))
+		return;
+
+	id = le32_to_cpu(msg->msg.rx.id);
+
+	if (id & ESD_EVENT) {
+		esd_usb2_rx_event(priv, msg);
+	} else {
+		skb = alloc_can_skb(priv->netdev, &cf);
+		if (skb == NULL) {
+			stats->rx_dropped++;
+			return;
+		}
+
+		cf->can_id = id & ESD_IDMASK;
+		cf->can_dlc = get_can_dlc(msg->msg.rx.dlc);
+
+		if (id & ESD_EXTID)
+			cf->can_id |= CAN_EFF_FLAG;
+
+		if (msg->msg.rx.dlc & ESD_RTR) {
+			cf->can_id |= CAN_RTR_FLAG;
+		} else {
+			for (i = 0; i < cf->can_dlc; i++)
+				cf->data[i] = msg->msg.rx.data[i];
+		}
+
+		netif_rx(skb);
+
+		stats->rx_packets++;
+		stats->rx_bytes += cf->can_dlc;
+	}
+
+	return;
+}
+
+static void esd_usb2_tx_done_msg(struct esd_usb2_net_priv *priv,
+				 struct esd_usb2_msg *msg)
+{
+	struct net_device_stats *stats = &priv->netdev->stats;
+	struct net_device *netdev = priv->netdev;
+	struct esd_tx_urb_context *context;
+
+	if (!netif_device_present(netdev))
+		return;
+
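+	/*
+	 * The low bits of the echoed hnd select the tx context, matching
+	 * the 0x80000000 | i handle built in esd_usb2_start_xmit().
+	 */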
+	context = &priv->tx_contexts[msg->msg.txdone.hnd & (MAX_TX_URBS - 1)];
+
+	if (!msg->msg.txdone.status) {
+		stats->tx_packets++;
+		stats->tx_bytes += context->dlc;
+		can_get_echo_skb(netdev, context->echo_index);
+	} else {
+		stats->tx_errors++;
+		can_free_echo_skb(netdev, context->echo_index);
+	}
+
+	/* Release context */
+	context->echo_index = MAX_TX_URBS;
+	atomic_dec(&priv->active_tx_jobs);
+
+	netif_wake_queue(netdev);
+}
+
+static void esd_usb2_read_bulk_callback(struct urb *urb)
+{
+	struct esd_usb2 *dev = urb->context;
+	int retval;
+	int pos = 0;
+	int i;
+
+	switch (urb->status) {
+	case 0: /* success */
+		break;
+
+	case -ENOENT:
+	case -ESHUTDOWN:
+		return;
+
+	default:
+		dev_info(dev->udev->dev.parent,
+			 "Rx URB aborted (%d)\n", urb->status);
+		goto resubmit_urb;
+	}
+
+	while (pos < urb->actual_length) {
+		struct esd_usb2_msg *msg;
+
+		msg = (struct esd_usb2_msg *)(urb->transfer_buffer + pos);
+
+		switch (msg->msg.hdr.cmd) {
+		case CMD_CAN_RX:
+			esd_usb2_rx_can_msg(dev->nets[msg->msg.rx.net], msg);
+			break;
+
+		case CMD_CAN_TX:
+			esd_usb2_tx_done_msg(dev->nets[msg->msg.txdone.net],
+					     msg);
+			break;
+		}
+
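+		/* hdr.len counts 32 bit words, so advance len << 2 bytes */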
+		pos += msg->msg.hdr.len << 2;
+
+		if (pos > urb->actual_length) {
+			dev_err(dev->udev->dev.parent, "format error\n");
+			break;
+		}
+	}
+
+resubmit_urb:
+	usb_fill_bulk_urb(urb, dev->udev, usb_rcvbulkpipe(dev->udev, 1),
+			  urb->transfer_buffer, RX_BUFFER_SIZE,
+			  esd_usb2_read_bulk_callback, dev);
+
+	retval = usb_submit_urb(urb, GFP_ATOMIC);
+	if (retval == -ENODEV) {
+		for (i = 0; i < dev->net_count; i++) {
+			if (dev->nets[i])
+				netif_device_detach(dev->nets[i]->netdev);
+		}
+	} else if (retval) {
+		dev_err(dev->udev->dev.parent,
+			"failed resubmitting read bulk urb: %d\n", retval);
+	}
+
+	return;
+}
+
+/*
+ * callback for bulk OUT urb (tx path)
+ */
+static void esd_usb2_write_bulk_callback(struct urb *urb)
+{
+	struct esd_tx_urb_context *context = urb->context;
+	struct esd_usb2_net_priv *priv;
+	struct esd_usb2 *dev;
+	struct net_device *netdev;
+	size_t size = sizeof(struct esd_usb2_msg);
+
+	WARN_ON(!context);
+
+	priv = context->priv;
+	netdev = priv->netdev;
+	dev = priv->usb2;
+
+	/* free up our allocated buffer */
+	usb_free_coherent(urb->dev, size,
+			  urb->transfer_buffer, urb->transfer_dma);
+
+	if (!netif_device_present(netdev))
+		return;
+
+	if (urb->status)
+		dev_info(netdev->dev.parent, "Tx URB aborted (%d)\n",
+			 urb->status);
+
+	netdev->trans_start = jiffies;
+}
+
+static ssize_t show_firmware(struct device *d,
+			     struct device_attribute *attr, char *buf)
+{
+	struct usb_interface *intf = to_usb_interface(d);
+	struct esd_usb2 *dev = usb_get_intfdata(intf);
+
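+	/* the firmware revision lives in the lower 16 bits of version */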
+	return sprintf(buf, "%d.%d.%d\n",
+		       (dev->version >> 12) & 0xf,
+		       (dev->version >> 8) & 0xf,
+		       dev->version & 0xff);
+}
+static DEVICE_ATTR(firmware, S_IRUGO, show_firmware, NULL);
+
+static ssize_t show_hardware(struct device *d,
+			     struct device_attribute *attr, char *buf)
+{
+	struct usb_interface *intf = to_usb_interface(d);
+	struct esd_usb2 *dev = usb_get_intfdata(intf);
+
+	return sprintf(buf, "%d.%d.%d\n",
+		       (dev->version >> 28) & 0xf,
+		       (dev->version >> 24) & 0xf,
+		       (dev->version >> 16) & 0xff);
+}
+static DEVICE_ATTR(hardware, S_IRUGO, show_hardware, NULL);
+
+static ssize_t show_nets(struct device *d,
+			 struct device_attribute *attr, char *buf)
+{
+	struct usb_interface *intf = to_usb_interface(d);
+	struct esd_usb2 *dev = usb_get_intfdata(intf);
+
+	return sprintf(buf, "%d", dev->net_count);
+}
+static DEVICE_ATTR(nets, S_IRUGO, show_nets, NULL);
+
+static int esd_usb2_send_msg(struct esd_usb2 *dev, struct esd_usb2_msg *msg)
+{
+	int actual_length;
+
+	return usb_bulk_msg(dev->udev,
+			    usb_sndbulkpipe(dev->udev, 2),
+			    msg,
+			    msg->msg.hdr.len << 2,
+			    &actual_length,
+			    1000);
+}
+
+static int esd_usb2_wait_msg(struct esd_usb2 *dev,
+			     struct esd_usb2_msg *msg)
+{
+	int actual_length;
+
+	return usb_bulk_msg(dev->udev,
+			    usb_rcvbulkpipe(dev->udev, 1),
+			    msg,
+			    sizeof(*msg),
+			    &actual_length,
+			    1000);
+}
+
+static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev)
+{
+	int i, err = 0;
+
+	if (dev->rxinitdone)
+		return 0;
+
+	for (i = 0; i < MAX_RX_URBS; i++) {
+		struct urb *urb = NULL;
+		u8 *buf = NULL;
+
+		/* create a URB, and a buffer for it */
+		urb = usb_alloc_urb(0, GFP_KERNEL);
+		if (!urb) {
+			dev_warn(dev->udev->dev.parent,
+				 "No memory left for URBs\n");
+			err = -ENOMEM;
+			break;
+		}
+
+		buf = usb_alloc_coherent(dev->udev, RX_BUFFER_SIZE, GFP_KERNEL,
+					 &urb->transfer_dma);
+		if (!buf) {
+			dev_warn(dev->udev->dev.parent,
+				 "No memory left for USB buffer\n");
+			err = -ENOMEM;
+			goto freeurb;
+		}
+
+		usb_fill_bulk_urb(urb, dev->udev,
+				  usb_rcvbulkpipe(dev->udev, 1),
+				  buf, RX_BUFFER_SIZE,
+				  esd_usb2_read_bulk_callback, dev);
+		urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+		usb_anchor_urb(urb, &dev->rx_submitted);
+
+		err = usb_submit_urb(urb, GFP_KERNEL);
+		if (err) {
+			usb_unanchor_urb(urb);
+			usb_free_coherent(dev->udev, RX_BUFFER_SIZE, buf,
+					  urb->transfer_dma);
+		}
+
+freeurb:
+		/* Drop reference, USB core will take care of freeing it */
+		usb_free_urb(urb);
+		if (err)
+			break;
+	}
+
+	/* Did we submit any URBs? */
+	if (i == 0) {
+		dev_err(dev->udev->dev.parent, "couldn't setup read URBs\n");
+		return err;
+	}
+
+	/* Warn if we couldn't submit all the URBs */
+	if (i < MAX_RX_URBS) {
+		dev_warn(dev->udev->dev.parent,
+			 "rx performance may be slow\n");
+	}
+
+	dev->rxinitdone = 1;
+	return 0;
+}
+
+/*
+ * Start interface
+ */
+static int esd_usb2_start(struct esd_usb2_net_priv *priv)
+{
+	struct esd_usb2 *dev = priv->usb2;
+	struct net_device *netdev = priv->netdev;
+	struct esd_usb2_msg msg;
+	int err, i;
+
+	/*
+	 * Enable all IDs
+	 * The IDADD message takes up to 64 32-bit bitmasks (2048 bits).
+	 * Each bit represents one 11 bit CAN identifier. A set bit
+	 * enables reception of the corresponding CAN identifier. A cleared
+	 * bit disables this identifier. An additional bitmask value
+	 * following the CAN 2.0A bits is used to enable reception of
+	 * extended CAN frames. Only the LSB of this final mask is checked
+	 * for the complete 29 bit ID range. The IDADD message also allows
+	 * filter configuration for an ID subset. In this case you can add
+	 * the number of the starting bitmask (0..64) to the filter.option
+	 * field followed by only some bitmasks.
+	 */
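+	/*
+	 * Illustration only (not what is done here): accepting just the
+	 * IDs 0..31 should take a single 0xffffffff mask word for
+	 * segment 0, i.e. hdr.len = 3.
+	 */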
+	msg.msg.hdr.cmd = CMD_IDADD;
+	msg.msg.hdr.len = 2 + ESD_MAX_ID_SEGMENT;
+	msg.msg.filter.net = priv->index;
+	msg.msg.filter.option = ESD_ID_ENABLE; /* start with segment 0 */
+	for (i = 0; i < ESD_MAX_ID_SEGMENT; i++)
+		msg.msg.filter.mask[i] = cpu_to_le32(0xffffffff);
+	/* enable 29bit extended IDs */
+	msg.msg.filter.mask[ESD_MAX_ID_SEGMENT] = cpu_to_le32(0x00000001);
+
+	err = esd_usb2_send_msg(dev, &msg);
+	if (err)
+		goto failed;
+
+	err = esd_usb2_setup_rx_urbs(dev);
+	if (err)
+		goto failed;
+
+	priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+	return 0;
+
+failed:
+	if (err == -ENODEV)
+		netif_device_detach(netdev);
+
+	dev_err(netdev->dev.parent, "couldn't start device: %d\n", err);
+
+	return err;
+}
+
+static void unlink_all_urbs(struct esd_usb2 *dev)
+{
+	struct esd_usb2_net_priv *priv;
+	int i, j;
+
+	usb_kill_anchored_urbs(&dev->rx_submitted);
+	for (i = 0; i < dev->net_count; i++) {
+		priv = dev->nets[i];
+		if (priv) {
+			usb_kill_anchored_urbs(&priv->tx_submitted);
+			atomic_set(&priv->active_tx_jobs, 0);
+
+			for (j = 0; j < MAX_TX_URBS; j++)
+				priv->tx_contexts[j].echo_index = MAX_TX_URBS;
+		}
+	}
+}
+
+static int esd_usb2_open(struct net_device *netdev)
+{
+	struct esd_usb2_net_priv *priv = netdev_priv(netdev);
+	int err;
+
+	/* common open */
+	err = open_candev(netdev);
+	if (err)
+		return err;
+
+	/* finally start device */
+	err = esd_usb2_start(priv);
+	if (err) {
+		dev_warn(netdev->dev.parent,
+			 "couldn't start device: %d\n", err);
+		close_candev(netdev);
+		return err;
+	}
+
+	priv->open_time = jiffies;
+
+	netif_start_queue(netdev);
+
+	return 0;
+}
+
+static netdev_tx_t esd_usb2_start_xmit(struct sk_buff *skb,
+				      struct net_device *netdev)
+{
+	struct esd_usb2_net_priv *priv = netdev_priv(netdev);
+	struct esd_usb2 *dev = priv->usb2;
+	struct esd_tx_urb_context *context = NULL;
+	struct net_device_stats *stats = &netdev->stats;
+	struct can_frame *cf = (struct can_frame *)skb->data;
+	struct esd_usb2_msg *msg;
+	struct urb *urb;
+	u8 *buf;
+	int i, err;
+	int ret = NETDEV_TX_OK;
+	size_t size = sizeof(struct esd_usb2_msg);
+
+	if (can_dropped_invalid_skb(netdev, skb))
+		return NETDEV_TX_OK;
+
+	/* create a URB, and a buffer for it, and copy the data to the URB */
+	urb = usb_alloc_urb(0, GFP_ATOMIC);
+	if (!urb) {
+		dev_err(netdev->dev.parent, "No memory left for URBs\n");
+		stats->tx_dropped++;
+		dev_kfree_skb(skb);
+		goto nourbmem;
+	}
+
+	buf = usb_alloc_coherent(dev->udev, size, GFP_ATOMIC,
+				 &urb->transfer_dma);
+	if (!buf) {
+		dev_err(netdev->dev.parent, "No memory left for USB buffer\n");
+		stats->tx_dropped++;
+		dev_kfree_skb(skb);
+		goto nobufmem;
+	}
+
+	msg = (struct esd_usb2_msg *)buf;
+
+	msg->msg.hdr.len = 3; /* minimal length */
+	msg->msg.hdr.cmd = CMD_CAN_TX;
+	msg->msg.tx.net = priv->index;
+	msg->msg.tx.dlc = cf->can_dlc;
+	msg->msg.tx.id = cpu_to_le32(cf->can_id & CAN_ERR_MASK);
+
+	if (cf->can_id & CAN_RTR_FLAG)
+		msg->msg.tx.dlc |= ESD_RTR;
+
+	if (cf->can_id & CAN_EFF_FLAG)
+		msg->msg.tx.id |= cpu_to_le32(ESD_EXTID);
+
+	for (i = 0; i < cf->can_dlc; i++)
+		msg->msg.tx.data[i] = cf->data[i];
+
+	msg->msg.hdr.len += (cf->can_dlc + 3) >> 2;
+
+	for (i = 0; i < MAX_TX_URBS; i++) {
+		if (priv->tx_contexts[i].echo_index == MAX_TX_URBS) {
+			context = &priv->tx_contexts[i];
+			break;
+		}
+	}
+
+	/*
+	 * This should never happen.
+	 */
+	if (!context) {
+		dev_warn(netdev->dev.parent, "couldn't find free context\n");
+		ret = NETDEV_TX_BUSY;
+		goto releasebuf;
+	}
+
+	context->priv = priv;
+	context->echo_index = i;
+	context->dlc = cf->can_dlc;
+
+	/* hnd must not be 0 - MSB is stripped in txdone handling */
+	msg->msg.tx.hnd = 0x80000000 | i; /* returned in TX done message */
+
+	usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, 2), buf,
+			  msg->msg.hdr.len << 2,
+			  esd_usb2_write_bulk_callback, context);
+
+	urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+	usb_anchor_urb(urb, &priv->tx_submitted);
+
+	can_put_echo_skb(skb, netdev, context->echo_index);
+
+	atomic_inc(&priv->active_tx_jobs);
+
+	/* Slow down tx path */
+	if (atomic_read(&priv->active_tx_jobs) >= MAX_TX_URBS)
+		netif_stop_queue(netdev);
+
+	err = usb_submit_urb(urb, GFP_ATOMIC);
+	if (err) {
+		can_free_echo_skb(netdev, context->echo_index);
+
+		atomic_dec(&priv->active_tx_jobs);
+		usb_unanchor_urb(urb);
+
+		stats->tx_dropped++;
+
+		if (err == -ENODEV)
+			netif_device_detach(netdev);
+		else
+			dev_warn(netdev->dev.parent, "failed tx_urb %d\n", err);
+
+		goto releasebuf;
+	}
+
+	netdev->trans_start = jiffies;
+
+	/*
+	 * Release our reference to this URB, the USB core will eventually free
+	 * it entirely.
+	 */
+	usb_free_urb(urb);
+
+	return NETDEV_TX_OK;
+
+releasebuf:
+	usb_free_coherent(dev->udev, size, buf, urb->transfer_dma);
+
+nobufmem:
+	usb_free_urb(urb);
+
+nourbmem:
+	return ret;
+}
+
+static int esd_usb2_close(struct net_device *netdev)
+{
+	struct esd_usb2_net_priv *priv = netdev_priv(netdev);
+	struct esd_usb2_msg msg;
+	int i;
+
+	/* Disable all IDs (see esd_usb2_start()) */
+	msg.msg.hdr.cmd = CMD_IDADD;
+	msg.msg.hdr.len = 2 + ESD_MAX_ID_SEGMENT;
+	msg.msg.filter.net = priv->index;
+	msg.msg.filter.option = ESD_ID_ENABLE; /* start with segment 0 */
+	for (i = 0; i <= ESD_MAX_ID_SEGMENT; i++)
+		msg.msg.filter.mask[i] = 0;
+	if (esd_usb2_send_msg(priv->usb2, &msg) < 0)
+		dev_err(netdev->dev.parent, "sending idadd message failed\n");
+
+	/* set CAN controller to reset mode */
+	msg.msg.hdr.len = 2;
+	msg.msg.hdr.cmd = CMD_SETBAUD;
+	msg.msg.setbaud.net = priv->index;
+	msg.msg.setbaud.rsvd = 0;
+	msg.msg.setbaud.baud = cpu_to_le32(ESD_USB2_NO_BAUDRATE);
+	if (esd_usb2_send_msg(priv->usb2, &msg) < 0)
+		dev_err(netdev->dev.parent, "sending setbaud message failed\n");
+
+	priv->can.state = CAN_STATE_STOPPED;
+
+	netif_stop_queue(netdev);
+
+	close_candev(netdev);
+
+	priv->open_time = 0;
+
+	return 0;
+}
+
+static const struct net_device_ops esd_usb2_netdev_ops = {
+	.ndo_open = esd_usb2_open,
+	.ndo_stop = esd_usb2_close,
+	.ndo_start_xmit = esd_usb2_start_xmit,
+};
+
+static struct can_bittiming_const esd_usb2_bittiming_const = {
+	.name = "esd_usb2",
+	.tseg1_min = ESD_USB2_TSEG1_MIN,
+	.tseg1_max = ESD_USB2_TSEG1_MAX,
+	.tseg2_min = ESD_USB2_TSEG2_MIN,
+	.tseg2_max = ESD_USB2_TSEG2_MAX,
+	.sjw_max = ESD_USB2_SJW_MAX,
+	.brp_min = ESD_USB2_BRP_MIN,
+	.brp_max = ESD_USB2_BRP_MAX,
+	.brp_inc = ESD_USB2_BRP_INC,
+};
+
+static int esd_usb2_set_bittiming(struct net_device *netdev)
+{
+	struct esd_usb2_net_priv *priv = netdev_priv(netdev);
+	struct can_bittiming *bt = &priv->can.bittiming;
+	struct esd_usb2_msg msg;
+	u32 canbtr;
+
+	canbtr = ESD_USB2_UBR;
+	canbtr |= (bt->brp - 1) & (ESD_USB2_BRP_MAX - 1);
+	canbtr |= ((bt->sjw - 1) & (ESD_USB2_SJW_MAX - 1))
+		<< ESD_USB2_SJW_SHIFT;
+	canbtr |= ((bt->prop_seg + bt->phase_seg1 - 1)
+		   & (ESD_USB2_TSEG1_MAX - 1))
+		<< ESD_USB2_TSEG1_SHIFT;
+	canbtr |= ((bt->phase_seg2 - 1) & (ESD_USB2_TSEG2_MAX - 1))
+		<< ESD_USB2_TSEG2_SHIFT;
+	if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
+		canbtr |= ESD_USB2_3_SAMPLES;
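+
+	/*
+	 * Example, assuming the 60 MHz device clock: 500 kbit/s with 12 tq
+	 * per bit gives brp = 10, tseg1 = 8, tseg2 = 3, encoded here as 9,
+	 * 7 and 2 in their respective bit fields.
+	 */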
+
+	msg.msg.hdr.len = 2;
+	msg.msg.hdr.cmd = CMD_SETBAUD;
+	msg.msg.setbaud.net = priv->index;
+	msg.msg.setbaud.rsvd = 0;
+	msg.msg.setbaud.baud = cpu_to_le32(canbtr);
+
+	dev_info(netdev->dev.parent, "setting BTR=%#x\n", canbtr);
+
+	return esd_usb2_send_msg(priv->usb2, &msg);
+}
+
+static int esd_usb2_get_berr_counter(const struct net_device *netdev,
+				     struct can_berr_counter *bec)
+{
+	struct esd_usb2_net_priv *priv = netdev_priv(netdev);
+
+	bec->txerr = priv->bec.txerr;
+	bec->rxerr = priv->bec.rxerr;
+
+	return 0;
+}
+
+static int esd_usb2_set_mode(struct net_device *netdev, enum can_mode mode)
+{
+	struct esd_usb2_net_priv *priv = netdev_priv(netdev);
+
+	if (!priv->open_time)
+		return -EINVAL;
+
+	switch (mode) {
+	case CAN_MODE_START:
+		netif_wake_queue(netdev);
+		break;
+
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+static int esd_usb2_probe_one_net(struct usb_interface *intf, int index)
+{
+	struct esd_usb2 *dev = usb_get_intfdata(intf);
+	struct net_device *netdev;
+	struct esd_usb2_net_priv *priv;
+	int err = 0;
+	int i;
+
+	netdev = alloc_candev(sizeof(*priv), MAX_TX_URBS);
+	if (!netdev) {
+		dev_err(&intf->dev, "couldn't alloc candev\n");
+		err = -ENOMEM;
+		goto done;
+	}
+
+	priv = netdev_priv(netdev);
+
+	init_usb_anchor(&priv->tx_submitted);
+	atomic_set(&priv->active_tx_jobs, 0);
+
+	for (i = 0; i < MAX_TX_URBS; i++)
+		priv->tx_contexts[i].echo_index = MAX_TX_URBS;
+
+	priv->usb2 = dev;
+	priv->netdev = netdev;
+	priv->index = index;
+
+	priv->can.state = CAN_STATE_STOPPED;
+	priv->can.clock.freq = ESD_USB2_CAN_CLOCK;
+	priv->can.bittiming_const = &esd_usb2_bittiming_const;
+	priv->can.do_set_bittiming = esd_usb2_set_bittiming;
+	priv->can.do_set_mode = esd_usb2_set_mode;
+	priv->can.do_get_berr_counter = esd_usb2_get_berr_counter;
+	priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
+
+	netdev->flags |= IFF_ECHO; /* we support local echo */
+
+	netdev->netdev_ops = &esd_usb2_netdev_ops;
+
+	SET_NETDEV_DEV(netdev, &intf->dev);
+
+	err = register_candev(netdev);
+	if (err) {
+		dev_err(&intf->dev,
+			"couldn't register CAN device: %d\n", err);
+		free_candev(netdev);
+		goto done;
+	}
+
+	dev->nets[index] = priv;
+	dev_info(netdev->dev.parent, "device %s registered\n", netdev->name);
+
+done:
+	return err;
+}
+
+/*
+ * probe function for new USB2 devices
+ *
+ * check version information and number of available
+ * CAN interfaces
+ */
+static int esd_usb2_probe(struct usb_interface *intf,
+			 const struct usb_device_id *id)
+{
+	struct esd_usb2 *dev;
+	struct esd_usb2_msg msg;
+	int i, err;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev) {
+		err = -ENOMEM;
+		goto done;
+	}
+
+	dev->udev = interface_to_usbdev(intf);
+
+	init_usb_anchor(&dev->rx_submitted);
+
+	usb_set_intfdata(intf, dev);
+
+	/* query number of CAN interfaces (nets) */
+	msg.msg.hdr.cmd = CMD_VERSION;
+	msg.msg.hdr.len = 2;
+	msg.msg.version.rsvd = 0;
+	msg.msg.version.flags = 0;
+	msg.msg.version.drv_version = 0;
+
+	err = esd_usb2_send_msg(dev, &msg);
+	if (err < 0) {
+		dev_err(&intf->dev, "sending version message failed\n");
+		goto free_dev;
+	}
+
+	err = esd_usb2_wait_msg(dev, &msg);
+	if (err < 0) {
+		dev_err(&intf->dev, "no version message answer\n");
+		goto free_dev;
+	}
+
+	dev->net_count = (int)msg.msg.version_reply.nets;
+	dev->version = le32_to_cpu(msg.msg.version_reply.version);
+
+	if (device_create_file(&intf->dev, &dev_attr_firmware))
+		dev_err(&intf->dev,
+			"Couldn't create device file for firmware\n");
+
+	if (device_create_file(&intf->dev, &dev_attr_hardware))
+		dev_err(&intf->dev,
+			"Couldn't create device file for hardware\n");
+
+	if (device_create_file(&intf->dev, &dev_attr_nets))
+		dev_err(&intf->dev,
+			"Couldn't create device file for nets\n");
+
+	/* do per device probing */
+	for (i = 0; i < dev->net_count; i++)
+		esd_usb2_probe_one_net(intf, i);
+
+	return 0;
+
+free_dev:
+	kfree(dev);
+done:
+	return err;
+}
+
+/*
+ * called by the usb core when the device is removed from the system
+ */
+static void esd_usb2_disconnect(struct usb_interface *intf)
+{
+	struct esd_usb2 *dev = usb_get_intfdata(intf);
+	struct net_device *netdev;
+	int i;
+
+	device_remove_file(&intf->dev, &dev_attr_firmware);
+	device_remove_file(&intf->dev, &dev_attr_hardware);
+	device_remove_file(&intf->dev, &dev_attr_nets);
+
+	usb_set_intfdata(intf, NULL);
+
+	if (dev) {
+		for (i = 0; i < dev->net_count; i++) {
+			if (dev->nets[i]) {
+				netdev = dev->nets[i]->netdev;
+				unregister_netdev(netdev);
+				free_candev(netdev);
+			}
+		}
+		unlink_all_urbs(dev);
+	}
+}
+
+/* usb specific object needed to register this driver with the usb subsystem */
+static struct usb_driver esd_usb2_driver = {
+	.name = "esd_usb2",
+	.probe = esd_usb2_probe,
+	.disconnect = esd_usb2_disconnect,
+	.id_table = esd_usb2_table,
+};
+
+static int __init esd_usb2_init(void)
+{
+	int err;
+
+	/* register this driver with the USB subsystem */
+	err = usb_register(&esd_usb2_driver);
+
+	if (err) {
+		err("usb_register failed. Error number %d\n", err);
+		return err;
+	}
+
+	return 0;
+}
+module_init(esd_usb2_init);
+
+static void __exit esd_usb2_exit(void)
+{
+	/* deregister this driver with the USB subsystem */
+	usb_deregister(&esd_usb2_driver);
+}
+module_exit(esd_usb2_exit);
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 5ecf0bc..0961032 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -40,9 +40,9 @@
 
 #include "cnic_if.h"
 #include "bnx2.h"
-#include "bnx2x_reg.h"
-#include "bnx2x_fw_defs.h"
-#include "bnx2x_hsi.h"
+#include "bnx2x/bnx2x_reg.h"
+#include "bnx2x/bnx2x_fw_defs.h"
+#include "bnx2x/bnx2x_hsi.h"
 #include "../scsi/bnx2i/57xx_iscsi_constants.h"
 #include "../scsi/bnx2i/57xx_iscsi_hsi.h"
 #include "cnic.h"
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index 95a8ba0..427c451 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -679,14 +679,6 @@
 	return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
 }
 
-/*
- * Convert a character holding a hex digit to a number.
- */
-static unsigned int hex2int(unsigned char c)
-{
-	return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
-}
-
 /**
  *	get_vpd_params - read VPD parameters from VPD EEPROM
  *	@adapter: adapter to read
@@ -727,15 +719,15 @@
 		p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
 		p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
 	} else {
-		p->port_type[0] = hex2int(vpd.port0_data[0]);
-		p->port_type[1] = hex2int(vpd.port1_data[0]);
+		p->port_type[0] = hex_to_bin(vpd.port0_data[0]);
+		p->port_type[1] = hex_to_bin(vpd.port1_data[0]);
 		p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
 		p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
 	}
 
 	for (i = 0; i < 6; i++)
-		p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
-				 hex2int(vpd.na_data[2 * i + 1]);
+		p->eth_base[i] = hex_to_bin(vpd.na_data[2 * i]) * 16 +
+				 hex_to_bin(vpd.na_data[2 * i + 1]);
 	return 0;
 }
 
diff --git a/drivers/net/cxgb4/cxgb4.h b/drivers/net/cxgb4/cxgb4.h
index 4769c1c..6e562c0 100644
--- a/drivers/net/cxgb4/cxgb4.h
+++ b/drivers/net/cxgb4/cxgb4.h
@@ -482,7 +482,8 @@
 	struct pci_dev *pdev;
 	struct device *pdev_dev;
 	unsigned long registered_device_map;
-	unsigned long flags;
+	unsigned int fn;
+	unsigned int flags;
 
 	const char *name;
 	int msg_enable;
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
index 0af6d67..c327527 100644
--- a/drivers/net/cxgb4/cxgb4_main.c
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -67,7 +67,7 @@
 #include "t4fw_api.h"
 #include "l2t.h"
 
-#define DRV_VERSION "1.0.0-ko"
+#define DRV_VERSION "1.3.0-ko"
 #define DRV_DESC "Chelsio T4 Network Driver"
 
 /*
@@ -171,10 +171,20 @@
 			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
 			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
 
-#define CH_DEVICE(devid) { PCI_VDEVICE(CHELSIO, devid), 0 }
+#define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) }
 
 static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
-	CH_DEVICE(0xa000),  /* PE10K */
+	CH_DEVICE(0xa000, 0),  /* PE10K */
+	CH_DEVICE(0x4001, 0),
+	CH_DEVICE(0x4002, 0),
+	CH_DEVICE(0x4003, 0),
+	CH_DEVICE(0x4004, 0),
+	CH_DEVICE(0x4005, 0),
+	CH_DEVICE(0x4006, 0),
+	CH_DEVICE(0x4007, 0),
+	CH_DEVICE(0x4008, 0),
+	CH_DEVICE(0x4009, 0),
+	CH_DEVICE(0x400a, 0),
 	{ 0, }
 };
 
@@ -314,12 +324,13 @@
 	int uc_cnt = netdev_uc_count(dev);
 	int mc_cnt = netdev_mc_count(dev);
 	const struct port_info *pi = netdev_priv(dev);
+	unsigned int mb = pi->adapter->fn;
 
 	/* first do the secondary unicast addresses */
 	netdev_for_each_uc_addr(ha, dev) {
 		addr[naddr++] = ha->addr;
 		if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
-			ret = t4_alloc_mac_filt(pi->adapter, 0, pi->viid, free,
+			ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
 					naddr, addr, filt_idx, &uhash, sleep);
 			if (ret < 0)
 				return ret;
@@ -333,7 +344,7 @@
 	netdev_for_each_mc_addr(ha, dev) {
 		addr[naddr++] = ha->addr;
 		if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
-			ret = t4_alloc_mac_filt(pi->adapter, 0, pi->viid, free,
+			ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
 					naddr, addr, filt_idx, &mhash, sleep);
 			if (ret < 0)
 				return ret;
@@ -343,7 +354,7 @@
 		}
 	}
 
-	return t4_set_addr_hash(pi->adapter, 0, pi->viid, uhash != 0,
+	return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
 				uhash | mhash, sleep);
 }
 
@@ -358,7 +369,7 @@
 
 	ret = set_addr_filters(dev, sleep_ok);
 	if (ret == 0)
-		ret = t4_set_rxmode(pi->adapter, 0, pi->viid, mtu,
+		ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
 				    (dev->flags & IFF_PROMISC) ? 1 : 0,
 				    (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
 				    sleep_ok);
@@ -375,15 +386,16 @@
 {
 	int ret;
 	struct port_info *pi = netdev_priv(dev);
+	unsigned int mb = pi->adapter->fn;
 
 	/*
 	 * We do not set address filters and promiscuity here, the stack does
 	 * that step explicitly.
 	 */
-	ret = t4_set_rxmode(pi->adapter, 0, pi->viid, dev->mtu, -1, -1, -1,
+	ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
 			    pi->vlan_grp != NULL, true);
 	if (ret == 0) {
-		ret = t4_change_mac(pi->adapter, 0, pi->viid,
+		ret = t4_change_mac(pi->adapter, mb, pi->viid,
 				    pi->xact_addr_filt, dev->dev_addr, true,
 				    true);
 		if (ret >= 0) {
@@ -392,9 +404,10 @@
 		}
 	}
 	if (ret == 0)
-		ret = t4_link_start(pi->adapter, 0, pi->tx_chan, &pi->link_cfg);
+		ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
+				    &pi->link_cfg);
 	if (ret == 0)
-		ret = t4_enable_vi(pi->adapter, 0, pi->viid, true, true);
+		ret = t4_enable_vi(pi->adapter, mb, pi->viid, true, true);
 	return ret;
 }
 
@@ -618,8 +631,8 @@
 	for (i = 0; i < pi->rss_size; i++, queues++)
 		rss[i] = q[*queues].rspq.abs_id;
 
-	err = t4_config_rss_range(pi->adapter, 0, pi->viid, 0, pi->rss_size,
-				  rss, pi->rss_size);
+	err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
+				  pi->rss_size, rss, pi->rss_size);
 	kfree(rss);
 	return err;
 }
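
The hunk above also routes the RSS table write through the adapter's own PF mailbox. For context, write_rss() uploads an indirection table spreading the port's rss_size slots across its receive queues; below is a minimal standalone sketch of that fill pattern, with queue IDs and table size made up for illustration:

```c
/*
 * Hedged sketch of an RSS indirection-table fill like the one
 * write_rss() uploads via t4_config_rss_range(): each of the port's
 * slots gets one of its queue IDs, round-robin.  All values here are
 * illustrative only.
 */
#include <stdio.h>

int main(void)
{
	unsigned int abs_id[] = { 16, 17, 18, 19 };	/* hypothetical queue IDs */
	unsigned int nq = sizeof(abs_id) / sizeof(abs_id[0]);
	unsigned int rss[128];				/* indirection table */
	unsigned int i;

	for (i = 0; i < 128; i++)
		rss[i] = abs_id[i % nq];		/* round-robin fill */

	printf("slot 0 -> q%u, slot 5 -> q%u\n", rss[0], rss[5]);
	return 0;
}
```
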
@@ -1307,16 +1320,18 @@
 		return -EAGAIN;
 	if (p->link_cfg.autoneg != AUTONEG_ENABLE)
 		return -EINVAL;
-	t4_restart_aneg(p->adapter, 0, p->tx_chan);
+	t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
 	return 0;
 }
 
 static int identify_port(struct net_device *dev, u32 data)
 {
+	struct adapter *adap = netdev2adap(dev);
+
 	if (data == 0)
 		data = 2;     /* default to 2 seconds */
 
-	return t4_identify_port(netdev2adap(dev), 0, netdev2pinfo(dev)->viid,
+	return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid,
 				data * 5);
 }
 
@@ -1456,7 +1471,8 @@
 	lc->autoneg = cmd->autoneg;
 
 	if (netif_running(dev))
-		return t4_link_start(p->adapter, 0, p->tx_chan, lc);
+		return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
+				     lc);
 	return 0;
 }
 
@@ -1488,7 +1504,8 @@
 	if (epause->tx_pause)
 		lc->requested_fc |= PAUSE_TX;
 	if (netif_running(dev))
-		return t4_link_start(p->adapter, 0, p->tx_chan, lc);
+		return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
+				     lc);
 	return 0;
 }
 
@@ -1620,7 +1637,8 @@
 			v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
 			    FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
 			    FW_PARAMS_PARAM_YZ(q->cntxt_id);
-			err = t4_set_params(adap, 0, 0, 0, 1, &v, &new_idx);
+			err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
+					    &new_idx);
 			if (err)
 				return err;
 		}
@@ -1808,12 +1826,14 @@
 	return err;
 }
 
+#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
+
 static int set_tso(struct net_device *dev, u32 value)
 {
 	if (value)
-		dev->features |= NETIF_F_TSO | NETIF_F_TSO6;
+		dev->features |= TSO_FLAGS;
 	else
-		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+		dev->features &= ~TSO_FLAGS;
 	return 0;
 }
 
@@ -2494,9 +2514,11 @@
 	lli.adapter_type = adap->params.rev;
 	lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
 	lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
-			t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF));
+			t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
+			(adap->fn * 4));
 	lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
-			t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF));
+			t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
+			(adap->fn * 4));
 	lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
 	lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
 	lli.fw_vers = adap->params.fw_vers;
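
The density fix above relies on the SGE queues-per-page registers packing one 4-bit field per PCI function, hence the extra `>> (adap->fn * 4)` before the PF0 accessor is applied. A hedged sketch of that extraction; the field layout and 0xf mask are assumptions modeled on the QUEUESPERPAGEPF0 accessors:

```c
/*
 * Hedged sketch: PF n's queues-per-page value is assumed to live at
 * bits [4n+3:4n]; the 0xf mask mirrors what a QUEUESPERPAGEPF0_GET()
 * style accessor would apply after the shift.
 */
#include <stdio.h>

static unsigned int qpp_for_pf(unsigned int reg, unsigned int fn)
{
	return (reg >> (fn * 4)) & 0xf;		/* select PF fn's nibble */
}

int main(void)
{
	unsigned int reg = 0x00432100;		/* example register image */
	unsigned int fn;

	for (fn = 0; fn < 6; fn++)
		printf("PF%u: field %u -> density %u\n",
		       fn, qpp_for_pf(reg, fn), 1u << qpp_for_pf(reg, fn));
	return 0;
}
```
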
@@ -2713,7 +2735,7 @@
 
 	netif_tx_stop_all_queues(dev);
 	netif_carrier_off(dev);
-	return t4_enable_vi(adapter, 0, pi->viid, false, false);
+	return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
 }
 
 static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
@@ -2760,6 +2782,7 @@
 
 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
 {
+	unsigned int mbox;
 	int ret = 0, prtad, devad;
 	struct port_info *pi = netdev_priv(dev);
 	struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
@@ -2782,11 +2805,12 @@
 		} else
 			return -EINVAL;
 
+		mbox = pi->adapter->fn;
 		if (cmd == SIOCGMIIREG)
-			ret = t4_mdio_rd(pi->adapter, 0, prtad, devad,
+			ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
 					 data->reg_num, &data->val_out);
 		else
-			ret = t4_mdio_wr(pi->adapter, 0, prtad, devad,
+			ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
 					 data->reg_num, data->val_in);
 		break;
 	default:
@@ -2808,8 +2832,8 @@
 
 	if (new_mtu < 81 || new_mtu > MAX_MTU)         /* accommodate SACK */
 		return -EINVAL;
-	ret = t4_set_rxmode(pi->adapter, 0, pi->viid, new_mtu, -1, -1, -1, -1,
-			    true);
+	ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
+			    -1, -1, -1, true);
 	if (!ret)
 		dev->mtu = new_mtu;
 	return ret;
@@ -2824,8 +2848,8 @@
 	if (!is_valid_ether_addr(addr->sa_data))
 		return -EINVAL;
 
-	ret = t4_change_mac(pi->adapter, 0, pi->viid, pi->xact_addr_filt,
-			    addr->sa_data, true, true);
+	ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
+			    pi->xact_addr_filt, addr->sa_data, true, true);
 	if (ret < 0)
 		return ret;
 
@@ -2839,8 +2863,8 @@
 	struct port_info *pi = netdev_priv(dev);
 
 	pi->vlan_grp = grp;
-	t4_set_rxmode(pi->adapter, 0, pi->viid, -1, -1, -1, -1, grp != NULL,
-		      true);
+	t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1, -1, -1, -1,
+		      grp != NULL, true);
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -2897,6 +2921,21 @@
 	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
 		     (bar0 + MEMWIN2_BASE) | BIR(0) |
 		     WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
+	if (adap->vres.ocq.size) {
+		unsigned int start, sz_kb;
+
+		start = pci_resource_start(adap->pdev, 2) +
+			OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
+		sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
+		t4_write_reg(adap,
+			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3),
+			     start | BIR(1) | WINDOW(ilog2(sz_kb)));
+		t4_write_reg(adap,
+			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3),
+			     adap->vres.ocq.start);
+		t4_read_reg(adap,
+			    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3));
+	}
 }
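
The new OCQ branch sizes memory window 3 from the on-chip queue region: round the region up to a power of two, express it in KB, and program log2 of that into the WINDOW field. The arithmetic is easy to check standalone; the helpers below stand in for the kernel's roundup_pow_of_two()/ilog2(), and the region size is made up:

```c
#include <stdio.h>

static unsigned long roundup_pow_of_two(unsigned long x)
{
	unsigned long r = 1;

	while (r < x)
		r <<= 1;
	return r;
}

static unsigned int ilog2(unsigned long x)
{
	unsigned int l = 0;

	while (x >>= 1)
		l++;
	return l;
}

int main(void)
{
	unsigned long ocq_size = 72 * 1024;	/* hypothetical 72 KB region */
	unsigned long sz_kb = roundup_pow_of_two(ocq_size) >> 10;

	/* prints: window: 128 KB, WINDOW field = 7 */
	printf("window: %lu KB, WINDOW field = %u\n", sz_kb, ilog2(sz_kb));
	return 0;
}
```
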
 
 static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
@@ -2909,7 +2948,7 @@
 	c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
 			       FW_CMD_REQUEST | FW_CMD_READ);
 	c->retval_len16 = htonl(FW_LEN16(*c));
-	ret = t4_wr_mbox(adap, 0, c, sizeof(*c), c);
+	ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
 	if (ret < 0)
 		return ret;
 
@@ -2925,37 +2964,33 @@
 	}
 	c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
 			       FW_CMD_REQUEST | FW_CMD_WRITE);
-	ret = t4_wr_mbox(adap, 0, c, sizeof(*c), NULL);
+	ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
 	if (ret < 0)
 		return ret;
 
-	ret = t4_config_glbl_rss(adap, 0,
+	ret = t4_config_glbl_rss(adap, adap->fn,
 				 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
 				 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
 				 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
 	if (ret < 0)
 		return ret;
 
-	ret = t4_cfg_pfvf(adap, 0, 0, 0, MAX_EGRQ, 64, MAX_INGQ, 0, 0, 4,
-			  0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
+	ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
+			  0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
 	if (ret < 0)
 		return ret;
 
 	t4_sge_init(adap);
 
-	/* get basic stuff going */
-	ret = t4_early_init(adap, 0);
-	if (ret < 0)
-		return ret;
-
 	/* tweak some settings */
 	t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
 	t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
 	t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
 	v = t4_read_reg(adap, TP_PIO_DATA);
 	t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
-	setup_memwin(adap);
-	return 0;
+
+	/* get basic stuff going */
+	return t4_early_init(adap, adap->fn);
 }
 
 /*
@@ -2983,7 +3018,7 @@
 		return ret;
 
 	/* contact FW, request master */
-	ret = t4_fw_hello(adap, 0, 0, MASTER_MUST, &state);
+	ret = t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, &state);
 	if (ret < 0) {
 		dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
 			ret);
@@ -2991,7 +3026,7 @@
 	}
 
 	/* reset device */
-	ret = t4_fw_reset(adap, 0, PIORSTMODE | PIORST);
+	ret = t4_fw_reset(adap, adap->fn, PIORSTMODE | PIORST);
 	if (ret < 0)
 		goto bye;
 
@@ -3007,7 +3042,7 @@
 	 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
 
 	params[0] = FW_PARAM_DEV(CCLK);
-	ret = t4_query_params(adap, 0, 0, 0, 1, params, val);
+	ret = t4_query_params(adap, adap->fn, adap->fn, 0, 1, params, val);
 	if (ret < 0)
 		goto bye;
 	adap->params.vpd.cclk = val[0];
@@ -3018,14 +3053,15 @@
 
 #define FW_PARAM_PFVF(param) \
 	(FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
-	 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
+	 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) | \
+	 FW_PARAMS_PARAM_Y(adap->fn))
 
 	params[0] = FW_PARAM_DEV(PORTVEC);
 	params[1] = FW_PARAM_PFVF(L2T_START);
 	params[2] = FW_PARAM_PFVF(L2T_END);
 	params[3] = FW_PARAM_PFVF(FILTER_START);
 	params[4] = FW_PARAM_PFVF(FILTER_END);
-	ret = t4_query_params(adap, 0, 0, 0, 5, params, val);
+	ret = t4_query_params(adap, adap->fn, adap->fn, 0, 5, params, val);
 	if (ret < 0)
 		goto bye;
 	port_vec = val[0];
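
FW_PARAM_PFVF() now ORs in FW_PARAMS_PARAM_Y(adap->fn) so per-PF parameter queries name the function they concern. A hedged sketch of the parameter-word packing; the shifts and values below are assumptions modeled on the FW_PARAMS_* macros, not copied from t4fw_api.h:

```c
#include <stdio.h>

/* assumed layout: mnemonic<<24 | x<<16 | y<<8 | z -- illustrative only */
#define PARAM(mnem, x, y)	(((mnem) << 24) | ((x) << 16) | ((y) << 8))

int main(void)
{
	unsigned int mnem_pfvf = 0x1;	/* hypothetical PFVF mnemonic */
	unsigned int l2t_start = 0x2;	/* hypothetical X selector    */
	unsigned int fn = 4;		/* the querying PF            */

	/* the same parameter, now tagged with the PF that asks for it */
	printf("param word: 0x%08x\n", PARAM(mnem_pfvf, l2t_start, fn));
	return 0;
}
```
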
@@ -3040,7 +3076,8 @@
 		params[3] = FW_PARAM_PFVF(TDDP_START);
 		params[4] = FW_PARAM_PFVF(TDDP_END);
 		params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
-		ret = t4_query_params(adap, 0, 0, 0, 6, params, val);
+		ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params,
+				      val);
 		if (ret < 0)
 			goto bye;
 		adap->tids.ntids = val[0];
@@ -3059,7 +3096,8 @@
 		params[3] = FW_PARAM_PFVF(RQ_END);
 		params[4] = FW_PARAM_PFVF(PBL_START);
 		params[5] = FW_PARAM_PFVF(PBL_END);
-		ret = t4_query_params(adap, 0, 0, 0, 6, params, val);
+		ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params,
+				      val);
 		if (ret < 0)
 			goto bye;
 		adap->vres.stag.start = val[0];
@@ -3073,18 +3111,24 @@
 		params[1] = FW_PARAM_PFVF(SQRQ_END);
 		params[2] = FW_PARAM_PFVF(CQ_START);
 		params[3] = FW_PARAM_PFVF(CQ_END);
-		ret = t4_query_params(adap, 0, 0, 0, 4, params, val);
+		params[4] = FW_PARAM_PFVF(OCQ_START);
+		params[5] = FW_PARAM_PFVF(OCQ_END);
+		ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params,
+				      val);
 		if (ret < 0)
 			goto bye;
 		adap->vres.qp.start = val[0];
 		adap->vres.qp.size = val[1] - val[0] + 1;
 		adap->vres.cq.start = val[2];
 		adap->vres.cq.size = val[3] - val[2] + 1;
+		adap->vres.ocq.start = val[4];
+		adap->vres.ocq.size = val[5] - val[4] + 1;
 	}
 	if (c.iscsicaps) {
 		params[0] = FW_PARAM_PFVF(ISCSI_START);
 		params[1] = FW_PARAM_PFVF(ISCSI_END);
-		ret = t4_query_params(adap, 0, 0, 0, 2, params, val);
+		ret = t4_query_params(adap, adap->fn, adap->fn, 0, 2, params,
+				      val);
 		if (ret < 0)
 			goto bye;
 		adap->vres.iscsi.start = val[0];
@@ -3122,7 +3166,7 @@
 
 			/* VF numbering starts at 1! */
 			for (vf = 1; vf <= num_vf[pf]; vf++) {
-				ret = t4_cfg_pfvf(adap, 0, pf, vf,
+				ret = t4_cfg_pfvf(adap, adap->fn, pf, vf,
 						  VFRES_NEQ, VFRES_NETHCTRL,
 						  VFRES_NIQFLINT, VFRES_NIQ,
 						  VFRES_TC, VFRES_NVI,
@@ -3139,6 +3183,7 @@
 	}
 #endif
 
+	setup_memwin(adap);
 	return 0;
 
 	/*
@@ -3147,7 +3192,7 @@
 	 * commands.
 	 */
 bye:	if (ret != -ETIMEDOUT && ret != -EIO)
-		t4_fw_bye(adap, 0);
+		t4_fw_bye(adap, adap->fn);
 	return ret;
 }
 
@@ -3203,7 +3248,7 @@
 
 	if (t4_wait_dev_ready(adap) < 0)
 		return PCI_ERS_RESULT_DISCONNECT;
-	if (t4_fw_hello(adap, 0, 0, MASTER_MUST, NULL))
+	if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL))
 		return PCI_ERS_RESULT_DISCONNECT;
 	adap->flags |= FW_OK;
 	if (adap_init1(adap, &c))
@@ -3212,7 +3257,8 @@
 	for_each_port(adap, i) {
 		struct port_info *p = adap2pinfo(adap, i);
 
-		ret = t4_alloc_vi(adap, 0, p->tx_chan, 0, 0, 1, NULL, NULL);
+		ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
+				  NULL, NULL);
 		if (ret < 0)
 			return PCI_ERS_RESULT_DISCONNECT;
 		p->viid = ret;
@@ -3221,6 +3267,7 @@
 
 	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
 		     adap->params.b_wnd);
+	setup_memwin(adap);
 	if (cxgb_up(adap))
 		return PCI_ERS_RESULT_DISCONNECT;
 	return PCI_ERS_RESULT_RECOVERED;
@@ -3516,10 +3563,10 @@
 			free_netdev(adapter->port[i]);
 		}
 	if (adapter->flags & FW_OK)
-		t4_fw_bye(adapter, 0);
+		t4_fw_bye(adapter, adapter->fn);
 }
 
-#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |\
+#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
 		   NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
 
 static int __devinit init_one(struct pci_dev *pdev,
@@ -3539,9 +3586,9 @@
 		return err;
 	}
 
-	/* We control everything through PF 0 */
+	/* We control everything through one PF */
 	func = PCI_FUNC(pdev->devfn);
-	if (func > 0) {
+	if (func != ent->driver_data) {
 		pci_save_state(pdev);        /* to restore SR-IOV later */
 		goto sriov;
 	}
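
With the PF now carried in the PCI table's driver_data, the probe gate above compares the device's own function number against it. PCI_FUNC() is just the low three bits of devfn (device number sits in bits 7:3); a runnable sketch of the gate, with the devfn value made up:

```c
#include <stdio.h>

#define PCI_FUNC(devfn)	((devfn) & 0x07)	/* function in bits 2:0 */

int main(void)
{
	unsigned int devfn = (0 << 3) | 4;	/* device 0, function 4 */
	unsigned long driver_data = 4;		/* PF this entry manages */

	if (PCI_FUNC(devfn) != driver_data)
		printf("skip: not the managing PF\n");
	else
		printf("probe continues on PF%lu\n", driver_data);
	return 0;
}
```
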
@@ -3587,6 +3634,7 @@
 
 	adapter->pdev = pdev;
 	adapter->pdev_dev = &pdev->dev;
+	adapter->fn = func;
 	adapter->name = pci_name(pdev);
 	adapter->msg_enable = dflt_msg_enable;
 	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
@@ -3625,7 +3673,7 @@
 		netif_tx_stop_all_queues(netdev);
 		netdev->irq = pdev->irq;
 
-		netdev->features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6;
+		netdev->features |= NETIF_F_SG | TSO_FLAGS;
 		netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
 		netdev->features |= NETIF_F_GRO | NETIF_F_RXHASH | highdma;
 		netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
@@ -3638,7 +3686,7 @@
 	pci_set_drvdata(pdev, adapter);
 
 	if (adapter->flags & FW_OK) {
-		err = t4_port_init(adapter, 0, 0, 0);
+		err = t4_port_init(adapter, func, func, 0);
 		if (err)
 			goto out_free_dev;
 	}
diff --git a/drivers/net/cxgb4/cxgb4_uld.h b/drivers/net/cxgb4/cxgb4_uld.h
index 0dc0866..85d74e7 100644
--- a/drivers/net/cxgb4/cxgb4_uld.h
+++ b/drivers/net/cxgb4/cxgb4_uld.h
@@ -187,8 +187,12 @@
 	struct cxgb4_range pbl;
 	struct cxgb4_range qp;
 	struct cxgb4_range cq;
+	struct cxgb4_range ocq;
 };
 
+#define OCQ_WIN_OFFSET(pdev, vres) \
+	(pci_resource_len((pdev), 2) - roundup_pow_of_two((vres)->ocq.size))
+
 /*
  * Block of information the LLD provides to ULDs attaching to a device.
  */
diff --git a/drivers/net/cxgb4/sge.c b/drivers/net/cxgb4/sge.c
index 4388f72..bf38cfc 100644
--- a/drivers/net/cxgb4/sge.c
+++ b/drivers/net/cxgb4/sge.c
@@ -931,23 +931,23 @@
 
 	ssi = skb_shinfo(skb);
 	if (ssi->gso_size) {
-		struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
+		struct cpl_tx_pkt_lso *lso = (void *)wr;
 		bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
 		int l3hdr_len = skb_network_header_len(skb);
 		int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
 
 		wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
 				       FW_WR_IMMDLEN(sizeof(*lso)));
-		lso->lso_ctrl = htonl(LSO_OPCODE(CPL_TX_PKT_LSO) |
-				      LSO_FIRST_SLICE | LSO_LAST_SLICE |
-				      LSO_IPV6(v6) |
-				      LSO_ETHHDR_LEN(eth_xtra_len / 4) |
-				      LSO_IPHDR_LEN(l3hdr_len / 4) |
-				      LSO_TCPHDR_LEN(tcp_hdr(skb)->doff));
-		lso->ipid_ofst = htons(0);
-		lso->mss = htons(ssi->gso_size);
-		lso->seqno_offset = htonl(0);
-		lso->len = htonl(skb->len);
+		lso->c.lso_ctrl = htonl(LSO_OPCODE(CPL_TX_PKT_LSO) |
+					LSO_FIRST_SLICE | LSO_LAST_SLICE |
+					LSO_IPV6(v6) |
+					LSO_ETHHDR_LEN(eth_xtra_len / 4) |
+					LSO_IPHDR_LEN(l3hdr_len / 4) |
+					LSO_TCPHDR_LEN(tcp_hdr(skb)->doff));
+		lso->c.ipid_ofst = htons(0);
+		lso->c.mss = htons(ssi->gso_size);
+		lso->c.seqno_offset = htonl(0);
+		lso->c.len = htonl(skb->len);
 		cpl = (void *)(lso + 1);
 		cntrl = TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
 			TXPKT_IPHDR_LEN(l3hdr_len) |
@@ -1593,14 +1593,15 @@
 
 	if (csum_ok && (pi->rx_offload & RX_CSO) &&
 	    (pkt->l2info & htonl(RXF_UDP | RXF_TCP))) {
-		if (!pkt->ip_frag)
+		if (!pkt->ip_frag) {
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
-		else {
+			rxq->stats.rx_cso++;
+		} else if (pkt->l2info & htonl(RXF_IP)) {
 			__sum16 c = (__force __sum16)pkt->csum;
 			skb->csum = csum_unfold(c);
 			skb->ip_summed = CHECKSUM_COMPLETE;
+			rxq->stats.rx_cso++;
 		}
-		rxq->stats.rx_cso++;
 	} else
 		skb->ip_summed = CHECKSUM_NONE;
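
The reworked branch above only reports CHECKSUM_COMPLETE for IP fragments when the new RXF_IP bit confirms an IPv4 header, and bumps rx_cso only on the paths that actually consumed the hardware sum. A compact decision-table sketch; the bit values are taken from the t4_msg.h hunk later in this series:

```c
#include <stdio.h>

#define RXF_UDP	(1 << 22)
#define RXF_TCP	(1 << 23)
#define RXF_IP	(1 << 24)

static const char *rx_csum(unsigned int l2info, int ip_frag)
{
	if (!(l2info & (RXF_UDP | RXF_TCP)))
		return "CHECKSUM_NONE";
	if (!ip_frag)
		return "CHECKSUM_UNNECESSARY";	/* full TCP/UDP csum pass */
	if (l2info & RXF_IP)
		return "CHECKSUM_COMPLETE";	/* raw sum handed to stack */
	return "CHECKSUM_NONE";			/* fragment, not IPv4 */
}

int main(void)
{
	printf("%s\n", rx_csum(RXF_TCP, 0));
	printf("%s\n", rx_csum(RXF_UDP | RXF_IP, 1));
	printf("%s\n", rx_csum(RXF_UDP, 1));
	return 0;
}
```
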
 
@@ -1998,7 +1999,7 @@
 	memset(&c, 0, sizeof(c));
 	c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
 			    FW_CMD_WRITE | FW_CMD_EXEC |
-			    FW_IQ_CMD_PFN(0) | FW_IQ_CMD_VFN(0));
+			    FW_IQ_CMD_PFN(adap->fn) | FW_IQ_CMD_VFN(0));
 	c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC | FW_IQ_CMD_IQSTART(1) |
 				 FW_LEN16(c));
 	c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
@@ -2030,7 +2031,7 @@
 		c.fl0addr = cpu_to_be64(fl->addr);
 	}
 
-	ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c);
+	ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
 	if (ret)
 		goto err;
 
@@ -2109,7 +2110,7 @@
 	memset(&c, 0, sizeof(c));
 	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
 			    FW_CMD_WRITE | FW_CMD_EXEC |
-			    FW_EQ_ETH_CMD_PFN(0) | FW_EQ_ETH_CMD_VFN(0));
+			    FW_EQ_ETH_CMD_PFN(adap->fn) | FW_EQ_ETH_CMD_VFN(0));
 	c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC |
 				 FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
 	c.viid_pkd = htonl(FW_EQ_ETH_CMD_VIID(pi->viid));
@@ -2122,7 +2123,7 @@
 				  FW_EQ_ETH_CMD_EQSIZE(nentries));
 	c.eqaddr = cpu_to_be64(txq->q.phys_addr);
 
-	ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c);
+	ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
 	if (ret) {
 		kfree(txq->q.sdesc);
 		txq->q.sdesc = NULL;
@@ -2159,7 +2160,8 @@
 
 	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
 			    FW_CMD_WRITE | FW_CMD_EXEC |
-			    FW_EQ_CTRL_CMD_PFN(0) | FW_EQ_CTRL_CMD_VFN(0));
+			    FW_EQ_CTRL_CMD_PFN(adap->fn) |
+			    FW_EQ_CTRL_CMD_VFN(0));
 	c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC |
 				 FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c));
 	c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID(cmplqid));
@@ -2173,7 +2175,7 @@
 				  FW_EQ_CTRL_CMD_EQSIZE(nentries));
 	c.eqaddr = cpu_to_be64(txq->q.phys_addr);
 
-	ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c);
+	ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
 	if (ret) {
 		dma_free_coherent(adap->pdev_dev,
 				  nentries * sizeof(struct tx_desc),
@@ -2209,7 +2211,8 @@
 	memset(&c, 0, sizeof(c));
 	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
 			    FW_CMD_WRITE | FW_CMD_EXEC |
-			    FW_EQ_OFLD_CMD_PFN(0) | FW_EQ_OFLD_CMD_VFN(0));
+			    FW_EQ_OFLD_CMD_PFN(adap->fn) |
+			    FW_EQ_OFLD_CMD_VFN(0));
 	c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC |
 				 FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c));
 	c.fetchszm_to_iqid = htonl(FW_EQ_OFLD_CMD_HOSTFCMODE(2) |
@@ -2221,7 +2224,7 @@
 				  FW_EQ_OFLD_CMD_EQSIZE(nentries));
 	c.eqaddr = cpu_to_be64(txq->q.phys_addr);
 
-	ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c);
+	ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
 	if (ret) {
 		kfree(txq->q.sdesc);
 		txq->q.sdesc = NULL;
@@ -2257,8 +2260,8 @@
 	unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;
 
 	adap->sge.ingr_map[rq->cntxt_id] = NULL;
-	t4_iq_free(adap, 0, 0, 0, FW_IQ_TYPE_FL_INT_CAP, rq->cntxt_id, fl_id,
-		   0xffff);
+	t4_iq_free(adap, adap->fn, adap->fn, 0, FW_IQ_TYPE_FL_INT_CAP,
+		   rq->cntxt_id, fl_id, 0xffff);
 	dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
 			  rq->desc, rq->phys_addr);
 	netif_napi_del(&rq->napi);
@@ -2295,7 +2298,8 @@
 		if (eq->rspq.desc)
 			free_rspq_fl(adap, &eq->rspq, &eq->fl);
 		if (etq->q.desc) {
-			t4_eth_eq_free(adap, 0, 0, 0, etq->q.cntxt_id);
+			t4_eth_eq_free(adap, adap->fn, adap->fn, 0,
+				       etq->q.cntxt_id);
 			free_tx_desc(adap, &etq->q, etq->q.in_use, true);
 			kfree(etq->q.sdesc);
 			free_txq(adap, &etq->q);
@@ -2318,7 +2322,8 @@
 
 		if (q->q.desc) {
 			tasklet_kill(&q->qresume_tsk);
-			t4_ofld_eq_free(adap, 0, 0, 0, q->q.cntxt_id);
+			t4_ofld_eq_free(adap, adap->fn, adap->fn, 0,
+					q->q.cntxt_id);
 			free_tx_desc(adap, &q->q, q->q.in_use, false);
 			kfree(q->q.sdesc);
 			__skb_queue_purge(&q->sendq);
@@ -2332,7 +2337,8 @@
 
 		if (cq->q.desc) {
 			tasklet_kill(&cq->qresume_tsk);
-			t4_ctrl_eq_free(adap, 0, 0, 0, cq->q.cntxt_id);
+			t4_ctrl_eq_free(adap, adap->fn, adap->fn, 0,
+					cq->q.cntxt_id);
 			__skb_queue_purge(&cq->sendq);
 			free_txq(adap, &cq->q);
 		}
@@ -2400,6 +2406,7 @@
  */
 void t4_sge_init(struct adapter *adap)
 {
+	unsigned int i, v;
 	struct sge *s = &adap->sge;
 	unsigned int fl_align_log = ilog2(FL_ALIGN);
 
@@ -2408,8 +2415,10 @@
 			 INGPADBOUNDARY(fl_align_log - 5) | PKTSHIFT(2) |
 			 RXPKTCPLMODE |
 			 (STAT_LEN == 128 ? EGRSTATUSPAGESIZE : 0));
-	t4_set_reg_field(adap, SGE_HOST_PAGE_SIZE, HOSTPAGESIZEPF0_MASK,
-			 HOSTPAGESIZEPF0(PAGE_SHIFT - 10));
+
+	for (i = v = 0; i < 32; i += 4)
+		v |= (PAGE_SHIFT - 10) << i;
+	t4_write_reg(adap, SGE_HOST_PAGE_SIZE, v);
 	t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, PAGE_SIZE);
 #if FL_PG_ORDER > 0
 	t4_write_reg(adap, SGE_FL_BUFFER_SIZE1, PAGE_SIZE << FL_PG_ORDER);
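
The t4_sge_init() change above programs the host page size for all eight PFs instead of only PF0, writing log2 of the page size in KB into each 4-bit field. The loop is easy to verify standalone:

```c
#include <stdio.h>

int main(void)
{
	unsigned int page_shift = 12;		/* 4 KB pages */
	unsigned int i, v;

	/* one (PAGE_SHIFT - 10) nibble per PF, eight PFs */
	for (i = v = 0; i < 32; i += 4)
		v |= (page_shift - 10) << i;

	/* prints 0x22222222: every PF sees 4 KB host pages */
	printf("SGE_HOST_PAGE_SIZE image: 0x%08x\n", v);
	return 0;
}
```
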
diff --git a/drivers/net/cxgb4/t4_hw.c b/drivers/net/cxgb4/t4_hw.c
index ab46797..9e1a4b4 100644
--- a/drivers/net/cxgb4/t4_hw.c
+++ b/drivers/net/cxgb4/t4_hw.c
@@ -1444,7 +1444,7 @@
 		t4_fatal_err(adap);
 }
 
-#define PF_INTR_MASK (PFSW | PFCIM)
+#define PF_INTR_MASK (PFSW)
 #define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \
 		EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \
 		CPL_SWITCH | SGE | ULP_TX)
diff --git a/drivers/net/cxgb4/t4_hw.h b/drivers/net/cxgb4/t4_hw.h
index e875d09..10a0555 100644
--- a/drivers/net/cxgb4/t4_hw.h
+++ b/drivers/net/cxgb4/t4_hw.h
@@ -135,5 +135,5 @@
 
 #define QINTR_CNT_EN       0x1
 #define QINTR_TIMER_IDX(x) ((x) << 1)
-#define QINTR_TIMER_IDX_GET(x) (((x) << 1) & 0x7)
+#define QINTR_TIMER_IDX_GET(x) (((x) >> 1) & 0x7)
 #endif /* __T4_HW_H */
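
The one-liner above fixes a genuine decode bug: the timer index is stored at bits 3:1 (bit 0 is QINTR_CNT_EN), so the getter must shift right before masking; the old `<< 1` pushed the field further away instead of recovering it. A round-trip check using the macros from this header:

```c
#include <assert.h>
#include <stdio.h>

#define QINTR_CNT_EN		0x1
#define QINTR_TIMER_IDX(x)	((x) << 1)
#define QINTR_TIMER_IDX_GET(x)	(((x) >> 1) & 0x7)

int main(void)
{
	unsigned int idx;

	for (idx = 0; idx < 8; idx++) {
		unsigned int enc = QINTR_TIMER_IDX(idx) | QINTR_CNT_EN;

		assert(QINTR_TIMER_IDX_GET(enc) == idx);
	}
	printf("encode/decode round-trips for all 8 timer indices\n");
	return 0;
}
```
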
diff --git a/drivers/net/cxgb4/t4_msg.h b/drivers/net/cxgb4/t4_msg.h
index 623932b..a550d0c 100644
--- a/drivers/net/cxgb4/t4_msg.h
+++ b/drivers/net/cxgb4/t4_msg.h
@@ -529,6 +529,8 @@
 	__be32 l2info;
 #define RXF_UDP (1 << 22)
 #define RXF_TCP (1 << 23)
+#define RXF_IP  (1 << 24)
+#define RXF_IP6 (1 << 25)
 	__be16 hdr_len;
 	__be16 err_vec;
 };
diff --git a/drivers/net/cxgb4/t4_regs.h b/drivers/net/cxgb4/t4_regs.h
index bf21c14..0adc5bc 100644
--- a/drivers/net/cxgb4/t4_regs.h
+++ b/drivers/net/cxgb4/t4_regs.h
@@ -232,6 +232,7 @@
 #define  WINDOW_MASK     0x000000ffU
 #define  WINDOW_SHIFT    0
 #define  WINDOW(x)       ((x) << WINDOW_SHIFT)
+#define PCIE_MEM_ACCESS_OFFSET 0x306c
 
 #define PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS 0x5908
 #define  RNPP 0x80000000U
diff --git a/drivers/net/cxgb4/t4fw_api.h b/drivers/net/cxgb4/t4fw_api.h
index ca45df8..0969f2f 100644
--- a/drivers/net/cxgb4/t4fw_api.h
+++ b/drivers/net/cxgb4/t4fw_api.h
@@ -485,6 +485,8 @@
 	FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH = 0x20,
 	FW_PARAMS_PARAM_PFVF_VIID       = 0x24,
 	FW_PARAMS_PARAM_PFVF_CPMASK     = 0x25,
+	FW_PARAMS_PARAM_PFVF_OCQ_START  = 0x26,
+	FW_PARAMS_PARAM_PFVF_OCQ_END    = 0x27,
 };
 
 /*
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index 25e14d2..d0824e3 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -298,6 +298,11 @@
 #define EMAC_CTRL_EWCTL		(0x4)
 #define EMAC_CTRL_EWINTTCNT	(0x8)
 
+/* EMAC DM644x control module masks */
+#define EMAC_DM644X_EWINTCNT_MASK	0x1FFFF
+#define EMAC_DM644X_INTMIN_INTVL	0x1
+#define EMAC_DM644X_INTMAX_INTVL	(EMAC_DM644X_EWINTCNT_MASK)
+
 /* EMAC MDIO related */
 /* Mask & Control defines */
 #define MDIO_CONTROL_CLKDIV	(0xFF)
@@ -318,8 +323,20 @@
 #define MDIO_CONTROL		(0x04)
 
 /* EMAC DM646X control module registers */
-#define EMAC_DM646X_CMRXINTEN	(0x14)
-#define EMAC_DM646X_CMTXINTEN	(0x18)
+#define EMAC_DM646X_CMINTCTRL	0x0C
+#define EMAC_DM646X_CMRXINTEN	0x14
+#define EMAC_DM646X_CMTXINTEN	0x18
+#define EMAC_DM646X_CMRXINTMAX	0x70
+#define EMAC_DM646X_CMTXINTMAX	0x74
+
+/* EMAC DM646X control module masks */
+#define EMAC_DM646X_INTPACEEN		(0x3 << 16)
+#define EMAC_DM646X_INTPRESCALE_MASK	(0x7FF << 0)
+#define EMAC_DM646X_CMINTMAX_CNT	63
+#define EMAC_DM646X_CMINTMIN_CNT	2
+#define EMAC_DM646X_CMINTMAX_INTVL	(1000 / EMAC_DM646X_CMINTMIN_CNT)
+#define EMAC_DM646X_CMINTMIN_INTVL	((1000 / EMAC_DM646X_CMINTMAX_CNT) + 1)
+
 
 /* EMAC EOI codes for C0 */
 #define EMAC_DM646X_MAC_EOI_C0_RXEN	(0x01)
@@ -468,6 +485,8 @@
 	u32 duplex; /* Link duplex: 0=Half, 1=Full */
 	u32 rx_buf_size;
 	u32 isr_count;
+	u32 coal_intvl;
+	u32 bus_freq_mhz;
 	u8 rmii_en;
 	u8 version;
 	u32 mac_hash1;
@@ -545,9 +564,11 @@
 
 	/* Print important registers in EMAC */
 	dev_info(emac_dev, "EMAC Basic registers\n");
-	dev_info(emac_dev, "EMAC: EWCTL: %08X, EWINTTCNT: %08X\n",
-		emac_ctrl_read(EMAC_CTRL_EWCTL),
-		emac_ctrl_read(EMAC_CTRL_EWINTTCNT));
+	if (priv->version == EMAC_VERSION_1) {
+		dev_info(emac_dev, "EMAC: EWCTL: %08X, EWINTTCNT: %08X\n",
+			emac_ctrl_read(EMAC_CTRL_EWCTL),
+			emac_ctrl_read(EMAC_CTRL_EWINTTCNT));
+	}
 	dev_info(emac_dev, "EMAC: TXID: %08X %s, RXID: %08X %s\n",
 		emac_read(EMAC_TXIDVER),
 		((emac_read(EMAC_TXCONTROL)) ? "enabled" : "disabled"),
@@ -691,6 +712,103 @@
 }
 
 /**
+ * emac_get_coalesce() - Get interrupt coalesce settings for this device
+ * @ndev: The DaVinci EMAC network adapter
+ * @coal: ethtool coalesce settings structure
+ *
+ * Fetch the current interrupt coalesce settings.
+ */
+static int emac_get_coalesce(struct net_device *ndev,
+				struct ethtool_coalesce *coal)
+{
+	struct emac_priv *priv = netdev_priv(ndev);
+
+	coal->rx_coalesce_usecs = priv->coal_intvl;
+	return 0;
+}
+
+/**
+ * emac_set_coalesce() - Set interrupt coalesce settings for this device
+ * @ndev: The DaVinci EMAC network adapter
+ * @coal: ethtool coalesce settings structure
+ *
+ * Set interrupt coalesce parameters.
+ */
+static int emac_set_coalesce(struct net_device *ndev,
+				struct ethtool_coalesce *coal)
+{
+	struct emac_priv *priv = netdev_priv(ndev);
+	u32 int_ctrl, num_interrupts = 0;
+	u32 prescale = 0, addnl_dvdr = 1, coal_intvl = 0;
+
+	if (!coal->rx_coalesce_usecs)
+		return -EINVAL;
+
+	coal_intvl = coal->rx_coalesce_usecs;
+
+	switch (priv->version) {
+	case EMAC_VERSION_2:
+		int_ctrl =  emac_ctrl_read(EMAC_DM646X_CMINTCTRL);
+		prescale = priv->bus_freq_mhz * 4;
+
+		if (coal_intvl < EMAC_DM646X_CMINTMIN_INTVL)
+			coal_intvl = EMAC_DM646X_CMINTMIN_INTVL;
+
+		if (coal_intvl > EMAC_DM646X_CMINTMAX_INTVL) {
+			/*
+			 * The interrupt pacer works on a 4us pulse; we
+			 * can throttle further by dilating this pulse.
+			 */
+			addnl_dvdr = EMAC_DM646X_INTPRESCALE_MASK / prescale;
+
+			if (addnl_dvdr > 1) {
+				prescale *= addnl_dvdr;
+				if (coal_intvl > (EMAC_DM646X_CMINTMAX_INTVL
+							* addnl_dvdr))
+					coal_intvl = (EMAC_DM646X_CMINTMAX_INTVL
+							* addnl_dvdr);
+			} else {
+				addnl_dvdr = 1;
+				coal_intvl = EMAC_DM646X_CMINTMAX_INTVL;
+			}
+		}
+
+		num_interrupts = (1000 * addnl_dvdr) / coal_intvl;
+
+		int_ctrl |= EMAC_DM646X_INTPACEEN;
+		int_ctrl &= (~EMAC_DM646X_INTPRESCALE_MASK);
+		int_ctrl |= (prescale & EMAC_DM646X_INTPRESCALE_MASK);
+		emac_ctrl_write(EMAC_DM646X_CMINTCTRL, int_ctrl);
+
+		emac_ctrl_write(EMAC_DM646X_CMRXINTMAX, num_interrupts);
+		emac_ctrl_write(EMAC_DM646X_CMTXINTMAX, num_interrupts);
+
+		break;
+	default:
+		int_ctrl = emac_ctrl_read(EMAC_CTRL_EWINTTCNT);
+		int_ctrl &= (~EMAC_DM644X_EWINTCNT_MASK);
+		prescale = coal_intvl * priv->bus_freq_mhz;
+		if (prescale > EMAC_DM644X_EWINTCNT_MASK) {
+			prescale = EMAC_DM644X_EWINTCNT_MASK;
+			coal_intvl = prescale / priv->bus_freq_mhz;
+		}
+		emac_ctrl_write(EMAC_CTRL_EWINTTCNT, (int_ctrl | prescale));
+
+		break;
+	}
+
+	printk(KERN_INFO "Set coalesce to %u usecs.\n", coal_intvl);
+	priv->coal_intvl = coal_intvl;
+
+	return 0;
+}
+
+/**
  * ethtool_ops: DaVinci EMAC Ethtool structure
  *
  * Ethtool support for EMAC adapter
@@ -701,6 +819,8 @@
 	.get_settings = emac_get_settings,
 	.set_settings = emac_set_settings,
 	.get_link = ethtool_op_get_link,
+	.get_coalesce = emac_get_coalesce,
+	.set_coalesce = emac_set_coalesce,
 };
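
For the DM646x path in emac_set_coalesce() above, the pacer runs on a 4us tick, so the prescaler is four times the bus clock in MHz and the interrupt budget is expressed per millisecond. A sketch of the arithmetic with purely illustrative numbers:

```c
#include <stdio.h>

int main(void)
{
	unsigned int bus_freq_mhz = 76;		/* hypothetical module clock */
	unsigned int coal_usecs = 100;		/* requested RX interval     */
	unsigned int prescale = bus_freq_mhz * 4;	/* ticks per 4us pulse */
	unsigned int addnl_dvdr = 1;		/* no pulse dilation needed  */
	unsigned int num_interrupts = (1000 * addnl_dvdr) / coal_usecs;

	/* prints: prescale=304, at most 10 interrupts/ms */
	printf("prescale=%u, at most %u interrupts/ms\n",
	       prescale, num_interrupts);
	return 0;
}
```
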
 
 /**
@@ -1182,8 +1302,8 @@
 	struct net_device *ndev = priv->ndev;
 	u32 cnt;
 
-	if (unlikely(num_tokens && netif_queue_stopped(dev)))
-		netif_start_queue(dev);
+	if (unlikely(num_tokens && netif_queue_stopped(ndev)))
+		netif_start_queue(ndev);
 	for (cnt = 0; cnt < num_tokens; cnt++) {
 		struct sk_buff *skb = (struct sk_buff *)net_data_tokens[cnt];
 		if (skb == NULL)
@@ -2148,7 +2268,7 @@
 	struct net_device *ndev = priv->ndev;
 	struct device *emac_dev = &ndev->dev;
 	u32 status = 0;
-	u32 num_pkts = 0;
+	u32 num_tx_pkts = 0, num_rx_pkts = 0;
 
 	/* Check interrupt vectors and call packet processing */
 	status = emac_read(EMAC_MACINVECTOR);
@@ -2159,27 +2279,19 @@
 		mask = EMAC_DM646X_MAC_IN_VECTOR_TX_INT_VEC;
 
 	if (status & mask) {
-		num_pkts = emac_tx_bdproc(priv, EMAC_DEF_TX_CH,
+		num_tx_pkts = emac_tx_bdproc(priv, EMAC_DEF_TX_CH,
 					  EMAC_DEF_TX_MAX_SERVICE);
 	} /* TX processing */
 
-	if (num_pkts)
-		return budget;
-
 	mask = EMAC_DM644X_MAC_IN_VECTOR_RX_INT_VEC;
 
 	if (priv->version == EMAC_VERSION_2)
 		mask = EMAC_DM646X_MAC_IN_VECTOR_RX_INT_VEC;
 
 	if (status & mask) {
-		num_pkts = emac_rx_bdproc(priv, EMAC_DEF_RX_CH, budget);
+		num_rx_pkts = emac_rx_bdproc(priv, EMAC_DEF_RX_CH, budget);
 	} /* RX processing */
 
-	if (num_pkts < budget) {
-		napi_complete(napi);
-		emac_int_enable(priv);
-	}
-
 	mask = EMAC_DM644X_MAC_IN_VECTOR_HOST_INT;
 	if (priv->version == EMAC_VERSION_2)
 		mask = EMAC_DM646X_MAC_IN_VECTOR_HOST_INT;
@@ -2210,9 +2322,12 @@
 				dev_err(emac_dev, "RX Host error %s on ch=%d\n",
 					&emac_rxhost_errcodes[cause][0], ch);
 		}
-	} /* Host error processing */
+	} else if (num_rx_pkts < budget) {
+		napi_complete(napi);
+		emac_int_enable(priv);
+	}
 
-	return num_pkts;
+	return num_rx_pkts;
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -2437,6 +2552,14 @@
 	/* Start/Enable EMAC hardware */
 	emac_hw_enable(priv);
 
+	/* Enable Interrupt pacing if configured */
+	if (priv->coal_intvl != 0) {
+		struct ethtool_coalesce coal;
+
+		coal.rx_coalesce_usecs = (priv->coal_intvl << 4);
+		emac_set_coalesce(ndev, &coal);
+	}
+
 	/* find the first phy */
 	priv->phydev = NULL;
 	if (priv->phy_mask) {
@@ -2677,6 +2800,9 @@
 	priv->int_enable = pdata->interrupt_enable;
 	priv->int_disable = pdata->interrupt_disable;
 
+	priv->coal_intvl = 0;
+	priv->bus_freq_mhz = (u32)(emac_bus_frequency / 1000000);
+
 	emac_dev = &ndev->dev;
 	/* Get EMAC platform data */
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/net/dnet.c b/drivers/net/dnet.c
index 4ea7141..7c07575 100644
--- a/drivers/net/dnet.c
+++ b/drivers/net/dnet.c
@@ -854,7 +854,7 @@
 	dev = alloc_etherdev(sizeof(*bp));
 	if (!dev) {
 		dev_err(&pdev->dev, "etherdev alloc failed, aborting.\n");
-		goto err_out;
+		goto err_out_release_mem;
 	}
 
 	/* TODO: Actually, we have some interesting features... */
@@ -911,7 +911,8 @@
 	if (err)
 		dev_warn(&pdev->dev, "Cannot register PHY board fixup.\n");
 
-	if (dnet_mii_init(bp) != 0)
+	err = dnet_mii_init(bp);
+	if (err)
 		goto err_out_unregister_netdev;
 
 	dev_info(&pdev->dev, "Dave DNET at 0x%p (0x%08x) irq %d %pM\n",
@@ -936,6 +937,8 @@
 	iounmap(bp->regs);
 err_out_free_dev:
 	free_netdev(dev);
+err_out_release_mem:
+	release_mem_region(mem_base, mem_size);
 err_out:
 	return err;
 }
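
The dnet fix adds err_out_release_mem so a failed alloc_etherdev() no longer leaks the claimed memory region; each label in the goto ladder undoes exactly what was acquired before the jump. A toy model of the pattern, with malloc standing in for the resource calls:

```c
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	void *region, *dev;
	int err = 0;

	region = malloc(64);		/* models request_mem_region() */
	if (!region) {
		err = -1;
		goto err_out;
	}
	dev = malloc(64);		/* models alloc_etherdev() */
	if (!dev) {
		err = -1;
		goto err_out_release_mem;	/* don't leak the region */
	}

	free(dev);			/* normal teardown */
	free(region);
	return 0;

err_out_release_mem:
	free(region);			/* models release_mem_region() */
err_out:
	printf("probe failed: %d\n", err);
	return err;
}
```
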
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index 65298a6..99288b9 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -324,18 +324,20 @@
 extern struct net_device *e1000_get_hw_dev(struct e1000_hw *hw);
 #define e_dbg(format, arg...) \
 	netdev_dbg(e1000_get_hw_dev(hw), format, ## arg)
-#define e_err(format, arg...) \
-	netdev_err(adapter->netdev, format, ## arg)
-#define e_info(format, arg...) \
-	netdev_info(adapter->netdev, format, ## arg)
-#define e_warn(format, arg...) \
-	netdev_warn(adapter->netdev, format, ## arg)
-#define e_notice(format, arg...) \
-	netdev_notice(adapter->netdev, format, ## arg)
+#define e_err(msglvl, format, arg...) \
+	netif_err(adapter, msglvl, adapter->netdev, format, ## arg)
+#define e_info(msglvl, format, arg...) \
+	netif_info(adapter, msglvl, adapter->netdev, format, ## arg)
+#define e_warn(msglvl, format, arg...) \
+	netif_warn(adapter, msglvl, adapter->netdev, format, ## arg)
+#define e_notice(msglvl, format, arg...) \
+	netif_notice(adapter, msglvl, adapter->netdev, format, ## arg)
 #define e_dev_info(format, arg...) \
 	dev_info(&adapter->pdev->dev, format, ## arg)
 #define e_dev_warn(format, arg...) \
 	dev_warn(&adapter->pdev->dev, format, ## arg)
+#define e_dev_err(format, arg...) \
+	dev_err(&adapter->pdev->dev, format, ## arg)
 
 extern char e1000_driver_name[];
 extern const char e1000_driver_version[];
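
The reworked e1000 macros above gate each message on the adapter's msg_enable bitmap via netif_err() and friends, where the bare token (probe, drv, ...) expands to the matching NETIF_MSG_* bit. A userspace mock of the gating; unlike the kernel macros, this mock takes the bit value directly rather than the token:

```c
#include <stdio.h>

#define NETIF_MSG_DRV		0x0001
#define NETIF_MSG_PROBE		0x0002
#define NETIF_MSG_RX_ERR	0x0100

struct adapter {
	unsigned int msg_enable;
};

/* GNU/C99-style variadic macro, as the kernel uses */
#define e_err(adapter, msglvl, fmt, ...)			\
	do {							\
		if ((adapter)->msg_enable & (msglvl))		\
			fprintf(stderr, fmt, ##__VA_ARGS__);	\
	} while (0)

int main(void)
{
	struct adapter a = { .msg_enable = NETIF_MSG_PROBE };

	e_err(&a, NETIF_MSG_PROBE, "probe error: printed\n");
	e_err(&a, NETIF_MSG_RX_ERR, "rx error: suppressed\n");
	return 0;
}
```
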
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index d5ff029..f4d0922 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -346,7 +346,7 @@
 
 	netdev->features &= ~NETIF_F_TSO6;
 
-	e_info("TSO is %s\n", data ? "Enabled" : "Disabled");
+	e_info(probe, "TSO is %s\n", data ? "Enabled" : "Disabled");
 	adapter->tso_force = true;
 	return 0;
 }
@@ -714,9 +714,9 @@
 		writel(write & test[i], address);
 		read = readl(address);
 		if (read != (write & test[i] & mask)) {
-			e_info("pattern test reg %04X failed: "
-			       "got 0x%08X expected 0x%08X\n",
-			       reg, read, (write & test[i] & mask));
+			e_err(drv, "pattern test reg %04X failed: "
+			      "got 0x%08X expected 0x%08X\n",
+			      reg, read, (write & test[i] & mask));
 			*data = reg;
 			return true;
 		}
@@ -734,7 +734,7 @@
 	writel(write & mask, address);
 	read = readl(address);
 	if ((read & mask) != (write & mask)) {
-		e_err("set/check reg %04X test failed: "
+		e_err(drv, "set/check reg %04X test failed: "
 		      "got 0x%08X expected 0x%08X\n",
 		      reg, (read & mask), (write & mask));
 		*data = reg;
@@ -779,7 +779,7 @@
 	ew32(STATUS, toggle);
 	after = er32(STATUS) & toggle;
 	if (value != after) {
-		e_err("failed STATUS register test got: "
+		e_err(drv, "failed STATUS register test got: "
 		      "0x%08X expected: 0x%08X\n", after, value);
 		*data = 1;
 		return 1;
@@ -894,7 +894,8 @@
 		*data = 1;
 		return -1;
 	}
-	e_info("testing %s interrupt\n", (shared_int ? "shared" : "unshared"));
+	e_info(hw, "testing %s interrupt\n", (shared_int ?
+	       "shared" : "unshared"));
 
 	/* Disable all the interrupts */
 	ew32(IMC, 0xFFFFFFFF);
@@ -1561,7 +1562,7 @@
 		u8 forced_speed_duplex = hw->forced_speed_duplex;
 		u8 autoneg = hw->autoneg;
 
-		e_info("offline testing starting\n");
+		e_info(hw, "offline testing starting\n");
 
 		/* Link test performed before hardware reset so autoneg doesn't
 		 * interfere with test result */
@@ -1601,7 +1602,7 @@
 		if (if_running)
 			dev_open(netdev);
 	} else {
-		e_info("online testing starting\n");
+		e_info(hw, "online testing starting\n");
 		/* Online tests */
 		if (e1000_link_test(adapter, &data[4]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1694,8 +1695,8 @@
 		wol->supported &= ~WAKE_UCAST;
 
 		if (adapter->wol & E1000_WUFC_EX)
-			e_err("Interface does not support "
-		        "directed (unicast) frame wake-up packets\n");
+			e_err(drv, "Interface does not support directed "
+			      "(unicast) frame wake-up packets\n");
 		break;
 	default:
 		break;
@@ -1726,8 +1727,8 @@
 	switch (hw->device_id) {
 	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
 		if (wol->wolopts & WAKE_UCAST) {
-			e_err("Interface does not support "
-			      "directed (unicast) frame wake-up packets\n");
+			e_err(drv, "Interface does not support directed "
+			      "(unicast) frame wake-up packets\n");
 			return -EOPNOTSUPP;
 		}
 		break;
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 68a8089..02833af 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -275,7 +275,7 @@
 	err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
 	                  netdev);
 	if (err) {
-		e_err("Unable to allocate interrupt Error: %d\n", err);
+		e_err(probe, "Unable to allocate interrupt. Error: %d\n", err);
 	}
 
 	return err;
@@ -657,7 +657,7 @@
 		ew32(WUC, 0);
 
 	if (e1000_init_hw(hw))
-		e_err("Hardware Error\n");
+		e_dev_err("Hardware Error\n");
 	e1000_update_mng_vlan(adapter);
 
 	/* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
@@ -925,7 +925,7 @@
 
 	/* initialize eeprom parameters */
 	if (e1000_init_eeprom_params(hw)) {
-		e_err("EEPROM initialization failed\n");
+		e_err(probe, "EEPROM initialization failed\n");
 		goto err_eeprom;
 	}
 
@@ -936,7 +936,7 @@
 
 	/* make sure the EEPROM is good */
 	if (e1000_validate_eeprom_checksum(hw) < 0) {
-		e_err("The EEPROM Checksum Is Not Valid\n");
+		e_err(probe, "The EEPROM Checksum Is Not Valid\n");
 		e1000_dump_eeprom(adapter);
 		/*
 		 * set MAC address to all zeroes to invalidate and temporary
@@ -950,14 +950,14 @@
 	} else {
 		/* copy the MAC address out of the EEPROM */
 		if (e1000_read_mac_addr(hw))
-			e_err("EEPROM Read Error\n");
+			e_err(probe, "EEPROM Read Error\n");
 	}
	/* don't block initialization here due to bad MAC address */
 	memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
 	memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len);
 
 	if (!is_valid_ether_addr(netdev->perm_addr))
-		e_err("Invalid MAC Address\n");
+		e_err(probe, "Invalid MAC Address\n");
 
 	e1000_get_bus_info(hw);
 
@@ -1047,7 +1047,7 @@
 		goto err_register;
 
 	/* print bus type/speed/width info */
-	e_info("(PCI%s:%dMHz:%d-bit) %pM\n",
+	e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
 	       ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
 	       ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
 		(hw->bus_speed == e1000_bus_speed_120) ? 120 :
@@ -1059,7 +1059,7 @@
 	/* carrier off reporting is important to ethtool even BEFORE open */
 	netif_carrier_off(netdev);
 
-	e_info("Intel(R) PRO/1000 Network Connection\n");
+	e_info(probe, "Intel(R) PRO/1000 Network Connection\n");
 
 	cards_found++;
 	return 0;
@@ -1159,7 +1159,7 @@
 	/* identify the MAC */
 
 	if (e1000_set_mac_type(hw)) {
-		e_err("Unknown MAC Type\n");
+		e_err(probe, "Unknown MAC Type\n");
 		return -EIO;
 	}
 
@@ -1192,7 +1192,7 @@
 	adapter->num_rx_queues = 1;
 
 	if (e1000_alloc_queues(adapter)) {
-		e_err("Unable to allocate memory for queues\n");
+		e_err(probe, "Unable to allocate memory for queues\n");
 		return -ENOMEM;
 	}
 
@@ -1386,7 +1386,8 @@
 	size = sizeof(struct e1000_buffer) * txdr->count;
 	txdr->buffer_info = vmalloc(size);
 	if (!txdr->buffer_info) {
-		e_err("Unable to allocate memory for the Tx descriptor ring\n");
+		e_err(probe, "Unable to allocate memory for the Tx descriptor "
+		      "ring\n");
 		return -ENOMEM;
 	}
 	memset(txdr->buffer_info, 0, size);
@@ -1401,7 +1402,8 @@
 	if (!txdr->desc) {
 setup_tx_desc_die:
 		vfree(txdr->buffer_info);
-		e_err("Unable to allocate memory for the Tx descriptor ring\n");
+		e_err(probe, "Unable to allocate memory for the Tx descriptor "
+		      "ring\n");
 		return -ENOMEM;
 	}
 
@@ -1409,7 +1411,7 @@
 	if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
 		void *olddesc = txdr->desc;
 		dma_addr_t olddma = txdr->dma;
-		e_err("txdr align check failed: %u bytes at %p\n",
+		e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
 		      txdr->size, txdr->desc);
 		/* Try again, without freeing the previous */
 		txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
@@ -1427,7 +1429,7 @@
 					  txdr->dma);
 			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
 					  olddma);
-			e_err("Unable to allocate aligned memory "
+			e_err(probe, "Unable to allocate aligned memory "
 			      "for the transmit descriptor ring\n");
 			vfree(txdr->buffer_info);
 			return -ENOMEM;
@@ -1460,7 +1462,7 @@
 	for (i = 0; i < adapter->num_tx_queues; i++) {
 		err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
 		if (err) {
-			e_err("Allocation for Tx Queue %u failed\n", i);
+			e_err(probe, "Allocation for Tx Queue %u failed\n", i);
 			for (i-- ; i >= 0; i--)
 				e1000_free_tx_resources(adapter,
 							&adapter->tx_ring[i]);
@@ -1580,7 +1582,8 @@
 	size = sizeof(struct e1000_buffer) * rxdr->count;
 	rxdr->buffer_info = vmalloc(size);
 	if (!rxdr->buffer_info) {
-		e_err("Unable to allocate memory for the Rx descriptor ring\n");
+		e_err(probe, "Unable to allocate memory for the Rx descriptor "
+		      "ring\n");
 		return -ENOMEM;
 	}
 	memset(rxdr->buffer_info, 0, size);
@@ -1596,7 +1599,8 @@
 					GFP_KERNEL);
 
 	if (!rxdr->desc) {
-		e_err("Unable to allocate memory for the Rx descriptor ring\n");
+		e_err(probe, "Unable to allocate memory for the Rx descriptor "
+		      "ring\n");
 setup_rx_desc_die:
 		vfree(rxdr->buffer_info);
 		return -ENOMEM;
@@ -1606,7 +1610,7 @@
 	if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
 		void *olddesc = rxdr->desc;
 		dma_addr_t olddma = rxdr->dma;
-		e_err("rxdr align check failed: %u bytes at %p\n",
+		e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
 		      rxdr->size, rxdr->desc);
 		/* Try again, without freeing the previous */
 		rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
@@ -1615,8 +1619,8 @@
 		if (!rxdr->desc) {
 			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
 					  olddma);
-			e_err("Unable to allocate memory for the Rx descriptor "
-			      "ring\n");
+			e_err(probe, "Unable to allocate memory for the Rx "
+			      "descriptor ring\n");
 			goto setup_rx_desc_die;
 		}
 
@@ -1626,8 +1630,8 @@
 					  rxdr->dma);
 			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
 					  olddma);
-			e_err("Unable to allocate aligned memory for the Rx "
-			      "descriptor ring\n");
+			e_err(probe, "Unable to allocate aligned memory for "
+			      "the Rx descriptor ring\n");
 			goto setup_rx_desc_die;
 		} else {
 			/* Free old allocation, new allocation was successful */
@@ -1659,7 +1663,7 @@
 	for (i = 0; i < adapter->num_rx_queues; i++) {
 		err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
 		if (err) {
-			e_err("Allocation for Rx Queue %u failed\n", i);
+			e_err(probe, "Allocation for Rx Queue %u failed\n", i);
 			for (i-- ; i >= 0; i--)
 				e1000_free_rx_resources(adapter,
 							&adapter->rx_ring[i]);
@@ -2110,7 +2114,7 @@
 	u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
 
 	if (!mcarray) {
-		e_err("memory allocation failed\n");
+		e_err(probe, "memory allocation failed\n");
 		return;
 	}
 
@@ -2648,7 +2652,8 @@
 		break;
 	default:
 		if (unlikely(net_ratelimit()))
-			e_warn("checksum_partial proto=%x!\n", skb->protocol);
+			e_warn(drv, "checksum_partial proto=%x!\n",
+			       skb->protocol);
 		break;
 	}
 
@@ -2992,7 +2997,8 @@
 				/* fall through */
 				pull_size = min((unsigned int)4, skb->data_len);
 				if (!__pskb_pull_tail(skb, pull_size)) {
-					e_err("__pskb_pull_tail failed.\n");
+					e_err(drv, "__pskb_pull_tail "
+					      "failed.\n");
 					dev_kfree_skb_any(skb);
 					return NETDEV_TX_OK;
 				}
@@ -3140,7 +3146,7 @@
 
 	if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
 	    (max_frame > MAX_JUMBO_FRAME_SIZE)) {
-		e_err("Invalid MTU setting\n");
+		e_err(probe, "Invalid MTU setting\n");
 		return -EINVAL;
 	}
 
@@ -3148,7 +3154,7 @@
 	switch (hw->mac_type) {
 	case e1000_undefined ... e1000_82542_rev2_1:
 		if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
-			e_err("Jumbo Frames not supported.\n");
+			e_err(probe, "Jumbo Frames not supported.\n");
 			return -EINVAL;
 		}
 		break;
@@ -3500,7 +3506,7 @@
 		    !(er32(STATUS) & E1000_STATUS_TXOFF)) {
 
 			/* detected Tx unit hang */
-			e_err("Detected Tx Unit Hang\n"
+			e_err(drv, "Detected Tx Unit Hang\n"
 			      "  Tx Queue             <%lu>\n"
 			      "  TDH                  <%x>\n"
 			      "  TDT                  <%x>\n"
@@ -3749,7 +3755,7 @@
 
 		/* eth type trans needs skb->data to point to something */
 		if (!pskb_may_pull(skb, ETH_HLEN)) {
-			e_err("pskb_may_pull failed.\n");
+			e_err(drv, "pskb_may_pull failed.\n");
 			dev_kfree_skb(skb);
 			goto next_desc;
 		}
@@ -3874,7 +3880,7 @@
 
 		if (adapter->discarding) {
 			/* All receives must fit into a single buffer */
-			e_info("Receive packet consumed multiple buffers\n");
+			e_dbg("Receive packet consumed multiple buffers\n");
 			/* recycle */
 			buffer_info->skb = skb;
 			if (status & E1000_RXD_STAT_EOP)
@@ -3986,8 +3992,8 @@
 		/* Fix for errata 23, can't cross 64kB boundary */
 		if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
 			struct sk_buff *oldskb = skb;
-			e_err("skb align check failed: %u bytes at %p\n",
-			      bufsz, skb->data);
+			e_err(rx_err, "skb align check failed: %u bytes at "
+			      "%p\n", bufsz, skb->data);
 			/* Try again, without freeing the previous */
 			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
 			/* Failed allocation, critical failure */
@@ -4095,8 +4101,8 @@
 		/* Fix for errata 23, can't cross 64kB boundary */
 		if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
 			struct sk_buff *oldskb = skb;
-			e_err("skb align check failed: %u bytes at %p\n",
-			      bufsz, skb->data);
+			e_err(rx_err, "skb align check failed: %u bytes at "
+			      "%p\n", bufsz, skb->data);
 			/* Try again, without freeing the previous */
 			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
 			/* Failed allocation, critical failure */
@@ -4141,8 +4147,8 @@
 		if (!e1000_check_64k_bound(adapter,
 					(void *)(unsigned long)buffer_info->dma,
 					adapter->rx_buffer_len)) {
-			e_err("dma align check failed: %u bytes at %p\n",
-			      adapter->rx_buffer_len,
+			e_err(rx_err, "dma align check failed: %u bytes at "
+			      "%p\n", adapter->rx_buffer_len,
 			      (void *)(unsigned long)buffer_info->dma);
 			dev_kfree_skb(skb);
 			buffer_info->skb = NULL;
@@ -4355,7 +4361,7 @@
 	int ret_val = pci_set_mwi(adapter->pdev);
 
 	if (ret_val)
-		e_err("Error in setting MWI\n");
+		e_err(probe, "Error in setting MWI\n");
 }
 
 void e1000_pci_clear_mwi(struct e1000_hw *hw)
@@ -4486,7 +4492,7 @@
	/* Fiber NICs only allow 1000 Mbps full duplex */
 	if ((hw->media_type == e1000_media_type_fiber) &&
 		spddplx != (SPEED_1000 + DUPLEX_FULL)) {
-		e_err("Unsupported Speed/Duplex configuration\n");
+		e_err(probe, "Unsupported Speed/Duplex configuration\n");
 		return -EINVAL;
 	}
 
@@ -4509,7 +4515,7 @@
 		break;
 	case SPEED_1000 + DUPLEX_HALF: /* not supported */
 	default:
-		e_err("Unsupported Speed/Duplex configuration\n");
+		e_err(probe, "Unsupported Speed/Duplex configuration\n");
 		return -EINVAL;
 	}
 	return 0;
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index 9ee133f..f9a31c8 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -348,6 +348,7 @@
 	u32 test_icr;
 
 	u32 msg_enable;
+	unsigned int num_vectors;
 	struct msix_entry *msix_entries;
 	int int_mode;
 	u32 eiac_mask;
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index 0cd569a..66ed08f 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -312,8 +312,8 @@
 #define E1000_KMRNCTRLSTA_INBAND_PARAM	0x9    /* Kumeran InBand Parameters */
 #define E1000_KMRNCTRLSTA_DIAG_NELPBK	0x1000 /* Nearend Loopback mode */
 #define E1000_KMRNCTRLSTA_K1_CONFIG	0x7
-#define E1000_KMRNCTRLSTA_K1_ENABLE	0x140E
-#define E1000_KMRNCTRLSTA_HD_CTRL	0x0002
+#define E1000_KMRNCTRLSTA_K1_ENABLE	0x0002
+#define E1000_KMRNCTRLSTA_HD_CTRL	0x10   /* Kumeran HD Control */
 
 #define IFE_PHY_EXTENDED_STATUS_CONTROL	0x10
 #define IFE_PHY_SPECIAL_CONTROL		0x11 /* 100BaseTx PHY Special Control */
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 6aa795a..9e9164a 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -1785,25 +1785,25 @@
 void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
 {
 	int err;
-	int numvecs, i;
-
+	int i;
 
 	switch (adapter->int_mode) {
 	case E1000E_INT_MODE_MSIX:
 		if (adapter->flags & FLAG_HAS_MSIX) {
-			numvecs = 3; /* RxQ0, TxQ0 and other */
-			adapter->msix_entries = kcalloc(numvecs,
+			adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */
+			adapter->msix_entries = kcalloc(adapter->num_vectors,
 						      sizeof(struct msix_entry),
 						      GFP_KERNEL);
 			if (adapter->msix_entries) {
-				for (i = 0; i < numvecs; i++)
+				for (i = 0; i < adapter->num_vectors; i++)
 					adapter->msix_entries[i].entry = i;
 
 				err = pci_enable_msix(adapter->pdev,
 						      adapter->msix_entries,
-						      numvecs);
+						      adapter->num_vectors);
 				if (err == 0)
 					return;
 			}
 			/* MSI-X failed, so fall through and try MSI */
 			e_err("Failed to initialize MSI-X interrupts.  "
@@ -1825,6 +1825,9 @@
 		/* Don't do anything; this is the system default */
 		break;
 	}
+
+	/* store the number of vectors being used */
+	adapter->num_vectors = 1;
 }
 
 /**
@@ -1946,7 +1949,14 @@
 	if (adapter->msix_entries)
 		ew32(EIAC_82574, 0);
 	e1e_flush();
-	synchronize_irq(adapter->pdev->irq);
+
+	if (adapter->msix_entries) {
+		int i;
+		for (i = 0; i < adapter->num_vectors; i++)
+			synchronize_irq(adapter->msix_entries[i].vector);
+	} else {
+		synchronize_irq(adapter->pdev->irq);
+	}
 }
 
 /**
@@ -3218,12 +3228,6 @@
 {
 	struct e1000_hw *hw = &adapter->hw;
 
-	/* DMA latency requirement to workaround early-receive/jumbo issue */
-	if (adapter->flags & FLAG_HAS_ERT)
-		adapter->netdev->pm_qos_req =
-			pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY,
-				       PM_QOS_DEFAULT_VALUE);
-
 	/* hardware has been reset, we need to reload some things */
 	e1000_configure(adapter);
 
@@ -3287,12 +3291,6 @@
 	e1000_clean_tx_ring(adapter);
 	e1000_clean_rx_ring(adapter);
 
-	if (adapter->flags & FLAG_HAS_ERT) {
-		pm_qos_remove_request(
-			      adapter->netdev->pm_qos_req);
-		adapter->netdev->pm_qos_req = NULL;
-	}
-
 	/*
 	 * TODO: for power management, we could drop the link and
 	 * pci_disable_device here.
@@ -3527,6 +3525,12 @@
 	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
 		e1000_update_mng_vlan(adapter);
 
+	/* DMA latency requirement to workaround early-receive/jumbo issue */
+	if (adapter->flags & FLAG_HAS_ERT)
+		adapter->netdev->pm_qos_req =
+		                    pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY,
+		                                       PM_QOS_DEFAULT_VALUE);
+
 	/*
 	 * before we allocate an interrupt, we must be ready to handle it.
 	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
@@ -3631,6 +3635,11 @@
 	if (adapter->flags & FLAG_HAS_AMT)
 		e1000_release_hw_control(adapter);
 
+	if (adapter->flags & FLAG_HAS_ERT) {
+		pm_qos_remove_request(adapter->netdev->pm_qos_req);
+		adapter->netdev->pm_qos_req = NULL;
+	}
+
 	pm_runtime_put_sync(&pdev->dev);
 
 	return 0;
@@ -5650,8 +5659,6 @@
 	if (err)
 		goto err_sw_init;
 
-	err = -EIO;
-
 	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
 	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
 	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index 38c282e..6d653c4 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -632,7 +632,7 @@
 {
 }
 
-static int ethoc_mdio_probe(struct net_device *dev)
+static int __devinit ethoc_mdio_probe(struct net_device *dev)
 {
 	struct ethoc *priv = netdev_priv(dev);
 	struct phy_device *phy;
@@ -871,7 +871,7 @@
  * ethoc_probe() - initialize OpenCores ethernet MAC
  * pdev:	platform device
  */
-static int ethoc_probe(struct platform_device *pdev)
+static int __devinit ethoc_probe(struct platform_device *pdev)
 {
 	struct net_device *netdev = NULL;
 	struct resource *res = NULL;
@@ -1080,7 +1080,7 @@
  * ethoc_remove() - shutdown OpenCores ethernet MAC
  * @pdev:	platform device
  */
-static int ethoc_remove(struct platform_device *pdev)
+static int __devexit ethoc_remove(struct platform_device *pdev)
 {
 	struct net_device *netdev = platform_get_drvdata(pdev);
 	struct ethoc *priv = netdev_priv(netdev);
@@ -1121,7 +1121,7 @@
 
 static struct platform_driver ethoc_driver = {
 	.probe   = ethoc_probe,
-	.remove  = ethoc_remove,
+	.remove  = __devexit_p(ethoc_remove),
 	.suspend = ethoc_suspend,
 	.resume  = ethoc_resume,
 	.driver  = {
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 391a553..768b840 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -118,6 +118,8 @@
 #define FEC_ENET_MII	((uint)0x00800000)	/* MII interrupt */
 #define FEC_ENET_EBERR	((uint)0x00400000)	/* SDMA bus error */
 
+#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII)
+
 /* The FEC stores dest/src/type, data, and checksum for receive packets.
  */
 #define PKT_MAXBUF_SIZE		1518
@@ -1213,8 +1215,7 @@
 	writel(0, fep->hwp + FEC_R_DES_ACTIVE);
 
 	/* Enable interrupts we wish to service */
-	writel(FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII,
-			fep->hwp + FEC_IMASK);
+	writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
 }
 
 static void
@@ -1233,8 +1234,8 @@
 	/* Whack a reset.  We should wait for this. */
 	writel(1, fep->hwp + FEC_ECNTRL);
 	udelay(10);
-
 	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
+	writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
 }
 
 static int __devinit
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 9ef6a9d..4da05b1 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -89,8 +89,10 @@
 #define DEV_HAS_MSI_X              0x0000080  /* device supports MSI-X */
 #define DEV_HAS_POWER_CNTRL        0x0000100  /* device supports power savings */
 #define DEV_HAS_STATISTICS_V1      0x0000200  /* device supports hw statistics version 1 */
-#define DEV_HAS_STATISTICS_V2      0x0000600  /* device supports hw statistics version 2 */
-#define DEV_HAS_STATISTICS_V3      0x0000e00  /* device supports hw statistics version 3 */
+#define DEV_HAS_STATISTICS_V2      0x0000400  /* device supports hw statistics version 2 */
+#define DEV_HAS_STATISTICS_V3      0x0000800  /* device supports hw statistics version 3 */
+#define DEV_HAS_STATISTICS_V12     0x0000600  /* device supports hw statistics version 1 and 2 */
+#define DEV_HAS_STATISTICS_V123    0x0000e00  /* device supports hw statistics version 1, 2, and 3 */
 #define DEV_HAS_TEST_EXTENDED      0x0001000  /* device supports extended diagnostic test */
 #define DEV_HAS_MGMT_UNIT          0x0002000  /* device supports management unit */
 #define DEV_HAS_CORRECT_MACADDR    0x0004000  /* device supports correct mac address order */
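
The forcedeth flag split above matters because the old V2/V3 values were composite masks (0x600, 0xe00), so a test like `flags & DEV_HAS_STATISTICS_V2` also fired on V1-only hardware. With single-bit V2/V3 plus explicit V12/V123 composites, each test means what it says; a quick standalone check:

```c
#include <stdio.h>

#define DEV_HAS_STATISTICS_V1	0x0000200
#define DEV_HAS_STATISTICS_V2	0x0000400
#define DEV_HAS_STATISTICS_V3	0x0000800
#define DEV_HAS_STATISTICS_V12	(DEV_HAS_STATISTICS_V1 | DEV_HAS_STATISTICS_V2)
#define DEV_HAS_STATISTICS_V123	(DEV_HAS_STATISTICS_V12 | DEV_HAS_STATISTICS_V3)

int main(void)
{
	unsigned long v1_only = DEV_HAS_STATISTICS_V1;

	/* with the single-bit V2 this now correctly prints "no" */
	printf("V1-only device matches V2 test: %s\n",
	       (v1_only & DEV_HAS_STATISTICS_V2) ? "yes" : "no");
	/* composites reproduce the old 0x600 / 0xe00 values */
	printf("V12 composite: 0x%07x, V123 composite: 0x%07x\n",
	       DEV_HAS_STATISTICS_V12, DEV_HAS_STATISTICS_V123);
	return 0;
}
```
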
@@ -6067,111 +6069,111 @@
 	},
 	{	/* MCP55 Ethernet Controller */
 		PCI_DEVICE(0x10DE, 0x0372),
-		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX,
+		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX,
 	},
 	{	/* MCP55 Ethernet Controller */
 		PCI_DEVICE(0x10DE, 0x0373),
-		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX,
+		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX,
 	},
 	{	/* MCP61 Ethernet Controller */
 		PCI_DEVICE(0x10DE, 0x03E5),
-		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
+		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
 	},
 	{	/* MCP61 Ethernet Controller */
 		PCI_DEVICE(0x10DE, 0x03E6),
-		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
+		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
 	},
 	{	/* MCP61 Ethernet Controller */
 		PCI_DEVICE(0x10DE, 0x03EE),
-		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
+		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
 	},
 	{	/* MCP61 Ethernet Controller */
 		PCI_DEVICE(0x10DE, 0x03EF),
-		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
+		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
 	},
 	{	/* MCP65 Ethernet Controller */
 		PCI_DEVICE(0x10DE, 0x0450),
-		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
+		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
 	},
 	{	/* MCP65 Ethernet Controller */
 		PCI_DEVICE(0x10DE, 0x0451),
-		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
+		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
 	},
 	{	/* MCP65 Ethernet Controller */
 		PCI_DEVICE(0x10DE, 0x0452),
-		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
+		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
 	},
 	{	/* MCP65 Ethernet Controller */
 		PCI_DEVICE(0x10DE, 0x0453),
-		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
+		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
 	},
 	{	/* MCP67 Ethernet Controller */
 		PCI_DEVICE(0x10DE, 0x054C),
-		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
+		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
 	},
 	{	/* MCP67 Ethernet Controller */
 		PCI_DEVICE(0x10DE, 0x054D),
-		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
+		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
 	},
 	{	/* MCP67 Ethernet Controller */
 		PCI_DEVICE(0x10DE, 0x054E),
-		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
+		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
 	},
 	{	/* MCP67 Ethernet Controller */
 		PCI_DEVICE(0x10DE, 0x054F),
-		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
+		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
 	},
 	{	/* MCP73 Ethernet Controller */
 		PCI_DEVICE(0x10DE, 0x07DC),
-		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
+		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
 	},
 	{	/* MCP73 Ethernet Controller */
 		PCI_DEVICE(0x10DE, 0x07DD),
-		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
+		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
 	},
 	{	/* MCP73 Ethernet Controller */
 		PCI_DEVICE(0x10DE, 0x07DE),
-		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
+		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
 	},
 	{	/* MCP73 Ethernet Controller */
 		PCI_DEVICE(0x10DE, 0x07DF),
-		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
+		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
 	},
 	{	/* MCP77 Ethernet Controller */
 		PCI_DEVICE(0x10DE, 0x0760),
-		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
+		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
 	},
 	{	/* MCP77 Ethernet Controller */
 		PCI_DEVICE(0x10DE, 0x0761),
-		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
+		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
 	},
 	{	/* MCP77 Ethernet Controller */
 		PCI_DEVICE(0x10DE, 0x0762),
-		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
+		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
 	},
 	{	/* MCP77 Ethernet Controller */
 		PCI_DEVICE(0x10DE, 0x0763),
-		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
+		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
 	},
 	{	/* MCP79 Ethernet Controller */
 		PCI_DEVICE(0x10DE, 0x0AB0),
-		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
+		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
 	},
 	{	/* MCP79 Ethernet Controller */
 		PCI_DEVICE(0x10DE, 0x0AB1),
-		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
+		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
 	},
 	{	/* MCP79 Ethernet Controller */
 		PCI_DEVICE(0x10DE, 0x0AB2),
-		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
+		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
 	},
 	{	/* MCP79 Ethernet Controller */
 		PCI_DEVICE(0x10DE, 0x0AB3),
-		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
+		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
 	},
 	{	/* MCP89 Ethernet Controller */
 		PCI_DEVICE(0x10DE, 0x0D7D),
-		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX,
+		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX,
 	},
 	{0,},
 };
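
The combined masks introduced at the top of this hunk are plain unions of the per-version bits: 0x0000600 is the version-1 bit (presumably 0x0000200, defined above this excerpt) OR-ed with the version-2 bit (presumably 0x0000400), and 0x0000e00 adds the 0x0000800 version-3 bit. A device entry therefore still answers "supports at least version N" with a single AND. A minimal consumption sketch (the stats_version field is illustrative, not a name from forcedeth):

	/* sketch: derive the newest supported stats version from driver_data */
	u32 flags = id->driver_data;

	if (flags & DEV_HAS_STATISTICS_V3)	/* set by _V3 and _V123 */
		np->stats_version = 3;
	else if (flags & DEV_HAS_STATISTICS_V2)	/* set by _V12 and _V123 too */
		np->stats_version = 2;
	else if (flags & DEV_HAS_STATISTICS_V1)	/* assumed to be 0x0000200 */
		np->stats_version = 1;
	else
		np->stats_version = 0;
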
diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c
index acbf0d0..ce587f4 100644
--- a/drivers/net/hp100.c
+++ b/drivers/net/hp100.c
@@ -720,9 +720,10 @@
 		/* Conversion to new PCI API :
 		 * Pages are always aligned and zeroed, no need to do it ourselves.
 		 * Doc says should be OK for EISA bus as well - Jean II */
-		if ((lp->page_vaddr_algn = pci_alloc_consistent(lp->pci_dev, MAX_RINGSIZE, &page_baddr)) == NULL) {
+		lp->page_vaddr_algn = pci_alloc_consistent(lp->pci_dev, MAX_RINGSIZE, &page_baddr);
+		if (!lp->page_vaddr_algn) {
 			err = -ENOMEM;
-			goto out2;
+			goto out_mem_ptr;
 		}
 		lp->whatever_offset = ((u_long) page_baddr) - ((u_long) lp->page_vaddr_algn);
 
@@ -798,6 +799,7 @@
 		pci_free_consistent(lp->pci_dev, MAX_RINGSIZE + 0x0f,
 				    lp->page_vaddr_algn,
 				    virt_to_whatever(dev, lp->page_vaddr_algn));
+out_mem_ptr:
 	if (mem_ptr_virt)
 		iounmap(mem_ptr_virt);
 out2:
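
The label rework above is the standard kernel unwind idiom: one label per acquired resource, released in reverse order of acquisition, so a failure at any step jumps to the label that frees exactly what exists so far. The new out_mem_ptr label slots in between the ring-buffer free and out2, so the iounmap() now also runs when pci_alloc_consistent() fails. A generic sketch of the pattern (all names hypothetical):

	a = alloc_a();
	if (!a)
		goto out;
	b = alloc_b();
	if (!b)
		goto out_free_a;	/* only a is held at this point */
	c = alloc_c();
	if (!c)
		goto out_free_b;	/* a and b are held */

	return 0;

out_free_b:
	free_b(b);
out_free_a:
	free_a(a);
out:
	return -ENOMEM;
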
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
index 06251a9..187622f 100644
--- a/drivers/net/igb/e1000_82575.c
+++ b/drivers/net/igb/e1000_82575.c
@@ -63,6 +63,7 @@
 static s32  igb_reset_init_script_82575(struct e1000_hw *);
 static s32  igb_read_mac_addr_82575(struct e1000_hw *);
 static s32  igb_set_pcie_completion_timeout(struct e1000_hw *hw);
+static s32  igb_reset_mdicnfg_82580(struct e1000_hw *hw);
 
 static const u16 e1000_82580_rxpbs_table[] =
 	{ 36, 72, 144, 1, 2, 4, 8, 16,
@@ -70,6 +71,35 @@
 #define E1000_82580_RXPBS_TABLE_SIZE \
 	(sizeof(e1000_82580_rxpbs_table)/sizeof(u16))
 
+/**
+ *  igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO
+ *  @hw: pointer to the HW structure
+ *
+ *  Called to determine if the I2C pins are being used for I2C or as an
+ *  external MDIO interface since the two options are mutually exclusive.
+ **/
+static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw)
+{
+	u32 reg = 0;
+	bool ext_mdio = false;
+
+	switch (hw->mac.type) {
+	case e1000_82575:
+	case e1000_82576:
+		reg = rd32(E1000_MDIC);
+		ext_mdio = !!(reg & E1000_MDIC_DEST);
+		break;
+	case e1000_82580:
+	case e1000_i350:
+		reg = rd32(E1000_MDICNFG);
+		ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO);
+		break;
+	default:
+		break;
+	}
+	return ext_mdio;
+}
+
 static s32 igb_get_invariants_82575(struct e1000_hw *hw)
 {
 	struct e1000_phy_info *phy = &hw->phy;
@@ -130,27 +160,15 @@
 	switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
 	case E1000_CTRL_EXT_LINK_MODE_SGMII:
 		dev_spec->sgmii_active = true;
-		ctrl_ext |= E1000_CTRL_I2C_ENA;
 		break;
 	case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
 	case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
 		hw->phy.media_type = e1000_media_type_internal_serdes;
-		ctrl_ext |= E1000_CTRL_I2C_ENA;
 		break;
 	default:
-		ctrl_ext &= ~E1000_CTRL_I2C_ENA;
 		break;
 	}
 
-	wr32(E1000_CTRL_EXT, ctrl_ext);
-
-	/*
-	 * if using i2c make certain the MDICNFG register is cleared to prevent
-	 * communications from being misrouted to the mdic registers
-	 */
-	if ((ctrl_ext & E1000_CTRL_I2C_ENA) && (hw->mac.type == e1000_82580))
-		wr32(E1000_MDICNFG, 0);
-
 	/* Set mta register count */
 	mac->mta_reg_count = 128;
 	/* Set rar entry count */
@@ -228,19 +246,29 @@
 	phy->autoneg_mask        = AUTONEG_ADVERTISE_SPEED_DEFAULT;
 	phy->reset_delay_us      = 100;
 
+	ctrl_ext = rd32(E1000_CTRL_EXT);
+
 	/* PHY function pointers */
 	if (igb_sgmii_active_82575(hw)) {
-		phy->ops.reset              = igb_phy_hw_reset_sgmii_82575;
-		phy->ops.read_reg           = igb_read_phy_reg_sgmii_82575;
-		phy->ops.write_reg          = igb_write_phy_reg_sgmii_82575;
-	} else if (hw->mac.type >= e1000_82580) {
-		phy->ops.reset              = igb_phy_hw_reset;
-		phy->ops.read_reg           = igb_read_phy_reg_82580;
-		phy->ops.write_reg          = igb_write_phy_reg_82580;
+		phy->ops.reset      = igb_phy_hw_reset_sgmii_82575;
+		ctrl_ext |= E1000_CTRL_I2C_ENA;
 	} else {
-		phy->ops.reset              = igb_phy_hw_reset;
-		phy->ops.read_reg           = igb_read_phy_reg_igp;
-		phy->ops.write_reg          = igb_write_phy_reg_igp;
+		phy->ops.reset      = igb_phy_hw_reset;
+		ctrl_ext &= ~E1000_CTRL_I2C_ENA;
+	}
+
+	wr32(E1000_CTRL_EXT, ctrl_ext);
+	igb_reset_mdicnfg_82580(hw);
+
+	if (igb_sgmii_active_82575(hw) && !igb_sgmii_uses_mdio_82575(hw)) {
+		phy->ops.read_reg   = igb_read_phy_reg_sgmii_82575;
+		phy->ops.write_reg  = igb_write_phy_reg_sgmii_82575;
+	} else if (hw->mac.type >= e1000_82580) {
+		phy->ops.read_reg   = igb_read_phy_reg_82580;
+		phy->ops.write_reg  = igb_write_phy_reg_82580;
+	} else {
+		phy->ops.read_reg   = igb_read_phy_reg_igp;
+		phy->ops.write_reg  = igb_write_phy_reg_igp;
 	}
 
 	/* set lan id */
@@ -400,6 +428,7 @@
 	s32  ret_val = 0;
 	u16 phy_id;
 	u32 ctrl_ext;
+	u32 mdic;
 
 	/*
 	 * For SGMII PHYs, we try the list of possible addresses until
@@ -414,6 +443,29 @@
 		goto out;
 	}
 
+	if (igb_sgmii_uses_mdio_82575(hw)) {
+		switch (hw->mac.type) {
+		case e1000_82575:
+		case e1000_82576:
+			mdic = rd32(E1000_MDIC);
+			mdic &= E1000_MDIC_PHY_MASK;
+			phy->addr = mdic >> E1000_MDIC_PHY_SHIFT;
+			break;
+		case e1000_82580:
+		case e1000_i350:
+			mdic = rd32(E1000_MDICNFG);
+			mdic &= E1000_MDICNFG_PHY_MASK;
+			phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT;
+			break;
+		default:
+			ret_val = -E1000_ERR_PHY;
+			goto out;
+			break;
+		}
+		ret_val = igb_get_phy_id(hw);
+		goto out;
+	}
+
 	/* Power on sgmii phy if it is disabled */
 	ctrl_ext = rd32(E1000_CTRL_EXT);
 	wr32(E1000_CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA);
@@ -1501,6 +1553,43 @@
 }
 
 /**
+ *  igb_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits
+ *  @hw: pointer to the HW structure
+ *
+ *  This resets the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on
+ *  the values found in the EEPROM.  This addresses an issue in which these
+ *  bits are not restored from EEPROM after reset.
+ **/
+static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+	u32 mdicnfg;
+	u16 nvm_data;
+
+	if (hw->mac.type != e1000_82580)
+		goto out;
+	if (!igb_sgmii_active_82575(hw))
+		goto out;
+
+	ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
+				   NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
+				   &nvm_data);
+	if (ret_val) {
+		hw_dbg("NVM Read Error\n");
+		goto out;
+	}
+
+	mdicnfg = rd32(E1000_MDICNFG);
+	if (nvm_data & NVM_WORD24_EXT_MDIO)
+		mdicnfg |= E1000_MDICNFG_EXT_MDIO;
+	if (nvm_data & NVM_WORD24_COM_MDIO)
+		mdicnfg |= E1000_MDICNFG_COM_MDIO;
+	wr32(E1000_MDICNFG, mdicnfg);
+out:
+	return ret_val;
+}
+
+/**
  *  igb_reset_hw_82580 - Reset hardware
  *  @hw: pointer to the HW structure
  *
@@ -1575,6 +1664,10 @@
 	wr32(E1000_IMC, 0xffffffff);
 	icr = rd32(E1000_ICR);
 
+	ret_val = igb_reset_mdicnfg_82580(hw);
+	if (ret_val)
+		hw_dbg("Could not reset MDICNFG based on EEPROM\n");
+
 	/* Install any alternate MAC address into RAR0 */
 	ret_val = igb_check_alt_mac_addr(hw);
 
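
igb_reset_mdicnfg_82580() reads one NVM word per LAN function. The NVM_82580_LAN_FUNC_OFFSET() macro it relies on (defined in the e1000_defines.h hunk below) expands to (a ? 0x40 + 0x40 * a : 0), so the per-port words land at:

	/*
	 * func 0 -> 0x000, func 1 -> 0x080, func 2 -> 0x0c0, func 3 -> 0x100;
	 * note the 0x80 gap after function 0, then 0x40 per function.
	 * Each port reads its own copy of the INIT_CONTROL3 word:
	 */
	offset = NVM_INIT_CONTROL3_PORT_A +
		 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func);
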
diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h
index 90bc29d..bbd2ec3 100644
--- a/drivers/net/igb/e1000_defines.h
+++ b/drivers/net/igb/e1000_defines.h
@@ -468,6 +468,11 @@
 
 #define E1000_TIMINCA_16NS_SHIFT 24
 
+#define E1000_MDICNFG_EXT_MDIO    0x80000000      /* MDI ext/int destination */
+#define E1000_MDICNFG_COM_MDIO    0x40000000      /* MDI shared w/ lan 0 */
+#define E1000_MDICNFG_PHY_MASK    0x03E00000
+#define E1000_MDICNFG_PHY_SHIFT   21
+
 /* PCI Express Control */
 #define E1000_GCR_CMPL_TMOUT_MASK       0x0000F000
 #define E1000_GCR_CMPL_TMOUT_10ms       0x00001000
@@ -565,6 +570,10 @@
 
 #define NVM_82580_LAN_FUNC_OFFSET(a) (a ? (0x40 + (0x40 * a)) : 0)
 
+/* Mask bits for fields in Word 0x24 of the NVM */
+#define NVM_WORD24_COM_MDIO         0x0008 /* MDIO interface shared */
+#define NVM_WORD24_EXT_MDIO         0x0004 /* MDIO accesses routed external */
+
 /* Mask bits for fields in Word 0x0f of the NVM */
 #define NVM_WORD0F_PAUSE_MASK       0x3000
 #define NVM_WORD0F_ASM_DIR          0x2000
@@ -698,12 +707,17 @@
 #define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X    0x0800
 
 /* MDI Control */
+#define E1000_MDIC_DATA_MASK 0x0000FFFF
+#define E1000_MDIC_REG_MASK  0x001F0000
 #define E1000_MDIC_REG_SHIFT 16
+#define E1000_MDIC_PHY_MASK  0x03E00000
 #define E1000_MDIC_PHY_SHIFT 21
 #define E1000_MDIC_OP_WRITE  0x04000000
 #define E1000_MDIC_OP_READ   0x08000000
 #define E1000_MDIC_READY     0x10000000
+#define E1000_MDIC_INT_EN    0x20000000
 #define E1000_MDIC_ERROR     0x40000000
+#define E1000_MDIC_DEST      0x80000000
 
 /* SerDes Control */
 #define E1000_GEN_CTL_READY             0x80000000
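
With the field masks now spelled out, a complete MDIC access can be assembled from them. A reduced sketch of the read sequence the layout implies (the driver's real accessor adds a bounded retry count and locking; rd32()/wr32() as used elsewhere in this diff):

	u32 mdic;

	/* data[15:0], reg[20:16], phy[25:21], op[27:26], status bits on top */
	mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) & E1000_MDIC_REG_MASK) |
	       ((phy_addr << E1000_MDIC_PHY_SHIFT) & E1000_MDIC_PHY_MASK) |
	       E1000_MDIC_OP_READ;
	wr32(E1000_MDIC, mdic);

	do {				/* sketch: poll until hardware is done */
		udelay(50);
		mdic = rd32(E1000_MDIC);
	} while (!(mdic & E1000_MDIC_READY));

	if (mdic & E1000_MDIC_ERROR)
		return -E1000_ERR_PHY;
	*data = mdic & E1000_MDIC_DATA_MASK;
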
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 9465617..df5dcd2 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -1290,7 +1290,13 @@
 	wr32(E1000_IAM, 0);
 	wr32(E1000_IMC, ~0);
 	wrfl();
-	synchronize_irq(adapter->pdev->irq);
+	if (adapter->msix_entries) {
+		int i;
+		for (i = 0; i < adapter->num_q_vectors; i++)
+			synchronize_irq(adapter->msix_entries[i].vector);
+	} else {
+		synchronize_irq(adapter->pdev->irq);
+	}
 }
 
 /**
@@ -1722,6 +1728,15 @@
 	u16 eeprom_apme_mask = IGB_EEPROM_APME;
 	u32 part_num;
 
+	/* Catch broken hardware that put the wrong VF device ID in
+	 * the PCIe SR-IOV capability.
+	 */
+	if (pdev->is_virtfn) {
+		WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
+		     pci_name(pdev), pdev->vendor, pdev->device);
+		return -EINVAL;
+	}
+
 	err = pci_enable_device_mem(pdev);
 	if (err)
 		return err;
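
The synchronize_irq() change above matters because under MSI-X every queue vector is a separate Linux IRQ; waiting only on pdev->irq (the legacy/MSI vector) lets a handler still running on another vector race with the teardown that follows. The fix as a standalone helper (a sketch; the function name is hypothetical, the field names come from the hunk above):

	static void igb_synchronize_all_irqs(struct igb_adapter *adapter)
	{
		if (adapter->msix_entries) {
			int i;

			for (i = 0; i < adapter->num_q_vectors; i++)
				synchronize_irq(adapter->msix_entries[i].vector);
		} else {
			synchronize_irq(adapter->pdev->irq);
		}
	}
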
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index 5e2b2a8..048595b 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -2751,7 +2751,7 @@
 		dev_info(&pdev->dev,
 			 "PF still in reset state, assigning new address."
 			 " Is the PF interface up?\n");
-		random_ether_addr(hw->mac.addr);
+		dev_hw_addr_random(adapter->netdev, hw->mac.addr);
 	} else {
 		err = hw->mac.ops.read_mac_addr(hw);
 		if (err) {
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index d67e484..850ca1c 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -2848,9 +2848,7 @@
 	unsigned short ss_device = 0x0000;
 	int ret = 0;
 
-	dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
-
-	while (dev != NULL) {
+	for_each_pci_dev(dev) {
 		struct smsc_ircc_subsystem_configuration *conf;
 
 		/*
@@ -2899,7 +2897,6 @@
 					ret = -ENODEV;
 			}
 		}
-		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
 	}
 
 	return ret;
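
for_each_pci_dev() is only a wrapper for the iteration the old code open-coded; in this era's <linux/pci.h> it is defined as:

	#define for_each_pci_dev(d) \
		while ((d = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)

so the reference counting is unchanged: pci_get_device() drops the reference on the device passed in and holds one on the device it returns.
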
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index da54b38..dcebc82 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -54,14 +54,14 @@
 				sizeof(((struct ixgbe_adapter *)0)->m), \
 				offsetof(struct ixgbe_adapter, m)
 #define IXGBE_NETDEV_STAT(m)	NETDEV_STATS, \
-				sizeof(((struct net_device *)0)->m), \
-				offsetof(struct net_device, m) - offsetof(struct net_device, stats)
+				sizeof(((struct rtnl_link_stats64 *)0)->m), \
+				offsetof(struct rtnl_link_stats64, m)
 
 static struct ixgbe_stats ixgbe_gstrings_stats[] = {
-	{"rx_packets", IXGBE_NETDEV_STAT(stats.rx_packets)},
-	{"tx_packets", IXGBE_NETDEV_STAT(stats.tx_packets)},
-	{"rx_bytes", IXGBE_NETDEV_STAT(stats.rx_bytes)},
-	{"tx_bytes", IXGBE_NETDEV_STAT(stats.tx_bytes)},
+	{"rx_packets", IXGBE_NETDEV_STAT(rx_packets)},
+	{"tx_packets", IXGBE_NETDEV_STAT(tx_packets)},
+	{"rx_bytes", IXGBE_NETDEV_STAT(rx_bytes)},
+	{"tx_bytes", IXGBE_NETDEV_STAT(tx_bytes)},
 	{"rx_pkts_nic", IXGBE_STAT(stats.gprc)},
 	{"tx_pkts_nic", IXGBE_STAT(stats.gptc)},
 	{"rx_bytes_nic", IXGBE_STAT(stats.gorc)},
@@ -69,27 +69,27 @@
 	{"lsc_int", IXGBE_STAT(lsc_int)},
 	{"tx_busy", IXGBE_STAT(tx_busy)},
 	{"non_eop_descs", IXGBE_STAT(non_eop_descs)},
-	{"rx_errors", IXGBE_NETDEV_STAT(stats.rx_errors)},
-	{"tx_errors", IXGBE_NETDEV_STAT(stats.tx_errors)},
-	{"rx_dropped", IXGBE_NETDEV_STAT(stats.rx_dropped)},
-	{"tx_dropped", IXGBE_NETDEV_STAT(stats.tx_dropped)},
-	{"multicast", IXGBE_NETDEV_STAT(stats.multicast)},
+	{"rx_errors", IXGBE_NETDEV_STAT(rx_errors)},
+	{"tx_errors", IXGBE_NETDEV_STAT(tx_errors)},
+	{"rx_dropped", IXGBE_NETDEV_STAT(rx_dropped)},
+	{"tx_dropped", IXGBE_NETDEV_STAT(tx_dropped)},
+	{"multicast", IXGBE_NETDEV_STAT(multicast)},
 	{"broadcast", IXGBE_STAT(stats.bprc)},
 	{"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) },
-	{"collisions", IXGBE_NETDEV_STAT(stats.collisions)},
-	{"rx_over_errors", IXGBE_NETDEV_STAT(stats.rx_over_errors)},
-	{"rx_crc_errors", IXGBE_NETDEV_STAT(stats.rx_crc_errors)},
-	{"rx_frame_errors", IXGBE_NETDEV_STAT(stats.rx_frame_errors)},
+	{"collisions", IXGBE_NETDEV_STAT(collisions)},
+	{"rx_over_errors", IXGBE_NETDEV_STAT(rx_over_errors)},
+	{"rx_crc_errors", IXGBE_NETDEV_STAT(rx_crc_errors)},
+	{"rx_frame_errors", IXGBE_NETDEV_STAT(rx_frame_errors)},
 	{"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)},
 	{"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)},
 	{"fdir_match", IXGBE_STAT(stats.fdirmatch)},
 	{"fdir_miss", IXGBE_STAT(stats.fdirmiss)},
-	{"rx_fifo_errors", IXGBE_NETDEV_STAT(stats.rx_fifo_errors)},
-	{"rx_missed_errors", IXGBE_NETDEV_STAT(stats.rx_missed_errors)},
-	{"tx_aborted_errors", IXGBE_NETDEV_STAT(stats.tx_aborted_errors)},
-	{"tx_carrier_errors", IXGBE_NETDEV_STAT(stats.tx_carrier_errors)},
-	{"tx_fifo_errors", IXGBE_NETDEV_STAT(stats.tx_fifo_errors)},
-	{"tx_heartbeat_errors", IXGBE_NETDEV_STAT(stats.tx_heartbeat_errors)},
+	{"rx_fifo_errors", IXGBE_NETDEV_STAT(rx_fifo_errors)},
+	{"rx_missed_errors", IXGBE_NETDEV_STAT(rx_missed_errors)},
+	{"tx_aborted_errors", IXGBE_NETDEV_STAT(tx_aborted_errors)},
+	{"tx_carrier_errors", IXGBE_NETDEV_STAT(tx_carrier_errors)},
+	{"tx_fifo_errors", IXGBE_NETDEV_STAT(tx_fifo_errors)},
+	{"tx_heartbeat_errors", IXGBE_NETDEV_STAT(tx_heartbeat_errors)},
 	{"tx_timeout_count", IXGBE_STAT(tx_timeout_count)},
 	{"tx_restart_queue", IXGBE_STAT(restart_queue)},
 	{"rx_long_length_errors", IXGBE_STAT(stats.roc)},
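
With IXGBE_NETDEV_STAT() now pointing into struct rtnl_link_stats64, the table offsets index a 64-bit stats snapshot rather than net_device.stats. A sketch of how one entry of a table like this is read back out in the ethtool get-stats path (the sizeof_stat/stat_offset field names are assumptions matching the macro's argument order; base points at either the adapter or the stats snapshot depending on the entry's type tag):

	char *p = base + ixgbe_gstrings_stats[i].stat_offset;

	data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
		   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
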
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 9203759..7d6a415 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -4783,6 +4783,7 @@
 #ifdef CONFIG_IXGBE_DCB
 		/* Default traffic class to use for FCoE */
 		adapter->fcoe.tc = IXGBE_FCOE_DEFTC;
+		adapter->fcoe.up = IXGBE_FCOE_DEFTC;
 #endif
 #endif /* IXGBE_FCOE */
 	}
@@ -6147,21 +6148,26 @@
 	struct ixgbe_adapter *adapter = netdev_priv(dev);
 	int txq = smp_processor_id();
 
+#ifdef IXGBE_FCOE
+	if ((skb->protocol == htons(ETH_P_FCOE)) ||
+	    (skb->protocol == htons(ETH_P_FIP))) {
+		if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
+			txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
+			txq += adapter->ring_feature[RING_F_FCOE].mask;
+			return txq;
+		} else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+			txq = adapter->fcoe.up;
+			return txq;
+		}
+	}
+#endif
+
 	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
 		while (unlikely(txq >= dev->real_num_tx_queues))
 			txq -= dev->real_num_tx_queues;
 		return txq;
 	}
 
-#ifdef IXGBE_FCOE
-	if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
-	    ((skb->protocol == htons(ETH_P_FCOE)) ||
-	     (skb->protocol == htons(ETH_P_FIP)))) {
-		txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
-		txq += adapter->ring_feature[RING_F_FCOE].mask;
-		return txq;
-	}
-#endif
 	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
 		if (skb->priority == TC_PRIO_CONTROL)
 			txq = adapter->ring_feature[RING_F_DCB].indices-1;
@@ -6205,18 +6211,15 @@
 	tx_ring = adapter->tx_ring[skb->queue_mapping];
 
 #ifdef IXGBE_FCOE
-	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
-#ifdef CONFIG_IXGBE_DCB
-		/* for FCoE with DCB, we force the priority to what
-		 * was specified by the switch */
-		if ((skb->protocol == htons(ETH_P_FCOE)) ||
-		    (skb->protocol == htons(ETH_P_FIP))) {
-			tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK
-				      << IXGBE_TX_FLAGS_VLAN_SHIFT);
-			tx_flags |= ((adapter->fcoe.up << 13)
-				     << IXGBE_TX_FLAGS_VLAN_SHIFT);
-		}
-#endif
+	/* for FCoE with DCB, we force the priority to what
+	 * was specified by the switch */
+	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED &&
+	    (skb->protocol == htons(ETH_P_FCOE) ||
+	     skb->protocol == htons(ETH_P_FIP))) {
+		tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK
+			      << IXGBE_TX_FLAGS_VLAN_SHIFT);
+		tx_flags |= ((adapter->fcoe.up << 13)
+			      << IXGBE_TX_FLAGS_VLAN_SHIFT);
 		/* flag for FCoE offloads */
 		if (skb->protocol == htons(ETH_P_FCOE))
 			tx_flags |= IXGBE_TX_FLAGS_FCOE;
@@ -6536,6 +6539,15 @@
 #endif
 	u32 part_num, eec;
 
+	/* Catch broken hardware that put the wrong VF device ID in
+	 * the PCIe SR-IOV capability.
+	 */
+	if (pdev->is_virtfn) {
+		WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
+		     pci_name(pdev), pdev->vendor, pdev->device);
+		return -EINVAL;
+	}
+
 	err = pci_enable_device_mem(pdev);
 	if (err)
 		return err;
@@ -6549,8 +6561,8 @@
 			err = dma_set_coherent_mask(&pdev->dev,
 						    DMA_BIT_MASK(32));
 			if (err) {
-				e_dev_err("No usable DMA configuration, "
-					  "aborting\n");
+				dev_err(&pdev->dev,
+					"No usable DMA configuration, aborting\n");
 				goto err_dma;
 			}
 		}
@@ -6560,7 +6572,8 @@
 	err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
 	                                   IORESOURCE_MEM), ixgbe_driver_name);
 	if (err) {
-		e_dev_err("pci_request_selected_regions failed 0x%x\n", err);
+		dev_err(&pdev->dev,
+			"pci_request_selected_regions failed 0x%x\n", err);
 		goto err_pci_reg;
 	}
 
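
The error-message churn above sits inside the standard two-step DMA-mask negotiation: try 64-bit streaming and coherent masks, and only if either fails drop the whole device to 32-bit. The skeleton, with the 64-bit branch reconstructed as an assumption since it falls outside the visible hunk:

	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;	/* 64-bit DMA usable */
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (!err)
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_dma;
		}
		pci_using_dac = 0;
	}
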
diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c
index af49135..3e291cc 100644
--- a/drivers/net/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ixgbevf/ixgbevf_main.c
@@ -1463,18 +1463,10 @@
 {
 	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_hw *hw = &adapter->hw;
-	struct net_device *v_netdev;
 
 	/* add VID to filter table */
 	if (hw->mac.ops.set_vfta)
 		hw->mac.ops.set_vfta(hw, vid, 0, true);
-	/*
-	 * Copy feature flags from netdev to the vlan netdev for this vid.
-	 * This allows things like TSO to bubble down to our vlan device.
-	 */
-	v_netdev = vlan_group_get_device(adapter->vlgrp, vid);
-	v_netdev->features |= adapter->netdev->features;
-	vlan_group_set_device(adapter->vlgrp, vid, v_netdev);
 }
 
 static void ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
@@ -2229,7 +2221,7 @@
 	if (err) {
 		dev_info(&pdev->dev,
 		         "PF still in reset state, assigning new address\n");
-		random_ether_addr(hw->mac.addr);
+		dev_hw_addr_random(adapter->netdev, hw->mac.addr);
 	} else {
 		err = hw->mac.ops.init_hw(hw);
 		if (err) {
@@ -3402,7 +3394,6 @@
 	/* setup the private structure */
 	err = ixgbevf_sw_init(adapter);
 
-#ifdef MAX_SKB_FRAGS
 	netdev->features = NETIF_F_SG |
 			   NETIF_F_IP_CSUM |
 			   NETIF_F_HW_VLAN_TX |
@@ -3416,13 +3407,12 @@
 	netdev->vlan_features |= NETIF_F_TSO;
 	netdev->vlan_features |= NETIF_F_TSO6;
 	netdev->vlan_features |= NETIF_F_IP_CSUM;
+	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
 	netdev->vlan_features |= NETIF_F_SG;
 
 	if (pci_using_dac)
 		netdev->features |= NETIF_F_HIGHDMA;
 
-#endif /* MAX_SKB_FRAGS */
-
 	/* The HW MAC address was set and/or determined in sw_init */
 	memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
 	memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
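
Both VF drivers swap random_ether_addr() for dev_hw_addr_random(). The practical difference, sketched below from the helper's purpose rather than quoted from netdevice.h: besides generating the random address, the netdev is marked as carrying one, so userspace can tell it apart from a factory-programmed MAC.

	/* approximate effect of dev_hw_addr_random(dev, hwaddr) */
	dev->addr_assign_type |= NET_ADDR_RANDOM;  /* assumption: flag name */
	random_ether_addr(hwaddr);
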
diff --git a/drivers/net/ks8842.c b/drivers/net/ks8842.c
index 634dad1..928b2b8 100644
--- a/drivers/net/ks8842.c
+++ b/drivers/net/ks8842.c
@@ -30,11 +30,19 @@
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
 #include <linux/ks8842.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
 
 #define DRV_NAME "ks8842"
 
 /* Timberdale specific Registers */
-#define REG_TIMB_RST	0x1c
+#define REG_TIMB_RST		0x1c
+#define REG_TIMB_FIFO		0x20
+#define REG_TIMB_ISR		0x24
+#define REG_TIMB_IER		0x28
+#define REG_TIMB_IAR		0x2C
+#define REQ_TIMB_DMA_RESUME	0x30
 
 /* KS8842 registers */
 
@@ -77,6 +85,15 @@
 #define IRQ_RX_ERROR	0x0080
 #define ENABLED_IRQS	(IRQ_LINK_CHANGE | IRQ_TX | IRQ_RX | IRQ_RX_STOPPED | \
 		IRQ_TX_STOPPED | IRQ_RX_OVERRUN | IRQ_RX_ERROR)
+/* When running via timberdale in DMA mode, the RX interrupt should be
+   enabled in the KS8842, but not in the FPGA IP, since the IP handles
+   RX DMA internally.
+   TX interrupts are not needed either; TX is handled by the FPGA, and the
+   driver is notified via DMA callbacks.
+*/
+#define ENABLED_IRQS_DMA_IP	(IRQ_LINK_CHANGE | IRQ_RX_STOPPED | \
+	IRQ_TX_STOPPED | IRQ_RX_OVERRUN | IRQ_RX_ERROR)
+#define ENABLED_IRQS_DMA	(ENABLED_IRQS_DMA_IP | IRQ_RX)
 #define REG_ISR		0x02
 #define REG_RXSR	0x04
 #define RXSR_VALID	0x8000
@@ -119,6 +136,28 @@
 #define	MICREL_KS884X		0x01	/* 0=Timberdale(FPGA), 1=Micrel */
 #define	KS884X_16BIT		0x02	/*  1=16bit, 0=32bit */
 
+#define DMA_BUFFER_SIZE		2048
+
+struct ks8842_tx_dma_ctl {
+	struct dma_chan *chan;
+	struct dma_async_tx_descriptor *adesc;
+	void *buf;
+	struct scatterlist sg;
+	int channel;
+};
+
+struct ks8842_rx_dma_ctl {
+	struct dma_chan *chan;
+	struct dma_async_tx_descriptor *adesc;
+	struct sk_buff  *skb;
+	struct scatterlist sg;
+	struct tasklet_struct tasklet;
+	int channel;
+};
+
+#define KS8842_USE_DMA(adapter) (((adapter)->dma_tx.channel != -1) && \
+	 ((adapter)->dma_rx.channel != -1))
+
 struct ks8842_adapter {
 	void __iomem	*hw_addr;
 	int		irq;
@@ -127,8 +166,19 @@
 	spinlock_t	lock; /* spinlock to be interrupt safe */
 	struct work_struct timeout_work;
 	struct net_device *netdev;
+	struct device *dev;
+	struct ks8842_tx_dma_ctl	dma_tx;
+	struct ks8842_rx_dma_ctl	dma_rx;
 };
 
+static void ks8842_dma_rx_cb(void *data);
+static void ks8842_dma_tx_cb(void *data);
+
+static inline void ks8842_resume_dma(struct ks8842_adapter *adapter)
+{
+	iowrite32(1, adapter->hw_addr + REQ_TIMB_DMA_RESUME);
+}
+
 static inline void ks8842_select_bank(struct ks8842_adapter *adapter, u16 bank)
 {
 	iowrite16(bank, adapter->hw_addr + REG_SELECT_BANK);
@@ -282,10 +332,6 @@
 	/* restart port auto-negotiation */
 	ks8842_enable_bits(adapter, 49, 1 << 13, REG_P1CR4);
 
-	if (!(adapter->conf_flags & MICREL_KS884X))
-		/* only advertise 10Mbps */
-		ks8842_clear_bits(adapter, 49, 3 << 2, REG_P1CR4);
-
 	/* Enable the transmitter */
 	ks8842_enable_tx(adapter);
 
@@ -296,8 +342,19 @@
 	ks8842_write16(adapter, 18, 0xffff, REG_ISR);
 
 	/* enable interrupts */
-	ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER);
-
+	if (KS8842_USE_DMA(adapter)) {
+		/* When running in DMA mode the RX interrupt is not enabled in
+		   timberdale because RX data is received via DMA callbacks;
+		   it must still be enabled in the KS8842 because it signals
+		   to timberdale when there is RX data for its DMA FIFOs */
+		iowrite16(ENABLED_IRQS_DMA_IP, adapter->hw_addr + REG_TIMB_IER);
+		ks8842_write16(adapter, 18, ENABLED_IRQS_DMA, REG_IER);
+	} else {
+		if (!(adapter->conf_flags & MICREL_KS884X))
+			iowrite16(ENABLED_IRQS,
+				adapter->hw_addr + REG_TIMB_IER);
+		ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER);
+	}
 	/* enable the switch */
 	ks8842_write16(adapter, 32, 0x1, REG_SW_ID_AND_ENABLE);
 }
@@ -370,6 +427,53 @@
 	return ks8842_read16(adapter, 16, REG_TXMIR) & 0x1fff;
 }
 
+static int ks8842_tx_frame_dma(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct ks8842_adapter *adapter = netdev_priv(netdev);
+	struct ks8842_tx_dma_ctl *ctl = &adapter->dma_tx;
+	u8 *buf = ctl->buf;
+
+	if (ctl->adesc) {
+		netdev_dbg(netdev, "%s: TX ongoing\n", __func__);
+		/* transfer ongoing */
+		return NETDEV_TX_BUSY;
+	}
+
+	sg_dma_len(&ctl->sg) = skb->len + sizeof(u32);
+
+	/* copy data to the TX buffer */
+	/* the control word, enable IRQ, port 1 and the length */
+	*buf++ = 0x00;
+	*buf++ = 0x01; /* Port 1 */
+	*buf++ = skb->len & 0xff;
+	*buf++ = (skb->len >> 8) & 0xff;
+	skb_copy_from_linear_data(skb, buf, skb->len);
+
+	dma_sync_single_range_for_device(adapter->dev,
+		sg_dma_address(&ctl->sg), 0, sg_dma_len(&ctl->sg),
+		DMA_TO_DEVICE);
+
+	/* make sure the length is a multiple of 4 */
+	if (sg_dma_len(&ctl->sg) % 4)
+		sg_dma_len(&ctl->sg) += 4 - sg_dma_len(&ctl->sg) % 4;
+
+	ctl->adesc = ctl->chan->device->device_prep_slave_sg(ctl->chan,
+		&ctl->sg, 1, DMA_TO_DEVICE,
+		DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
+	if (!ctl->adesc)
+		return NETDEV_TX_BUSY;
+
+	ctl->adesc->callback_param = netdev;
+	ctl->adesc->callback = ks8842_dma_tx_cb;
+	ctl->adesc->tx_submit(ctl->adesc);
+
+	netdev->stats.tx_bytes += skb->len;
+
+	dev_kfree_skb(skb);
+
+	return NETDEV_TX_OK;
+}
+
 static int ks8842_tx_frame(struct sk_buff *skb, struct net_device *netdev)
 {
 	struct ks8842_adapter *adapter = netdev_priv(netdev);
@@ -421,6 +525,121 @@
 	return NETDEV_TX_OK;
 }
 
+static void ks8842_update_rx_err_counters(struct net_device *netdev, u32 status)
+{
+	netdev_dbg(netdev, "RX error, status: %x\n", status);
+
+	netdev->stats.rx_errors++;
+	if (status & RXSR_TOO_LONG)
+		netdev->stats.rx_length_errors++;
+	if (status & RXSR_CRC_ERROR)
+		netdev->stats.rx_crc_errors++;
+	if (status & RXSR_RUNT)
+		netdev->stats.rx_frame_errors++;
+}
+
+static void ks8842_update_rx_counters(struct net_device *netdev, u32 status,
+	int len)
+{
+	netdev_dbg(netdev, "RX packet, len: %d\n", len);
+
+	netdev->stats.rx_packets++;
+	netdev->stats.rx_bytes += len;
+	if (status & RXSR_MULTICAST)
+		netdev->stats.multicast++;
+}
+
+static int __ks8842_start_new_rx_dma(struct net_device *netdev)
+{
+	struct ks8842_adapter *adapter = netdev_priv(netdev);
+	struct ks8842_rx_dma_ctl *ctl = &adapter->dma_rx;
+	struct scatterlist *sg = &ctl->sg;
+	int err;
+
+	ctl->skb = netdev_alloc_skb(netdev, DMA_BUFFER_SIZE);
+	if (ctl->skb) {
+		sg_init_table(sg, 1);
+		sg_dma_address(sg) = dma_map_single(adapter->dev,
+			ctl->skb->data, DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
+		err = dma_mapping_error(adapter->dev, sg_dma_address(sg));
+		if (unlikely(err)) {
+			sg_dma_address(sg) = 0;
+			goto out;
+		}
+
+		sg_dma_len(sg) = DMA_BUFFER_SIZE;
+
+		ctl->adesc = ctl->chan->device->device_prep_slave_sg(ctl->chan,
+			sg, 1, DMA_FROM_DEVICE,
+			DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
+
+		if (!ctl->adesc)
+			goto out;
+
+		ctl->adesc->callback_param = netdev;
+		ctl->adesc->callback = ks8842_dma_rx_cb;
+		ctl->adesc->tx_submit(ctl->adesc);
+	} else {
+		err = -ENOMEM;
+		sg_dma_address(sg) = 0;
+		goto out;
+	}
+
+	return err;
+out:
+	if (sg_dma_address(sg))
+		dma_unmap_single(adapter->dev, sg_dma_address(sg),
+			DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
+	sg_dma_address(sg) = 0;
+	if (ctl->skb)
+		dev_kfree_skb(ctl->skb);
+
+	ctl->skb = NULL;
+
+	printk(KERN_ERR DRV_NAME": Failed to start RX DMA: %d\n", err);
+	return err;
+}
+
+static void ks8842_rx_frame_dma_tasklet(unsigned long arg)
+{
+	struct net_device *netdev = (struct net_device *)arg;
+	struct ks8842_adapter *adapter = netdev_priv(netdev);
+	struct ks8842_rx_dma_ctl *ctl = &adapter->dma_rx;
+	struct sk_buff *skb = ctl->skb;
+	dma_addr_t addr = sg_dma_address(&ctl->sg);
+	u32 status;
+
+	ctl->adesc = NULL;
+
+	/* kick next transfer going */
+	__ks8842_start_new_rx_dma(netdev);
+
+	/* now handle the data we got */
+	dma_unmap_single(adapter->dev, addr, DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
+
+	status = *((u32 *)skb->data);
+
+	netdev_dbg(netdev, "%s - rx_data: status: %x\n",
+		__func__, status & 0xffff);
+
+	/* check the status */
+	if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) {
+		int len = (status >> 16) & 0x7ff;
+
+		ks8842_update_rx_counters(netdev, status, len);
+
+		/* reserve 4 bytes which is the status word */
+		skb_reserve(skb, 4);
+		skb_put(skb, len);
+
+		skb->protocol = eth_type_trans(skb, netdev);
+		netif_rx(skb);
+	} else {
+		ks8842_update_rx_err_counters(netdev, status);
+		dev_kfree_skb(skb);
+	}
+}
+
 static void ks8842_rx_frame(struct net_device *netdev,
 	struct ks8842_adapter *adapter)
 {
@@ -444,13 +663,9 @@
 	if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) {
 		struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, len);
 
-		netdev_dbg(netdev, "%s, got package, len: %d\n", __func__, len);
 		if (skb) {
 
-			netdev->stats.rx_packets++;
-			netdev->stats.rx_bytes += len;
-			if (status & RXSR_MULTICAST)
-				netdev->stats.multicast++;
+			ks8842_update_rx_counters(netdev, status, len);
 
 			if (adapter->conf_flags & KS884X_16BIT) {
 				u16 *data16 = (u16 *)skb_put(skb, len);
@@ -476,16 +691,8 @@
 			netif_rx(skb);
 		} else
 			netdev->stats.rx_dropped++;
-	} else {
-		netdev_dbg(netdev, "RX error, status: %x\n", status);
-		netdev->stats.rx_errors++;
-		if (status & RXSR_TOO_LONG)
-			netdev->stats.rx_length_errors++;
-		if (status & RXSR_CRC_ERROR)
-			netdev->stats.rx_crc_errors++;
-		if (status & RXSR_RUNT)
-			netdev->stats.rx_frame_errors++;
-	}
+	} else
+		ks8842_update_rx_err_counters(netdev, status);
 
 	/* set high watermark to 3K */
 	ks8842_clear_bits(adapter, 0, 1 << 12, REG_QRFCR);
@@ -540,18 +747,30 @@
 	isr = ks8842_read16(adapter, 18, REG_ISR);
 	netdev_dbg(netdev, "%s - ISR: 0x%x\n", __func__, isr);
 
+	/* when running in DMA mode, do not ack RX interrupts; they are handled
+	   internally by timberdale, and acking them here would stall its DMA
+	   FIFOs */
+	if (KS8842_USE_DMA(adapter))
+		isr &= ~IRQ_RX;
+
 	/* Ack */
 	ks8842_write16(adapter, 18, isr, REG_ISR);
 
+	if (!(adapter->conf_flags & MICREL_KS884X))
+		/* Ack in the timberdale IP as well */
+		iowrite32(0x1, adapter->hw_addr + REG_TIMB_IAR);
+
 	if (!netif_running(netdev))
 		return;
 
 	if (isr & IRQ_LINK_CHANGE)
 		ks8842_update_link_status(netdev, adapter);
 
-	if (isr & (IRQ_RX | IRQ_RX_ERROR))
+	/* should not get IRQ_RX when running in DMA mode */
+	if (isr & (IRQ_RX | IRQ_RX_ERROR) && !KS8842_USE_DMA(adapter))
 		ks8842_handle_rx(netdev, adapter);
 
+	/* should only happen when in PIO mode */
 	if (isr & IRQ_TX)
 		ks8842_handle_tx(netdev, adapter);
 
@@ -570,8 +789,17 @@
 
 	/* re-enable interrupts, put back the bank selection register */
 	spin_lock_irqsave(&adapter->lock, flags);
-	ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER);
+	if (KS8842_USE_DMA(adapter))
+		ks8842_write16(adapter, 18, ENABLED_IRQS_DMA, REG_IER);
+	else
+		ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER);
 	iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK);
+
+	/* Make sure timberdale continues DMA operations; they are stopped while
+	   we are handling the ks8842 because we might change bank */
+	if (KS8842_USE_DMA(adapter))
+		ks8842_resume_dma(adapter);
+
 	spin_unlock_irqrestore(&adapter->lock, flags);
 }
 
@@ -587,8 +815,12 @@
 	netdev_dbg(netdev, "%s - ISR: 0x%x\n", __func__, isr);
 
 	if (isr) {
-		/* disable IRQ */
-		ks8842_write16(adapter, 18, 0x00, REG_IER);
+		if (KS8842_USE_DMA(adapter))
+			/* disable all but RX IRQ, since the FPGA relies on it*/
+			ks8842_write16(adapter, 18, IRQ_RX, REG_IER);
+		else
+			/* disable IRQ */
+			ks8842_write16(adapter, 18, 0x00, REG_IER);
 
 		/* schedule tasklet */
 		tasklet_schedule(&adapter->tasklet);
@@ -598,9 +830,151 @@
 
 	iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK);
 
+	/* After an interrupt, tell timberdale to continue DMA operations.
+	   DMA is disabled while we are handling the ks8842 because we might
+	   change bank */
+	ks8842_resume_dma(adapter);
+
 	return ret;
 }
 
+static void ks8842_dma_rx_cb(void *data)
+{
+	struct net_device	*netdev = data;
+	struct ks8842_adapter	*adapter = netdev_priv(netdev);
+
+	netdev_dbg(netdev, "RX DMA finished\n");
+	/* schedule tasklet */
+	if (adapter->dma_rx.adesc)
+		tasklet_schedule(&adapter->dma_rx.tasklet);
+}
+
+static void ks8842_dma_tx_cb(void *data)
+{
+	struct net_device		*netdev = data;
+	struct ks8842_adapter		*adapter = netdev_priv(netdev);
+	struct ks8842_tx_dma_ctl	*ctl = &adapter->dma_tx;
+
+	netdev_dbg(netdev, "TX DMA finished\n");
+
+	if (!ctl->adesc)
+		return;
+
+	netdev->stats.tx_packets++;
+	ctl->adesc = NULL;
+
+	if (netif_queue_stopped(netdev))
+		netif_wake_queue(netdev);
+}
+
+static void ks8842_stop_dma(struct ks8842_adapter *adapter)
+{
+	struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx;
+	struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx;
+
+	tx_ctl->adesc = NULL;
+	if (tx_ctl->chan)
+		tx_ctl->chan->device->device_control(tx_ctl->chan,
+			DMA_TERMINATE_ALL, 0);
+
+	rx_ctl->adesc = NULL;
+	if (rx_ctl->chan)
+		rx_ctl->chan->device->device_control(rx_ctl->chan,
+			DMA_TERMINATE_ALL, 0);
+
+	if (sg_dma_address(&rx_ctl->sg))
+		dma_unmap_single(adapter->dev, sg_dma_address(&rx_ctl->sg),
+			DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
+	sg_dma_address(&rx_ctl->sg) = 0;
+
+	dev_kfree_skb(rx_ctl->skb);
+	rx_ctl->skb = NULL;
+}
+
+static void ks8842_dealloc_dma_bufs(struct ks8842_adapter *adapter)
+{
+	struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx;
+	struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx;
+
+	ks8842_stop_dma(adapter);
+
+	if (tx_ctl->chan)
+		dma_release_channel(tx_ctl->chan);
+	tx_ctl->chan = NULL;
+
+	if (rx_ctl->chan)
+		dma_release_channel(rx_ctl->chan);
+	rx_ctl->chan = NULL;
+
+	tasklet_kill(&rx_ctl->tasklet);
+
+	if (sg_dma_address(&tx_ctl->sg))
+		dma_unmap_single(adapter->dev, sg_dma_address(&tx_ctl->sg),
+			DMA_BUFFER_SIZE, DMA_TO_DEVICE);
+	sg_dma_address(&tx_ctl->sg) = 0;
+
+	kfree(tx_ctl->buf);
+	tx_ctl->buf = NULL;
+}
+
+static bool ks8842_dma_filter_fn(struct dma_chan *chan, void *filter_param)
+{
+	return chan->chan_id == (long)filter_param;
+}
+
+static int ks8842_alloc_dma_bufs(struct net_device *netdev)
+{
+	struct ks8842_adapter *adapter = netdev_priv(netdev);
+	struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx;
+	struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx;
+	int err;
+
+	dma_cap_mask_t mask;
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+	dma_cap_set(DMA_PRIVATE, mask);
+
+	sg_init_table(&tx_ctl->sg, 1);
+
+	tx_ctl->chan = dma_request_channel(mask, ks8842_dma_filter_fn,
+					   (void *)(long)tx_ctl->channel);
+	if (!tx_ctl->chan) {
+		err = -ENODEV;
+		goto err;
+	}
+
+	/* allocate DMA buffer */
+	tx_ctl->buf = kmalloc(DMA_BUFFER_SIZE, GFP_KERNEL);
+	if (!tx_ctl->buf) {
+		err = -ENOMEM;
+		goto err;
+	}
+
+	sg_dma_address(&tx_ctl->sg) = dma_map_single(adapter->dev,
+		tx_ctl->buf, DMA_BUFFER_SIZE, DMA_TO_DEVICE);
+	err = dma_mapping_error(adapter->dev,
+		sg_dma_address(&tx_ctl->sg));
+	if (err) {
+		sg_dma_address(&tx_ctl->sg) = 0;
+		goto err;
+	}
+
+	rx_ctl->chan = dma_request_channel(mask, ks8842_dma_filter_fn,
+					   (void *)(long)rx_ctl->channel);
+	if (!rx_ctl->chan) {
+		err = -ENODEV;
+		goto err;
+	}
+
+	tasklet_init(&rx_ctl->tasklet, ks8842_rx_frame_dma_tasklet,
+		(unsigned long)netdev);
+
+	return 0;
+err:
+	ks8842_dealloc_dma_bufs(adapter);
+	return err;
+}
 
 /* Netdevice operations */
 
@@ -611,6 +985,25 @@
 
 	netdev_dbg(netdev, "%s - entry\n", __func__);
 
+	if (KS8842_USE_DMA(adapter)) {
+		err = ks8842_alloc_dma_bufs(netdev);
+
+		if (!err) {
+			/* start RX dma */
+			err = __ks8842_start_new_rx_dma(netdev);
+			if (err)
+				ks8842_dealloc_dma_bufs(adapter);
+		}
+
+		if (err) {
+			printk(KERN_WARNING DRV_NAME
+				": Failed to initiate DMA, running PIO\n");
+			ks8842_dealloc_dma_bufs(adapter);
+			adapter->dma_rx.channel = -1;
+			adapter->dma_tx.channel = -1;
+		}
+	}
+
 	/* reset the HW */
 	ks8842_reset_hw(adapter);
 
@@ -636,6 +1029,9 @@
 
 	cancel_work_sync(&adapter->timeout_work);
 
+	if (KS8842_USE_DMA(adapter))
+		ks8842_dealloc_dma_bufs(adapter);
+
 	/* free the irq */
 	free_irq(adapter->irq, netdev);
 
@@ -653,6 +1049,17 @@
 
 	netdev_dbg(netdev, "%s: entry\n", __func__);
 
+	if (KS8842_USE_DMA(adapter)) {
+		unsigned long flags;
+		ret = ks8842_tx_frame_dma(skb, netdev);
+		/* for now only allow one transfer at a time */
+		spin_lock_irqsave(&adapter->lock, flags);
+		if (adapter->dma_tx.adesc)
+			netif_stop_queue(netdev);
+		spin_unlock_irqrestore(&adapter->lock, flags);
+		return ret;
+	}
+
 	ret = ks8842_tx_frame(skb, netdev);
 
 	if (ks8842_tx_fifo_space(adapter) <  netdev->mtu + 8)
@@ -688,6 +1095,10 @@
 	netdev_dbg(netdev, "%s: entry\n", __func__);
 
 	spin_lock_irqsave(&adapter->lock, flags);
+
+	if (KS8842_USE_DMA(adapter))
+		ks8842_stop_dma(adapter);
+
 	/* disable interrupts */
 	ks8842_write16(adapter, 18, 0, REG_IER);
 	ks8842_write16(adapter, 18, 0xFFFF, REG_ISR);
@@ -701,6 +1112,9 @@
 	ks8842_write_mac_addr(adapter, netdev->dev_addr);
 
 	ks8842_update_link_status(netdev, adapter);
+
+	if (KS8842_USE_DMA(adapter))
+		__ks8842_start_new_rx_dma(netdev);
 }
 
 static void ks8842_tx_timeout(struct net_device *netdev)
@@ -760,6 +1174,19 @@
 		goto err_get_irq;
 	}
 
+	adapter->dev = (pdev->dev.parent) ? pdev->dev.parent : &pdev->dev;
+
+	/* DMA is only supported when accessed via timberdale */
+	if (!(adapter->conf_flags & MICREL_KS884X) && pdata &&
+		(pdata->tx_dma_channel != -1) &&
+		(pdata->rx_dma_channel != -1)) {
+		adapter->dma_rx.channel = pdata->rx_dma_channel;
+		adapter->dma_tx.channel = pdata->tx_dma_channel;
+	} else {
+		adapter->dma_rx.channel = -1;
+		adapter->dma_tx.channel = -1;
+	}
+
 	tasklet_init(&adapter->tasklet, ks8842_tasklet, (unsigned long)netdev);
 	spin_lock_init(&adapter->lock);
 
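
Every DMA addition in the ks8842 hunk above is an instance of one dmaengine slave pattern: map a buffer, describe it with a one-entry scatterlist, have the channel prepare a slave transfer, attach a completion callback, submit. The TX path reduced to that skeleton (error unwinding trimmed; my_tx_done_cb stands in for ks8842_dma_tx_cb):

	struct scatterlist sg;
	struct dma_async_tx_descriptor *desc;

	sg_init_table(&sg, 1);
	sg_dma_address(&sg) = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, sg_dma_address(&sg)))
		return -ENOMEM;
	sg_dma_len(&sg) = len;

	desc = chan->device->device_prep_slave_sg(chan, &sg, 1,
			DMA_TO_DEVICE, DMA_PREP_INTERRUPT);
	if (!desc)
		return -EBUSY;		/* channel busy, try again later */

	desc->callback = my_tx_done_cb;	/* runs in tasklet context */
	desc->callback_param = ctx;
	desc->tx_submit(desc);		/* queue the transfer */

Calling through chan->device->device_prep_slave_sg() directly is what this era's API looked like; later kernels wrap the indirection in dmaengine_prep_slave_sg().
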
diff --git a/drivers/net/ksz884x.c b/drivers/net/ksz884x.c
index b3c010b..8b32cc1 100644
--- a/drivers/net/ksz884x.c
+++ b/drivers/net/ksz884x.c
@@ -6894,13 +6894,12 @@
 	i = j = num = got_num = 0;
 	while (j < MAC_ADDR_LEN) {
 		if (macaddr[i]) {
+			int digit;
+
 			got_num = 1;
-			if ('0' <= macaddr[i] && macaddr[i] <= '9')
-				num = num * 16 + macaddr[i] - '0';
-			else if ('A' <= macaddr[i] && macaddr[i] <= 'F')
-				num = num * 16 + 10 + macaddr[i] - 'A';
-			else if ('a' <= macaddr[i] && macaddr[i] <= 'f')
-				num = num * 16 + 10 + macaddr[i] - 'a';
+			digit = hex_to_bin(macaddr[i]);
+			if (digit >= 0)
+				num = num * 16 + digit;
 			else if (':' == macaddr[i])
 				got_num = 2;
 			else
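
hex_to_bin() returns the value of a single hex digit, or -1 for any other character, which is why one digit >= 0 test replaces the three character-range branches above. A sketch of parsing a full colon-separated MAC with it (str and mac are hypothetical buffers):

	int i, hi, lo;

	for (i = 0; i < 6; i++) {
		hi = hex_to_bin(*str++);
		lo = hex_to_bin(*str++);
		if (hi < 0 || lo < 0)
			return -EINVAL;
		mac[i] = (hi << 4) | lo;
		if (i < 5 && *str++ != ':')
			return -EINVAL;
	}
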
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 1b28aae..0ef0eb0 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -158,7 +158,8 @@
 	const struct macvlan_dev *vlan;
 	const struct macvlan_dev *src;
 	struct net_device *dev;
-	unsigned int len;
+	unsigned int len = 0;
+	int ret = NET_RX_DROP;
 
 	port = macvlan_port_get_rcu(skb->dev);
 	if (is_multicast_ether_addr(eth->h_dest)) {
@@ -195,14 +196,16 @@
 	}
 	len = skb->len + ETH_HLEN;
 	skb = skb_share_check(skb, GFP_ATOMIC);
-	macvlan_count_rx(vlan, len, skb != NULL, 0);
 	if (!skb)
-		return NULL;
+		goto out;
 
 	skb->dev = dev;
 	skb->pkt_type = PACKET_HOST;
 
-	vlan->receive(skb);
+	ret = vlan->receive(skb);
+
+out:
+	macvlan_count_rx(vlan, len, ret == NET_RX_SUCCESS, 0);
 	return NULL;
 }
 
@@ -515,7 +518,7 @@
 	.ndo_validate_addr	= eth_validate_addr,
 };
 
-static void macvlan_setup(struct net_device *dev)
+void macvlan_common_setup(struct net_device *dev)
 {
 	ether_setup(dev);
 
@@ -524,6 +527,12 @@
 	dev->destructor		= free_netdev;
 	dev->header_ops		= &macvlan_hard_header_ops,
 	dev->ethtool_ops	= &macvlan_ethtool_ops;
+}
+EXPORT_SYMBOL_GPL(macvlan_common_setup);
+
+static void macvlan_setup(struct net_device *dev)
+{
+	macvlan_common_setup(dev);
 	dev->tx_queue_len	= 0;
 }
 
@@ -735,7 +744,6 @@
 	/* common fields */
 	ops->priv_size		= sizeof(struct macvlan_dev);
 	ops->get_tx_queues	= macvlan_get_tx_queues;
-	ops->setup		= macvlan_setup;
 	ops->validate		= macvlan_validate;
 	ops->maxtype		= IFLA_MACVLAN_MAX;
 	ops->policy		= macvlan_policy;
@@ -749,6 +757,7 @@
 
 static struct rtnl_link_ops macvlan_link_ops = {
 	.kind		= "macvlan",
+	.setup		= macvlan_setup,
 	.newlink	= macvlan_newlink,
 	.dellink	= macvlan_dellink,
 };
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 2b4d59b..3b1c54a 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -180,11 +180,18 @@
 {
 	struct macvtap_queue *q = macvtap_get_queue(dev, skb);
 	if (!q)
-		return -ENOLINK;
+		goto drop;
+
+	if (skb_queue_len(&q->sk.sk_receive_queue) >= dev->tx_queue_len)
+		goto drop;
 
 	skb_queue_tail(&q->sk.sk_receive_queue, skb);
 	wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND);
-	return 0;
+	return NET_RX_SUCCESS;
+
+drop:
+	kfree_skb(skb);
+	return NET_RX_DROP;
 }
 
 /*
@@ -235,8 +242,15 @@
 	macvlan_dellink(dev, head);
 }
 
+static void macvtap_setup(struct net_device *dev)
+{
+	macvlan_common_setup(dev);
+	dev->tx_queue_len = TUN_READQ_SIZE;
+}
+
 static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
 	.kind		= "macvtap",
+	.setup		= macvtap_setup,
 	.newlink	= macvtap_newlink,
 	.dellink	= macvtap_dellink,
 };
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 2fcdb1e..2d488ab 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -2675,7 +2675,8 @@
 	 * Detect hardware parameters.
 	 */
 	msp->t_clk = (pd != NULL && pd->t_clk != 0) ? pd->t_clk : 133000000;
-	msp->tx_csum_limit = pd->tx_csum_limit ? pd->tx_csum_limit : 9 * 1024;
+	msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ?
+					pd->tx_csum_limit : 9 * 1024;
 	infer_hw_params(msp);
 
 	platform_set_drvdata(pdev, msp);
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 78b74e8..721a090 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -69,6 +69,15 @@
 #define MII_M1111_COPPER		0
 #define MII_M1111_FIBER			1
 
+#define MII_88E1121_PHY_MSCR_PAGE	2
+#define MII_88E1121_PHY_MSCR_REG	21
+#define MII_88E1121_PHY_MSCR_RX_DELAY	BIT(5)
+#define MII_88E1121_PHY_MSCR_TX_DELAY	BIT(4)
+#define MII_88E1121_PHY_MSCR_DELAY_MASK	(~(0x3 << 4))
+
+#define MII_88EC048_PHY_MSCR1_REG	16
+#define MII_88EC048_PHY_MSCR1_PAD_ODD	BIT(6)
+
 #define MII_88E1121_PHY_LED_CTRL	16
 #define MII_88E1121_PHY_LED_PAGE	3
 #define MII_88E1121_PHY_LED_DEF		0x0030
@@ -180,7 +189,30 @@
 
 static int m88e1121_config_aneg(struct phy_device *phydev)
 {
-	int err, temp;
+	int err, oldpage, mscr;
+
+	oldpage = phy_read(phydev, MII_88E1121_PHY_PAGE);
+
+	err = phy_write(phydev, MII_88E1121_PHY_PAGE,
+			MII_88E1121_PHY_MSCR_PAGE);
+	if (err < 0)
+		return err;
+	mscr = phy_read(phydev, MII_88E1121_PHY_MSCR_REG) &
+		MII_88E1121_PHY_MSCR_DELAY_MASK;
+
+	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
+		mscr |= (MII_88E1121_PHY_MSCR_RX_DELAY |
+			 MII_88E1121_PHY_MSCR_TX_DELAY);
+	else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
+		mscr |= MII_88E1121_PHY_MSCR_RX_DELAY;
+	else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
+		mscr |= MII_88E1121_PHY_MSCR_TX_DELAY;
+
+	err = phy_write(phydev, MII_88E1121_PHY_MSCR_REG, mscr);
+	if (err < 0)
+		return err;
+
+	phy_write(phydev, MII_88E1121_PHY_PAGE, oldpage);
 
 	err = phy_write(phydev, MII_BMCR, BMCR_RESET);
 	if (err < 0)
@@ -191,17 +223,42 @@
 	if (err < 0)
 		return err;
 
-	temp = phy_read(phydev, MII_88E1121_PHY_PAGE);
+	oldpage = phy_read(phydev, MII_88E1121_PHY_PAGE);
 
 	phy_write(phydev, MII_88E1121_PHY_PAGE, MII_88E1121_PHY_LED_PAGE);
 	phy_write(phydev, MII_88E1121_PHY_LED_CTRL, MII_88E1121_PHY_LED_DEF);
-	phy_write(phydev, MII_88E1121_PHY_PAGE, temp);
+	phy_write(phydev, MII_88E1121_PHY_PAGE, oldpage);
 
 	err = genphy_config_aneg(phydev);
 
 	return err;
 }
 
+static int m88ec048_config_aneg(struct phy_device *phydev)
+{
+	int err, oldpage, mscr;
+
+	oldpage = phy_read(phydev, MII_88E1121_PHY_PAGE);
+
+	err = phy_write(phydev, MII_88E1121_PHY_PAGE,
+			MII_88E1121_PHY_MSCR_PAGE);
+	if (err < 0)
+		return err;
+
+	mscr = phy_read(phydev, MII_88EC048_PHY_MSCR1_REG);
+	mscr |= MII_88EC048_PHY_MSCR1_PAD_ODD;
+
+	err = phy_write(phydev, MII_88EC048_PHY_MSCR1_REG, mscr);
+	if (err < 0)
+		return err;
+
+	err = phy_write(phydev, MII_88E1121_PHY_PAGE, oldpage);
+	if (err < 0)
+		return err;
+
+	return m88e1121_config_aneg(phydev);
+}
+
 static int m88e1111_config_init(struct phy_device *phydev)
 {
 	int err;
@@ -593,6 +650,19 @@
 		.driver = { .owner = THIS_MODULE },
 	},
 	{
+		.phy_id = 0x01410e90,
+		.phy_id_mask = 0xfffffff0,
+		.name = "Marvell 88EC048",
+		.features = PHY_GBIT_FEATURES,
+		.flags = PHY_HAS_INTERRUPT,
+		.config_aneg = &m88ec048_config_aneg,
+		.read_status = &marvell_read_status,
+		.ack_interrupt = &marvell_ack_interrupt,
+		.config_intr = &marvell_config_intr,
+		.did_interrupt = &m88e1121_did_interrupt,
+		.driver = { .owner = THIS_MODULE },
+	},
+	{
 		.phy_id = 0x01410cd0,
 		.phy_id_mask = 0xfffffff0,
 		.name = "Marvell 88E1145",
@@ -657,6 +727,7 @@
 	{ 0x01410cb0, 0xfffffff0 },
 	{ 0x01410cd0, 0xfffffff0 },
 	{ 0x01410e30, 0xfffffff0 },
+	{ 0x01410e90, 0xfffffff0 },
 	{ }
 };
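
Both new config_aneg routines follow the save/switch/modify/restore sequence
that paged Marvell PHYs require. A condensed sketch of that sequence, using
the phylib phy_read()/phy_write() accessors and the MSCR constants defined
above (MII_88E1121_PHY_PAGE is assumed to be the page-select register index
already defined elsewhere in this file); error handling is trimmed relative
to the real code:

#include <linux/phy.h>

static int example_set_rgmii_delays(struct phy_device *phydev,
				    bool rx_delay, bool tx_delay)
{
	int oldpage, mscr, err;

	oldpage = phy_read(phydev, MII_88E1121_PHY_PAGE);	/* save */
	if (oldpage < 0)
		return oldpage;

	err = phy_write(phydev, MII_88E1121_PHY_PAGE,
			MII_88E1121_PHY_MSCR_PAGE);		/* switch */
	if (err < 0)
		return err;

	mscr = phy_read(phydev, MII_88E1121_PHY_MSCR_REG);
	if (mscr >= 0) {
		mscr &= MII_88E1121_PHY_MSCR_DELAY_MASK;	/* clear both */
		if (rx_delay)
			mscr |= MII_88E1121_PHY_MSCR_RX_DELAY;
		if (tx_delay)
			mscr |= MII_88E1121_PHY_MSCR_TX_DELAY;
		err = phy_write(phydev, MII_88E1121_PHY_MSCR_REG, mscr);
	}

	phy_write(phydev, MII_88E1121_PHY_PAGE, oldpage);	/* restore */
	return (mscr < 0) ? mscr : err;
}
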
 
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index 54ebb65..6168a13 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -5,6 +5,8 @@
  * See LICENSE.qla3xxx for copyright and licensing details.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/types.h>
@@ -36,14 +38,16 @@
 
 #include "qla3xxx.h"
 
-#define DRV_NAME  	"qla3xxx"
-#define DRV_STRING 	"QLogic ISP3XXX Network Driver"
+#define DRV_NAME	"qla3xxx"
+#define DRV_STRING	"QLogic ISP3XXX Network Driver"
 #define DRV_VERSION	"v2.03.00-k5"
-#define PFX		DRV_NAME " "
 
 static const char ql3xxx_driver_name[] = DRV_NAME;
 static const char ql3xxx_driver_version[] = DRV_VERSION;
 
+#define TIMED_OUT_MSG							\
+"Timed out waiting for management port to get free before issuing command\n"
+
 MODULE_AUTHOR("QLogic Corporation");
 MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION " ");
 MODULE_LICENSE("GPL");
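
Defining pr_fmt before the first #include is what allows PFX to be deleted:
every pr_*() call in the file picks up the module-name prefix automatically,
while the netdev_*()/netif_*() helpers used throughout the rest of this patch
prefix the interface name instead. Approximate expansion (illustrative):

	pr_err("Failed ql_sem_spinlock()\n");

	/* becomes roughly */

	printk(KERN_ERR KBUILD_MODNAME ": " "Failed ql_sem_spinlock()\n");

	/* i.e. "qla3xxx: Failed ql_sem_spinlock()" in the kernel log */
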
@@ -73,24 +77,24 @@
 /*
  *  These are the known PHY's which are used
  */
-typedef enum {
+enum PHY_DEVICE_TYPE {
    PHY_TYPE_UNKNOWN   = 0,
    PHY_VITESSE_VSC8211,
    PHY_AGERE_ET1011C,
    MAX_PHY_DEV_TYPES
-} PHY_DEVICE_et;
+};
 
-typedef struct {
-	PHY_DEVICE_et phyDevice;
-	u32		phyIdOUI;
-	u16		phyIdModel;
-	char 		*name;
-} PHY_DEVICE_INFO_t;
+struct PHY_DEVICE_INFO {
+	const enum PHY_DEVICE_TYPE	phyDevice;
+	const u32		phyIdOUI;
+	const u16		phyIdModel;
+	const char		*name;
+};
 
-static const PHY_DEVICE_INFO_t PHY_DEVICES[] =
-	{{PHY_TYPE_UNKNOWN,    0x000000, 0x0, "PHY_TYPE_UNKNOWN"},
-	 {PHY_VITESSE_VSC8211, 0x0003f1, 0xb, "PHY_VITESSE_VSC8211"},
-	 {PHY_AGERE_ET1011C,   0x00a0bc, 0x1, "PHY_AGERE_ET1011C"},
+static const struct PHY_DEVICE_INFO PHY_DEVICES[] = {
+	{PHY_TYPE_UNKNOWN,    0x000000, 0x0, "PHY_TYPE_UNKNOWN"},
+	{PHY_VITESSE_VSC8211, 0x0003f1, 0xb, "PHY_VITESSE_VSC8211"},
+	{PHY_AGERE_ET1011C,   0x00a0bc, 0x1, "PHY_AGERE_ET1011C"},
 };
 
 
@@ -100,7 +104,8 @@
 static int ql_sem_spinlock(struct ql3_adapter *qdev,
 			    u32 sem_mask, u32 sem_bits)
 {
-	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+	struct ql3xxx_port_registers __iomem *port_regs =
+		qdev->mem_map_registers;
 	u32 value;
 	unsigned int seconds = 3;
 
@@ -111,20 +116,22 @@
 		if ((value & (sem_mask >> 16)) == sem_bits)
 			return 0;
 		ssleep(1);
-	} while(--seconds);
+	} while (--seconds);
 	return -1;
 }
 
 static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask)
 {
-	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+	struct ql3xxx_port_registers __iomem *port_regs =
+		qdev->mem_map_registers;
 	writel(sem_mask, &port_regs->CommonRegs.semaphoreReg);
 	readl(&port_regs->CommonRegs.semaphoreReg);
 }
 
 static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits)
 {
-	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+	struct ql3xxx_port_registers __iomem *port_regs =
+		qdev->mem_map_registers;
 	u32 value;
 
 	writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg);
@@ -139,32 +146,28 @@
 {
 	int i = 0;
 
-	while (1) {
-		if (!ql_sem_lock(qdev,
-				 QL_DRVR_SEM_MASK,
-				 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
-				  * 2) << 1)) {
-			if (i < 10) {
-				ssleep(1);
-				i++;
-			} else {
-				printk(KERN_ERR PFX "%s: Timed out waiting for "
-				       "driver lock...\n",
-				       qdev->ndev->name);
-				return 0;
-			}
-		} else {
-			printk(KERN_DEBUG PFX
-			       "%s: driver lock acquired.\n",
-			       qdev->ndev->name);
+	while (i++ < 10) {
+		if (i > 1)
+			ssleep(1);
+
+		if (ql_sem_lock(qdev,
+				QL_DRVR_SEM_MASK,
+				(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
+				 * 2) << 1)) {
+			netdev_printk(KERN_DEBUG, qdev->ndev,
+				      "driver lock acquired\n");
 			return 1;
 		}
 	}
+
+	netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n");
+	return 0;
 }
 
 static void ql_set_register_page(struct ql3_adapter *qdev, u32 page)
 {
-	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+	struct ql3xxx_port_registers __iomem *port_regs =
+		qdev->mem_map_registers;
 
 	writel(((ISP_CONTROL_NP_MASK << 16) | page),
 			&port_regs->CommonRegs.ispControlStatus);
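
The rewritten ql_wait_for_drvr_lock() above is a bounded try-lock loop:
attempt, back off, retry, and give up after a fixed budget instead of looping
forever. A generic sketch of the shape, where try_acquire() is an assumed
stand-in for ql_sem_lock(), not a kernel API:

#include <linux/delay.h>
#include <linux/types.h>

extern bool try_acquire(void);		/* assumed helper */

static bool example_wait_for_lock(void)
{
	int attempt;

	for (attempt = 0; attempt < 10; attempt++) {
		if (attempt)
			ssleep(1);	/* back off before each retry */
		if (try_acquire())
			return true;
	}
	return false;			/* budget exhausted */
}
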
@@ -172,8 +175,7 @@
 	qdev->current_page = page;
 }
 
-static u32 ql_read_common_reg_l(struct ql3_adapter *qdev,
-			      u32 __iomem * reg)
+static u32 ql_read_common_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
 {
 	u32 value;
 	unsigned long hw_flags;
@@ -185,8 +187,7 @@
 	return value;
 }
 
-static u32 ql_read_common_reg(struct ql3_adapter *qdev,
-			      u32 __iomem * reg)
+static u32 ql_read_common_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
 {
 	return readl(reg);
 }
@@ -199,7 +200,7 @@
 	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
 
 	if (qdev->current_page != 0)
-		ql_set_register_page(qdev,0);
+		ql_set_register_page(qdev, 0);
 	value = readl(reg);
 
 	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
@@ -209,7 +210,7 @@
 static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
 {
 	if (qdev->current_page != 0)
-		ql_set_register_page(qdev,0);
+		ql_set_register_page(qdev, 0);
 	return readl(reg);
 }
 
@@ -243,7 +244,7 @@
 			       u32 __iomem *reg, u32 value)
 {
 	if (qdev->current_page != 0)
-		ql_set_register_page(qdev,0);
+		ql_set_register_page(qdev, 0);
 	writel(value, reg);
 	readl(reg);
 }
@@ -255,7 +256,7 @@
 			       u32 __iomem *reg, u32 value)
 {
 	if (qdev->current_page != 1)
-		ql_set_register_page(qdev,1);
+		ql_set_register_page(qdev, 1);
 	writel(value, reg);
 	readl(reg);
 }
@@ -267,14 +268,15 @@
 			       u32 __iomem *reg, u32 value)
 {
 	if (qdev->current_page != 2)
-		ql_set_register_page(qdev,2);
+		ql_set_register_page(qdev, 2);
 	writel(value, reg);
 	readl(reg);
 }
 
 static void ql_disable_interrupts(struct ql3_adapter *qdev)
 {
-	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+	struct ql3xxx_port_registers __iomem *port_regs =
+		qdev->mem_map_registers;
 
 	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
 			    (ISP_IMR_ENABLE_INT << 16));
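
The ql_read_pageN_reg()/ql_write_pageN_reg() helpers being re-wrapped here all
share one idiom: qdev->current_page caches the selected register page so the
page switch is issued only when it actually changes, and each write is followed
by a read-back to flush the posted PCI write. A sketch of the common shape
(example_write_paged is an illustrative name; ql_set_register_page() and
current_page are the real names used above):

static void example_write_paged(struct ql3_adapter *qdev, u32 page,
				u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != page)
		ql_set_register_page(qdev, page); /* updates current_page */
	writel(value, reg);
	readl(reg);			/* flush the posted write */
}
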
@@ -283,7 +285,8 @@
 
 static void ql_enable_interrupts(struct ql3_adapter *qdev)
 {
-	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+	struct ql3xxx_port_registers __iomem *port_regs =
+		qdev->mem_map_registers;
 
 	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
 			    ((0xff << 16) | ISP_IMR_ENABLE_INT));
@@ -308,8 +311,7 @@
 		lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
 						   qdev->lrg_buffer_len);
 		if (unlikely(!lrg_buf_cb->skb)) {
-			printk(KERN_ERR PFX "%s: failed netdev_alloc_skb().\n",
-			       qdev->ndev->name);
+			netdev_err(qdev->ndev, "failed netdev_alloc_skb()\n");
 			qdev->lrg_buf_skb_check++;
 		} else {
 			/*
@@ -323,9 +325,10 @@
 					     QL_HEADER_SPACE,
 					     PCI_DMA_FROMDEVICE);
 			err = pci_dma_mapping_error(qdev->pdev, map);
-			if(err) {
-				printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
-				       qdev->ndev->name, err);
+			if (err) {
+				netdev_err(qdev->ndev,
+					   "PCI mapping failed with error: %d\n",
+					   err);
 				dev_kfree_skb(lrg_buf_cb->skb);
 				lrg_buf_cb->skb = NULL;
 
@@ -350,10 +353,11 @@
 static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter
 							   *qdev)
 {
-	struct ql_rcv_buf_cb *lrg_buf_cb;
+	struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
 
-	if ((lrg_buf_cb = qdev->lrg_buf_free_head) != NULL) {
-		if ((qdev->lrg_buf_free_head = lrg_buf_cb->next) == NULL)
+	if (lrg_buf_cb != NULL) {
+		qdev->lrg_buf_free_head = lrg_buf_cb->next;
+		if (qdev->lrg_buf_free_head == NULL)
 			qdev->lrg_buf_free_tail = NULL;
 		qdev->lrg_buf_free_count--;
 	}
@@ -374,13 +378,13 @@
 static void fm93c56a_select(struct ql3_adapter *qdev)
 {
 	struct ql3xxx_port_registers __iomem *port_regs =
-	    		qdev->mem_map_registers;
+			qdev->mem_map_registers;
+	u32 __iomem *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
 
 	qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
-	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
-			    ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
-	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
-			    ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data));
+	ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
+	ql_write_nvram_reg(qdev, spir,
+			   ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data));
 }
 
 /*
@@ -393,51 +397,40 @@
 	u32 dataBit;
 	u32 previousBit;
 	struct ql3xxx_port_registers __iomem *port_regs =
-	    		qdev->mem_map_registers;
+			qdev->mem_map_registers;
+	u32 __iomem *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
 
 	/* Clock in a zero, then do the start bit */
-	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
-			    ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
-			    AUBURN_EEPROM_DO_1);
-	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
-			    ISP_NVRAM_MASK | qdev->
-			    eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
-			    AUBURN_EEPROM_CLK_RISE);
-	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
-			    ISP_NVRAM_MASK | qdev->
-			    eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
-			    AUBURN_EEPROM_CLK_FALL);
+	ql_write_nvram_reg(qdev, spir,
+			   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+			    AUBURN_EEPROM_DO_1));
+	ql_write_nvram_reg(qdev, spir,
+			   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+			    AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_RISE));
+	ql_write_nvram_reg(qdev, spir,
+			   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+			    AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_FALL));
 
 	mask = 1 << (FM93C56A_CMD_BITS - 1);
 	/* Force the previous data bit to be different */
 	previousBit = 0xffff;
 	for (i = 0; i < FM93C56A_CMD_BITS; i++) {
-		dataBit =
-		    (cmd & mask) ? AUBURN_EEPROM_DO_1 : AUBURN_EEPROM_DO_0;
+		dataBit = (cmd & mask)
+			? AUBURN_EEPROM_DO_1
+			: AUBURN_EEPROM_DO_0;
 		if (previousBit != dataBit) {
-			/*
-			 * If the bit changed, then change the DO state to
-			 * match
-			 */
-			ql_write_nvram_reg(qdev,
-					    &port_regs->CommonRegs.
-					    serialPortInterfaceReg,
-					    ISP_NVRAM_MASK | qdev->
-					    eeprom_cmd_data | dataBit);
+			/* If the bit changed, change the DO state to match */
+			ql_write_nvram_reg(qdev, spir,
+					   (ISP_NVRAM_MASK |
+					    qdev->eeprom_cmd_data | dataBit));
 			previousBit = dataBit;
 		}
-		ql_write_nvram_reg(qdev,
-				    &port_regs->CommonRegs.
-				    serialPortInterfaceReg,
-				    ISP_NVRAM_MASK | qdev->
-				    eeprom_cmd_data | dataBit |
-				    AUBURN_EEPROM_CLK_RISE);
-		ql_write_nvram_reg(qdev,
-				    &port_regs->CommonRegs.
-				    serialPortInterfaceReg,
-				    ISP_NVRAM_MASK | qdev->
-				    eeprom_cmd_data | dataBit |
-				    AUBURN_EEPROM_CLK_FALL);
+		ql_write_nvram_reg(qdev, spir,
+				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+				    dataBit | AUBURN_EEPROM_CLK_RISE));
+		ql_write_nvram_reg(qdev, spir,
+				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+				    dataBit | AUBURN_EEPROM_CLK_FALL));
 		cmd = cmd << 1;
 	}
 
@@ -445,33 +438,24 @@
 	/* Force the previous data bit to be different */
 	previousBit = 0xffff;
 	for (i = 0; i < addrBits; i++) {
-		dataBit =
-		    (eepromAddr & mask) ? AUBURN_EEPROM_DO_1 :
-		    AUBURN_EEPROM_DO_0;
+		dataBit = (eepromAddr & mask) ? AUBURN_EEPROM_DO_1
+			: AUBURN_EEPROM_DO_0;
 		if (previousBit != dataBit) {
 			/*
 			 * If the bit changed, then change the DO state to
 			 * match
 			 */
-			ql_write_nvram_reg(qdev,
-					    &port_regs->CommonRegs.
-					    serialPortInterfaceReg,
-					    ISP_NVRAM_MASK | qdev->
-					    eeprom_cmd_data | dataBit);
+			ql_write_nvram_reg(qdev, spir,
+					   (ISP_NVRAM_MASK |
+					    qdev->eeprom_cmd_data | dataBit));
 			previousBit = dataBit;
 		}
-		ql_write_nvram_reg(qdev,
-				    &port_regs->CommonRegs.
-				    serialPortInterfaceReg,
-				    ISP_NVRAM_MASK | qdev->
-				    eeprom_cmd_data | dataBit |
-				    AUBURN_EEPROM_CLK_RISE);
-		ql_write_nvram_reg(qdev,
-				    &port_regs->CommonRegs.
-				    serialPortInterfaceReg,
-				    ISP_NVRAM_MASK | qdev->
-				    eeprom_cmd_data | dataBit |
-				    AUBURN_EEPROM_CLK_FALL);
+		ql_write_nvram_reg(qdev, spir,
+				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+				    dataBit | AUBURN_EEPROM_CLK_RISE));
+		ql_write_nvram_reg(qdev, spir,
+				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+				    dataBit | AUBURN_EEPROM_CLK_FALL));
 		eepromAddr = eepromAddr << 1;
 	}
 }
@@ -482,10 +466,11 @@
 static void fm93c56a_deselect(struct ql3_adapter *qdev)
 {
 	struct ql3xxx_port_registers __iomem *port_regs =
-	    		qdev->mem_map_registers;
+			qdev->mem_map_registers;
+	u32 __iomem *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
+
 	qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0;
-	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
-			    ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
+	ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
 }
 
 /*
@@ -497,29 +482,23 @@
 	u32 data = 0;
 	u32 dataBit;
 	struct ql3xxx_port_registers __iomem *port_regs =
-	    		qdev->mem_map_registers;
+			qdev->mem_map_registers;
+	u32 __iomem *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
 
 	/* Read the data bits */
 	/* The first bit is a dummy.  Clock right over it. */
 	for (i = 0; i < dataBits; i++) {
-		ql_write_nvram_reg(qdev,
-				    &port_regs->CommonRegs.
-				    serialPortInterfaceReg,
-				    ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
-				    AUBURN_EEPROM_CLK_RISE);
-		ql_write_nvram_reg(qdev,
-				    &port_regs->CommonRegs.
-				    serialPortInterfaceReg,
-				    ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
-				    AUBURN_EEPROM_CLK_FALL);
-		dataBit =
-		    (ql_read_common_reg
-		     (qdev,
-		      &port_regs->CommonRegs.
-		      serialPortInterfaceReg) & AUBURN_EEPROM_DI_1) ? 1 : 0;
+		ql_write_nvram_reg(qdev, spir,
+				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+				   AUBURN_EEPROM_CLK_RISE);
+		ql_write_nvram_reg(qdev, spir,
+				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+				   AUBURN_EEPROM_CLK_FALL);
+		dataBit = (ql_read_common_reg(qdev, spir) &
+			   AUBURN_EEPROM_DI_1) ? 1 : 0;
 		data = (data << 1) | dataBit;
 	}
-	*value = (u16) data;
+	*value = (u16)data;
 }
 
 /*
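
All the fm93c56a_*() cleanups above implement the same Microwire bit-bang:
drive DO only when the bit value changes, then pulse the clock once per bit,
MSB first. A generic sketch of the command phase, where set_do() and
pulse_clk() are assumed wrappers around the serialPortInterfaceReg writes
shown above:

static void example_clock_out(u16 word, int nbits)
{
	u32 prev = ~0;			/* force the first DO write */
	u16 mask = 1 << (nbits - 1);	/* MSB first */

	for (; mask; mask >>= 1) {
		u32 bit = !!(word & mask);

		if (bit != prev) {	/* touch DO only on change */
			set_do(bit);	/* assumed helper */
			prev = bit;
		}
		pulse_clk();		/* assumed: one CLK rise + fall */
	}
}
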
@@ -551,13 +530,12 @@
 
 	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
 
-	pEEPROMData = (u16 *) & qdev->nvram_data;
+	pEEPROMData = (u16 *)&qdev->nvram_data;
 	qdev->eeprom_cmd_data = 0;
-	if(ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK,
+	if (ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK,
 			(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
 			 2) << 10)) {
-		printk(KERN_ERR PFX"%s: Failed ql_sem_spinlock().\n",
-			__func__);
+		pr_err("%s: Failed ql_sem_spinlock()\n", __func__);
 		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 		return -1;
 	}
@@ -570,8 +548,8 @@
 	ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK);
 
 	if (checksum != 0) {
-		printk(KERN_ERR PFX "%s: checksum should be zero, is %x!!\n",
-		       qdev->ndev->name, checksum);
+		netdev_err(qdev->ndev, "checksum should be zero, is %x!!\n",
+			   checksum);
 		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 		return -1;
 	}
@@ -587,7 +565,7 @@
 static int ql_wait_for_mii_ready(struct ql3_adapter *qdev)
 {
 	struct ql3xxx_port_registers __iomem *port_regs =
-	    		qdev->mem_map_registers;
+			qdev->mem_map_registers;
 	u32 temp;
 	int count = 1000;
 
@@ -604,7 +582,7 @@
 static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev)
 {
 	struct ql3xxx_port_registers __iomem *port_regs =
-	    		qdev->mem_map_registers;
+			qdev->mem_map_registers;
 	u32 scanControl;
 
 	if (qdev->numPorts > 1) {
@@ -632,7 +610,7 @@
 {
 	u8 ret;
 	struct ql3xxx_port_registers __iomem *port_regs =
-	    				qdev->mem_map_registers;
+					qdev->mem_map_registers;
 
 	/* See if scan mode is enabled before we turn it off */
 	if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) &
@@ -662,17 +640,13 @@
 			       u16 regAddr, u16 value, u32 phyAddr)
 {
 	struct ql3xxx_port_registers __iomem *port_regs =
-	    		qdev->mem_map_registers;
+			qdev->mem_map_registers;
 	u8 scanWasEnabled;
 
 	scanWasEnabled = ql_mii_disable_scan_mode(qdev);
 
 	if (ql_wait_for_mii_ready(qdev)) {
-		if (netif_msg_link(qdev))
-			printk(KERN_WARNING PFX
-			       "%s Timed out waiting for management port to "
-			       "get free before issuing command.\n",
-			       qdev->ndev->name);
+		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
 		return -1;
 	}
 
@@ -683,11 +657,7 @@
 
 	/* Wait for write to complete 9/10/04 SJP */
 	if (ql_wait_for_mii_ready(qdev)) {
-		if (netif_msg_link(qdev))
-			printk(KERN_WARNING PFX
-			       "%s: Timed out waiting for management port to "
-			       "get free before issuing command.\n",
-			       qdev->ndev->name);
+		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
 		return -1;
 	}
 
@@ -698,21 +668,17 @@
 }
 
 static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr,
-			      u16 * value, u32 phyAddr)
+			      u16 *value, u32 phyAddr)
 {
 	struct ql3xxx_port_registers __iomem *port_regs =
-	    		qdev->mem_map_registers;
+			qdev->mem_map_registers;
 	u8 scanWasEnabled;
 	u32 temp;
 
 	scanWasEnabled = ql_mii_disable_scan_mode(qdev);
 
 	if (ql_wait_for_mii_ready(qdev)) {
-		if (netif_msg_link(qdev))
-			printk(KERN_WARNING PFX
-			       "%s: Timed out waiting for management port to "
-			       "get free before issuing command.\n",
-			       qdev->ndev->name);
+		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
 		return -1;
 	}
 
@@ -727,11 +693,7 @@
 
 	/* Wait for the read to complete */
 	if (ql_wait_for_mii_ready(qdev)) {
-		if (netif_msg_link(qdev))
-			printk(KERN_WARNING PFX
-			       "%s: Timed out waiting for management port to "
-			       "get free after issuing command.\n",
-			       qdev->ndev->name);
+		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
 		return -1;
 	}
 
@@ -747,16 +709,12 @@
 static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value)
 {
 	struct ql3xxx_port_registers __iomem *port_regs =
-	    		qdev->mem_map_registers;
+			qdev->mem_map_registers;
 
 	ql_mii_disable_scan_mode(qdev);
 
 	if (ql_wait_for_mii_ready(qdev)) {
-		if (netif_msg_link(qdev))
-			printk(KERN_WARNING PFX
-			       "%s: Timed out waiting for management port to "
-			       "get free before issuing command.\n",
-			       qdev->ndev->name);
+		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
 		return -1;
 	}
 
@@ -767,11 +725,7 @@
 
 	/* Wait for write to complete. */
 	if (ql_wait_for_mii_ready(qdev)) {
-		if (netif_msg_link(qdev))
-			printk(KERN_WARNING PFX
-			       "%s: Timed out waiting for management port to "
-			       "get free before issuing command.\n",
-			       qdev->ndev->name);
+		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
 		return -1;
 	}
 
@@ -784,16 +738,12 @@
 {
 	u32 temp;
 	struct ql3xxx_port_registers __iomem *port_regs =
-	    		qdev->mem_map_registers;
+			qdev->mem_map_registers;
 
 	ql_mii_disable_scan_mode(qdev);
 
 	if (ql_wait_for_mii_ready(qdev)) {
-		if (netif_msg_link(qdev))
-			printk(KERN_WARNING PFX
-			       "%s: Timed out waiting for management port to "
-			       "get free before issuing command.\n",
-			       qdev->ndev->name);
+		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
 		return -1;
 	}
 
@@ -808,11 +758,7 @@
 
 	/* Wait for the read to complete */
 	if (ql_wait_for_mii_ready(qdev)) {
-		if (netif_msg_link(qdev))
-			printk(KERN_WARNING PFX
-			       "%s: Timed out waiting for management port to "
-			       "get free before issuing command.\n",
-			       qdev->ndev->name);
+		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
 		return -1;
 	}
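
Each of the message-level-gated blocks replaced in this stretch collapses into
a single netif_warn(): the helper folds the netif_msg_link() test and the
interface-name prefix into one call. The two forms are equivalent (both
compile against the real netdevice.h helpers):

	if (netif_msg_link(qdev))
		netdev_warn(qdev->ndev, TIMED_OUT_MSG);

	/* is equivalent to */

	netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
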
 
@@ -898,7 +844,7 @@
 
 static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr)
 {
-	printk(KERN_INFO "%s: enabling Agere specific PHY\n", qdev->ndev->name);
+	netdev_info(qdev->ndev, "enabling Agere specific PHY\n");
 	/* power down device bit 11 = 1 */
 	ql_mii_write_reg_ex(qdev, 0x00, 0x1940, miiAddr);
 	/* enable diagnostic mode bit 2 = 1 */
@@ -918,7 +864,8 @@
 	/* point to hidden reg 0x2806 */
 	ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr);
 	/* Write new PHYAD w/bit 5 set */
-	ql_mii_write_reg_ex(qdev, 0x11, 0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr);
+	ql_mii_write_reg_ex(qdev, 0x11,
+			    0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr);
 	/*
 	 * Disable diagnostic mode bit 2 = 0
 	 * Power up device bit 11 = 0
@@ -929,21 +876,19 @@
 	ql_mii_write_reg(qdev, 0x1c, 0xfaf0);
 }
 
-static PHY_DEVICE_et getPhyType (struct ql3_adapter *qdev,
-				 u16 phyIdReg0, u16 phyIdReg1)
+static enum PHY_DEVICE_TYPE getPhyType(struct ql3_adapter *qdev,
+				       u16 phyIdReg0, u16 phyIdReg1)
 {
-	PHY_DEVICE_et result = PHY_TYPE_UNKNOWN;
+	enum PHY_DEVICE_TYPE result = PHY_TYPE_UNKNOWN;
 	u32   oui;
 	u16   model;
 	int i;
 
-	if (phyIdReg0 == 0xffff) {
+	if (phyIdReg0 == 0xffff)
 		return result;
-	}
 
-	if (phyIdReg1 == 0xffff) {
+	if (phyIdReg1 == 0xffff)
 		return result;
-	}
 
 	/* oui is split between two registers */
 	oui = (phyIdReg0 << 6) | ((phyIdReg1 & PHY_OUI_1_MASK) >> 10);
@@ -951,15 +896,13 @@
 	model = (phyIdReg1 & PHY_MODEL_MASK) >> 4;
 
 	/* Scan table for this PHY */
-	for(i = 0; i < MAX_PHY_DEV_TYPES; i++) {
-		if ((oui == PHY_DEVICES[i].phyIdOUI) && (model == PHY_DEVICES[i].phyIdModel))
-		{
+	for (i = 0; i < MAX_PHY_DEV_TYPES; i++) {
+		if ((oui == PHY_DEVICES[i].phyIdOUI) &&
+		    (model == PHY_DEVICES[i].phyIdModel)) {
+			netdev_info(qdev->ndev, "Phy: %s\n",
+				    PHY_DEVICES[i].name);
 			result = PHY_DEVICES[i].phyDevice;
-
-			printk(KERN_INFO "%s: Phy: %s\n",
-				qdev->ndev->name, PHY_DEVICES[i].name);
-
-		        break;
+			break;
 		}
 	}
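
getPhyType() splits the two clause-22 PHY identifier registers into an OUI and
a model number before scanning the table. A worked example, assuming the
conventional masks PHY_OUI_1_MASK == 0xfc00 and PHY_MODEL_MASK == 0x03f0: a
VSC8211 reports phyIdReg0 = 0x000f and phyIdReg1 = 0xc4b0, so

	oui   = (0x000f << 6) | ((0xc4b0 & 0xfc00) >> 10);	/* 0x0003f1 */
	model = (0xc4b0 & 0x03f0) >> 4;				/* 0xb */

which matches the PHY_VITESSE_VSC8211 row of PHY_DEVICES[].
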
 
@@ -970,9 +913,8 @@
 {
 	u16 reg;
 
-	switch(qdev->phyType) {
-	case PHY_AGERE_ET1011C:
-	{
+	switch (qdev->phyType) {
+	case PHY_AGERE_ET1011C: {
 		if (ql_mii_read_reg(qdev, 0x1A, &reg) < 0)
 			return 0;
 
@@ -980,20 +922,20 @@
 		break;
 	}
 	default:
-	if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
-		return 0;
+		if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
+			return 0;
 
-	reg = (((reg & 0x18) >> 3) & 3);
+		reg = (((reg & 0x18) >> 3) & 3);
 	}
 
-	switch(reg) {
-		case 2:
+	switch (reg) {
+	case 2:
 		return SPEED_1000;
-		case 1:
+	case 1:
 		return SPEED_100;
-		case 0:
+	case 0:
 		return SPEED_10;
-		default:
+	default:
 		return -1;
 	}
 }
@@ -1002,17 +944,15 @@
 {
 	u16 reg;
 
-	switch(qdev->phyType) {
-	case PHY_AGERE_ET1011C:
-	{
+	switch (qdev->phyType) {
+	case PHY_AGERE_ET1011C: {
 		if (ql_mii_read_reg(qdev, 0x1A, &reg))
 			return 0;
 
 		return ((reg & 0x0080) && (reg & 0x1000)) != 0;
 	}
 	case PHY_VITESSE_VSC8211:
-	default:
-	{
+	default: {
 		if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
 			return 0;
 		return (reg & PHY_AUX_DUPLEX_STAT) != 0;
@@ -1040,17 +980,15 @@
 
 	/*  Determine the PHY we are using by reading the ID's */
 	err = ql_mii_read_reg(qdev, PHY_ID_0_REG, &reg1);
-	if(err != 0) {
-		printk(KERN_ERR "%s: Could not read from reg PHY_ID_0_REG\n",
-		       qdev->ndev->name);
-                return err;
+	if (err != 0) {
+		netdev_err(qdev->ndev, "Could not read from reg PHY_ID_0_REG\n");
+		return err;
 	}
 
 	err = ql_mii_read_reg(qdev, PHY_ID_1_REG, &reg2);
-	if(err != 0) {
-		printk(KERN_ERR "%s: Could not read from reg PHY_ID_0_REG\n",
-		       qdev->ndev->name);
-                return err;
+	if (err != 0) {
+		netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG\n");
+		return err;
 	}
 
 	/*  Check if we have a Agere PHY */
@@ -1058,24 +996,22 @@
 
 		/* Determine which MII address we should be using
 		   determined by the index of the card */
-		if (qdev->mac_index == 0) {
+		if (qdev->mac_index == 0)
 			miiAddr = MII_AGERE_ADDR_1;
-		} else {
+		else
 			miiAddr = MII_AGERE_ADDR_2;
-		}
 
-		err =ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, &reg1, miiAddr);
-		if(err != 0) {
-			printk(KERN_ERR "%s: Could not read from reg PHY_ID_0_REG after Agere detected\n",
-		       	       qdev->ndev->name);
+		err = ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, &reg1, miiAddr);
+		if (err != 0) {
+			netdev_err(qdev->ndev,
+				   "Could not read from reg PHY_ID_0_REG after Agere detected\n");
 			return err;
 		}
 
 		err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, &reg2, miiAddr);
-		if(err != 0) {
-			printk(KERN_ERR "%s: Could not read from reg PHY_ID_0_REG after Agere detected\n",
-			       qdev->ndev->name);
-        	        return err;
+		if (err != 0) {
+			netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG after Agere detected\n");
+			return err;
 		}
 
 		/*  We need to remember to initialize the Agere PHY */
@@ -1090,7 +1026,7 @@
 		/* need this here so address gets changed */
 		phyAgereSpecificInit(qdev, miiAddr);
 	} else if (qdev->phyType == PHY_TYPE_UNKNOWN) {
-		printk(KERN_ERR "%s: PHY is unknown\n", qdev->ndev->name);
+		netdev_err(qdev->ndev, "PHY is unknown\n");
 		return -EIO;
 	}
 
@@ -1103,7 +1039,7 @@
 static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable)
 {
 	struct ql3xxx_port_registers __iomem *port_regs =
-	    		qdev->mem_map_registers;
+			qdev->mem_map_registers;
 	u32 value;
 
 	if (enable)
@@ -1123,7 +1059,7 @@
 static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable)
 {
 	struct ql3xxx_port_registers __iomem *port_regs =
-	    		qdev->mem_map_registers;
+			qdev->mem_map_registers;
 	u32 value;
 
 	if (enable)
@@ -1143,7 +1079,7 @@
 static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable)
 {
 	struct ql3xxx_port_registers __iomem *port_regs =
-	    		qdev->mem_map_registers;
+			qdev->mem_map_registers;
 	u32 value;
 
 	if (enable)
@@ -1163,7 +1099,7 @@
 static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable)
 {
 	struct ql3xxx_port_registers __iomem *port_regs =
-	    		qdev->mem_map_registers;
+			qdev->mem_map_registers;
 	u32 value;
 
 	if (enable)
@@ -1183,7 +1119,7 @@
 static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable)
 {
 	struct ql3xxx_port_registers __iomem *port_regs =
-	    		qdev->mem_map_registers;
+			qdev->mem_map_registers;
 	u32 value;
 
 	if (enable)
@@ -1205,7 +1141,7 @@
 static int ql_is_fiber(struct ql3_adapter *qdev)
 {
 	struct ql3xxx_port_registers __iomem *port_regs =
-	    		qdev->mem_map_registers;
+			qdev->mem_map_registers;
 	u32 bitToCheck = 0;
 	u32 temp;
 
@@ -1235,7 +1171,7 @@
 static int ql_is_auto_neg_complete(struct ql3_adapter *qdev)
 {
 	struct ql3xxx_port_registers __iomem *port_regs =
-	    		qdev->mem_map_registers;
+			qdev->mem_map_registers;
 	u32 bitToCheck = 0;
 	u32 temp;
 
@@ -1250,18 +1186,11 @@
 
 	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
 	if (temp & bitToCheck) {
-		if (netif_msg_link(qdev))
-			printk(KERN_INFO PFX
-			       "%s: Auto-Negotiate complete.\n",
-			       qdev->ndev->name);
+		netif_info(qdev, link, qdev->ndev, "Auto-Negotiate complete\n");
 		return 1;
-	} else {
-		if (netif_msg_link(qdev))
-			printk(KERN_WARNING PFX
-			       "%s: Auto-Negotiate incomplete.\n",
-			       qdev->ndev->name);
-		return 0;
 	}
+	netif_info(qdev, link, qdev->ndev, "Auto-Negotiate incomplete\n");
+	return 0;
 }
 
 /*
@@ -1278,7 +1207,7 @@
 static int ql_auto_neg_error(struct ql3_adapter *qdev)
 {
 	struct ql3xxx_port_registers __iomem *port_regs =
-	    		qdev->mem_map_registers;
+			qdev->mem_map_registers;
 	u32 bitToCheck = 0;
 	u32 temp;
 
@@ -1316,7 +1245,7 @@
 static int ql_link_down_detect(struct ql3_adapter *qdev)
 {
 	struct ql3xxx_port_registers __iomem *port_regs =
-	    		qdev->mem_map_registers;
+			qdev->mem_map_registers;
 	u32 bitToCheck = 0;
 	u32 temp;
 
@@ -1340,7 +1269,7 @@
 static int ql_link_down_detect_clear(struct ql3_adapter *qdev)
 {
 	struct ql3xxx_port_registers __iomem *port_regs =
-	    		qdev->mem_map_registers;
+			qdev->mem_map_registers;
 
 	switch (qdev->mac_index) {
 	case 0:
@@ -1370,7 +1299,7 @@
 static int ql_this_adapter_controls_port(struct ql3_adapter *qdev)
 {
 	struct ql3xxx_port_registers __iomem *port_regs =
-	    		qdev->mem_map_registers;
+			qdev->mem_map_registers;
 	u32 bitToCheck = 0;
 	u32 temp;
 
@@ -1387,16 +1316,13 @@
 
 	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
 	if (temp & bitToCheck) {
-		if (netif_msg_link(qdev))
-			printk(KERN_DEBUG PFX
-			       "%s: is not link master.\n", qdev->ndev->name);
+		netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
+			     "not link master\n");
 		return 0;
-	} else {
-		if (netif_msg_link(qdev))
-			printk(KERN_DEBUG PFX
-			       "%s: is link master.\n", qdev->ndev->name);
-		return 1;
 	}
+
+	netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, "link master\n");
+	return 1;
 }
 
 static void ql_phy_reset_ex(struct ql3_adapter *qdev)
@@ -1410,19 +1336,20 @@
 	u16 reg;
 	u16 portConfiguration;
 
-	if(qdev->phyType == PHY_AGERE_ET1011C) {
-		/* turn off external loopback */
+	/* turn off external loopback */
+	if (qdev->phyType == PHY_AGERE_ET1011C)
 		ql_mii_write_reg(qdev, 0x13, 0x0000);
-	}
 
-	if(qdev->mac_index == 0)
-		portConfiguration = qdev->nvram_data.macCfg_port0.portConfiguration;
+	if (qdev->mac_index == 0)
+		portConfiguration =
+			qdev->nvram_data.macCfg_port0.portConfiguration;
 	else
-		portConfiguration = qdev->nvram_data.macCfg_port1.portConfiguration;
+		portConfiguration =
+			qdev->nvram_data.macCfg_port1.portConfiguration;
 
 	/*  Some HBA's in the field are set to 0 and they need to
 	    be reinterpreted with a default value */
-	if(portConfiguration == 0)
+	if (portConfiguration == 0)
 		portConfiguration = PORT_CONFIG_DEFAULT;
 
 	/* Set the 1000 advertisements */
@@ -1430,8 +1357,8 @@
 			   PHYAddr[qdev->mac_index]);
 	reg &= ~PHY_GIG_ALL_PARAMS;
 
-	if(portConfiguration & PORT_CONFIG_1000MB_SPEED) {
-		if(portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED)
+	if (portConfiguration & PORT_CONFIG_1000MB_SPEED) {
+		if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED)
 			reg |= PHY_GIG_ADV_1000F;
 		else
 			reg |= PHY_GIG_ADV_1000H;
@@ -1445,29 +1372,27 @@
 			   PHYAddr[qdev->mac_index]);
 	reg &= ~PHY_NEG_ALL_PARAMS;
 
-	if(portConfiguration & PORT_CONFIG_SYM_PAUSE_ENABLED)
+	if (portConfiguration & PORT_CONFIG_SYM_PAUSE_ENABLED)
 		reg |= PHY_NEG_ASY_PAUSE | PHY_NEG_SYM_PAUSE;
 
-	if(portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) {
-		if(portConfiguration & PORT_CONFIG_100MB_SPEED)
+	if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) {
+		if (portConfiguration & PORT_CONFIG_100MB_SPEED)
 			reg |= PHY_NEG_ADV_100F;
 
-		if(portConfiguration & PORT_CONFIG_10MB_SPEED)
+		if (portConfiguration & PORT_CONFIG_10MB_SPEED)
 			reg |= PHY_NEG_ADV_10F;
 	}
 
-	if(portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) {
-		if(portConfiguration & PORT_CONFIG_100MB_SPEED)
+	if (portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) {
+		if (portConfiguration & PORT_CONFIG_100MB_SPEED)
 			reg |= PHY_NEG_ADV_100H;
 
-		if(portConfiguration & PORT_CONFIG_10MB_SPEED)
+		if (portConfiguration & PORT_CONFIG_10MB_SPEED)
 			reg |= PHY_NEG_ADV_10H;
 	}
 
-	if(portConfiguration &
-	   PORT_CONFIG_1000MB_SPEED) {
+	if (portConfiguration & PORT_CONFIG_1000MB_SPEED)
 		reg |= 1;
-	}
 
 	ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg,
 			    PHYAddr[qdev->mac_index]);
@@ -1492,7 +1417,7 @@
 static u32 ql_get_link_state(struct ql3_adapter *qdev)
 {
 	struct ql3xxx_port_registers __iomem *port_regs =
-	    		qdev->mem_map_registers;
+			qdev->mem_map_registers;
 	u32 bitToCheck = 0;
 	u32 temp, linkState;
 
@@ -1504,22 +1429,22 @@
 		bitToCheck = PORT_STATUS_UP1;
 		break;
 	}
+
 	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
-	if (temp & bitToCheck) {
+	if (temp & bitToCheck)
 		linkState = LS_UP;
-	} else {
+	else
 		linkState = LS_DOWN;
-	}
+
 	return linkState;
 }
 
 static int ql_port_start(struct ql3_adapter *qdev)
 {
-	if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
 		(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
 			 2) << 7)) {
-		printk(KERN_ERR "%s: Could not get hw lock for GIO\n",
-		       qdev->ndev->name);
+		netdev_err(qdev->ndev, "Could not get hw lock for GIO\n");
 		return -1;
 	}
 
@@ -1537,19 +1462,16 @@
 static int ql_finish_auto_neg(struct ql3_adapter *qdev)
 {
 
-	if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
 		(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
 			 2) << 7))
 		return -1;
 
 	if (!ql_auto_neg_error(qdev)) {
-		if (test_bit(QL_LINK_MASTER,&qdev->flags)) {
+		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
 			/* configure the MAC */
-			if (netif_msg_link(qdev))
-				printk(KERN_DEBUG PFX
-				       "%s: Configuring link.\n",
-				       qdev->ndev->
-				       name);
+			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
+				     "Configuring link\n");
 			ql_mac_cfg_soft_reset(qdev, 1);
 			ql_mac_cfg_gig(qdev,
 				       (ql_get_link_speed
@@ -1564,43 +1486,32 @@
 			ql_mac_cfg_soft_reset(qdev, 0);
 
 			/* enable the MAC */
-			if (netif_msg_link(qdev))
-				printk(KERN_DEBUG PFX
-				       "%s: Enabling mac.\n",
-				       qdev->ndev->
-					       name);
+			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
+				     "Enabling mac\n");
 			ql_mac_enable(qdev, 1);
 		}
 
 		qdev->port_link_state = LS_UP;
 		netif_start_queue(qdev->ndev);
 		netif_carrier_on(qdev->ndev);
-		if (netif_msg_link(qdev))
-			printk(KERN_INFO PFX
-			       "%s: Link is up at %d Mbps, %s duplex.\n",
-			       qdev->ndev->name,
-			       ql_get_link_speed(qdev),
-			       ql_is_link_full_dup(qdev)
-			       ? "full" : "half");
+		netif_info(qdev, link, qdev->ndev,
+			   "Link is up at %d Mbps, %s duplex\n",
+			   ql_get_link_speed(qdev),
+			   ql_is_link_full_dup(qdev) ? "full" : "half");
 
 	} else {	/* Remote error detected */
 
-		if (test_bit(QL_LINK_MASTER,&qdev->flags)) {
-			if (netif_msg_link(qdev))
-				printk(KERN_DEBUG PFX
-				       "%s: Remote error detected. "
-				       "Calling ql_port_start().\n",
-				       qdev->ndev->
-				       name);
+		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
+			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
+				     "Remote error detected. Calling ql_port_start()\n");
 			/*
 			 * ql_port_start() is shared code and needs
 			 * to lock the PHY on it's own.
 			 */
 			ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
-			if(ql_port_start(qdev))	{/* Restart port */
+			if (ql_port_start(qdev))	/* Restart port */
 				return -1;
-			} else
-				return 0;
+			return 0;
 		}
 	}
 	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
@@ -1619,33 +1530,28 @@
 
 	curr_link_state = ql_get_link_state(qdev);
 
-	if (test_bit(QL_RESET_ACTIVE,&qdev->flags)) {
-		if (netif_msg_link(qdev))
-			printk(KERN_INFO PFX
-			       "%s: Reset in progress, skip processing link "
-			       "state.\n", qdev->ndev->name);
+	if (test_bit(QL_RESET_ACTIVE, &qdev->flags)) {
+		netif_info(qdev, link, qdev->ndev,
+			   "Reset in progress, skip processing link state\n");
 
 		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 
 		/* Restart timer on 2 second interval. */
-		mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);\
+		mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
 
 		return;
 	}
 
 	switch (qdev->port_link_state) {
 	default:
-		if (test_bit(QL_LINK_MASTER,&qdev->flags)) {
+		if (test_bit(QL_LINK_MASTER, &qdev->flags))
 			ql_port_start(qdev);
-		}
 		qdev->port_link_state = LS_DOWN;
 		/* Fall Through */
 
 	case LS_DOWN:
 		if (curr_link_state == LS_UP) {
-			if (netif_msg_link(qdev))
-				printk(KERN_INFO PFX "%s: Link is up.\n",
-				       qdev->ndev->name);
+			netif_info(qdev, link, qdev->ndev, "Link is up\n");
 			if (ql_is_auto_neg_complete(qdev))
 				ql_finish_auto_neg(qdev);
 
@@ -1662,9 +1568,7 @@
 		 * back up
 		 */
 		if (curr_link_state == LS_DOWN) {
-			if (netif_msg_link(qdev))
-				printk(KERN_INFO PFX "%s: Link is down.\n",
-				       qdev->ndev->name);
+			netif_info(qdev, link, qdev->ndev, "Link is down\n");
 			qdev->port_link_state = LS_DOWN;
 		}
 		if (ql_link_down_detect(qdev))
@@ -1683,9 +1587,9 @@
 static void ql_get_phy_owner(struct ql3_adapter *qdev)
 {
 	if (ql_this_adapter_controls_port(qdev))
-		set_bit(QL_LINK_MASTER,&qdev->flags);
+		set_bit(QL_LINK_MASTER, &qdev->flags);
 	else
-		clear_bit(QL_LINK_MASTER,&qdev->flags);
+		clear_bit(QL_LINK_MASTER, &qdev->flags);
 }
 
 /*
@@ -1695,7 +1599,7 @@
 {
 	ql_mii_enable_scan_mode(qdev);
 
-	if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) {
+	if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
 		if (ql_this_adapter_controls_port(qdev))
 			ql_petbi_init_ex(qdev);
 	} else {
@@ -1705,18 +1609,18 @@
 }
 
 /*
- * MII_Setup needs to be called before taking the PHY out of reset so that the
- * management interface clock speed can be set properly.  It would be better if
- * we had a way to disable MDC until after the PHY is out of reset, but we
- * don't have that capability.
+ * MII_Setup needs to be called before taking the PHY out of reset
+ * so that the management interface clock speed can be set properly.
+ * It would be better if we had a way to disable MDC until after the
+ * PHY is out of reset, but we don't have that capability.
  */
 static int ql_mii_setup(struct ql3_adapter *qdev)
 {
 	u32 reg;
 	struct ql3xxx_port_registers __iomem *port_regs =
-	    		qdev->mem_map_registers;
+			qdev->mem_map_registers;
 
-	if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
 			(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
 			 2) << 7))
 		return -1;
@@ -1735,24 +1639,24 @@
 	return 0;
 }
 
+#define SUPPORTED_OPTICAL_MODES	(SUPPORTED_1000baseT_Full |	\
+				 SUPPORTED_FIBRE |		\
+				 SUPPORTED_Autoneg)
+#define SUPPORTED_TP_MODES	(SUPPORTED_10baseT_Half |	\
+				 SUPPORTED_10baseT_Full |	\
+				 SUPPORTED_100baseT_Half |	\
+				 SUPPORTED_100baseT_Full |	\
+				 SUPPORTED_1000baseT_Half |	\
+				 SUPPORTED_1000baseT_Full |	\
+				 SUPPORTED_Autoneg |		\
+				 SUPPORTED_TP)
+
 static u32 ql_supported_modes(struct ql3_adapter *qdev)
 {
-	u32 supported;
+	if (test_bit(QL_LINK_OPTICAL, &qdev->flags))
+		return SUPPORTED_OPTICAL_MODES;
 
-	if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) {
-		supported = SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE
-		    | SUPPORTED_Autoneg;
-	} else {
-		supported = SUPPORTED_10baseT_Half
-		    | SUPPORTED_10baseT_Full
-		    | SUPPORTED_100baseT_Half
-		    | SUPPORTED_100baseT_Full
-		    | SUPPORTED_1000baseT_Half
-		    | SUPPORTED_1000baseT_Full
-		    | SUPPORTED_Autoneg | SUPPORTED_TP;
-	}
-
-	return supported;
+	return SUPPORTED_TP_MODES;
 }
 
 static int ql_get_auto_cfg_status(struct ql3_adapter *qdev)
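
The SUPPORTED_*_MODES macros above must expand to a bare expression: a
trailing semicolon (or stray line continuation) inside the #define silently
breaks any use that is not a complete statement. Illustrative only:

#define BAD_MODES	(SUPPORTED_TP | SUPPORTED_Autoneg);	/* stray ';' */

void example(void)
{
	u32 a = BAD_MODES;			/* compiles: the extra ';'
						   is an empty statement */
	u32 b = BAD_MODES | SUPPORTED_FIBRE;	/* syntax error on expansion */
	u32 c = SUPPORTED_TP_MODES;		/* fine: bare expression */
}
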
@@ -1760,9 +1664,9 @@
 	int status;
 	unsigned long hw_flags;
 	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
-	if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
-		(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
-			 2) << 7)) {
+	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+			    (QL_RESOURCE_BITS_BASE_CODE |
+			     (qdev->mac_index) * 2) << 7)) {
 		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 		return 0;
 	}
@@ -1777,9 +1681,9 @@
 	u32 status;
 	unsigned long hw_flags;
 	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
-	if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
-		(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
-			 2) << 7)) {
+	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+			    (QL_RESOURCE_BITS_BASE_CODE |
+			     (qdev->mac_index) * 2) << 7)) {
 		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 		return 0;
 	}
@@ -1794,9 +1698,9 @@
 	int status;
 	unsigned long hw_flags;
 	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
-	if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
-		(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
-			 2) << 7)) {
+	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+			    (QL_RESOURCE_BITS_BASE_CODE |
+			     (qdev->mac_index) * 2) << 7)) {
 		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 		return 0;
 	}
@@ -1806,7 +1710,6 @@
 	return status;
 }
 
-
 static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
 {
 	struct ql3_adapter *qdev = netdev_priv(ndev);
@@ -1814,7 +1717,7 @@
 	ecmd->transceiver = XCVR_INTERNAL;
 	ecmd->supported = ql_supported_modes(qdev);
 
-	if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) {
+	if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
 		ecmd->port = PORT_FIBRE;
 	} else {
 		ecmd->port = PORT_TP;
@@ -1855,10 +1758,11 @@
 			      struct ethtool_pauseparam *pause)
 {
 	struct ql3_adapter *qdev = netdev_priv(ndev);
-	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+	struct ql3xxx_port_registers __iomem *port_regs =
+		qdev->mem_map_registers;
 
 	u32 reg;
-	if(qdev->mac_index == 0)
+	if (qdev->mac_index == 0)
 		reg = ql_read_page0_reg(qdev, &port_regs->mac0ConfigReg);
 	else
 		reg = ql_read_page0_reg(qdev, &port_regs->mac1ConfigReg);
@@ -1885,12 +1789,12 @@
 
 	while (lrg_buf_cb) {
 		if (!lrg_buf_cb->skb) {
-			lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
-							   qdev->lrg_buffer_len);
+			lrg_buf_cb->skb =
+				netdev_alloc_skb(qdev->ndev,
+						 qdev->lrg_buffer_len);
 			if (unlikely(!lrg_buf_cb->skb)) {
-				printk(KERN_DEBUG PFX
-				       "%s: Failed netdev_alloc_skb().\n",
-				       qdev->ndev->name);
+				netdev_printk(KERN_DEBUG, qdev->ndev,
+					      "Failed netdev_alloc_skb()\n");
 				break;
 			} else {
 				/*
@@ -1905,9 +1809,10 @@
 						     PCI_DMA_FROMDEVICE);
 
 				err = pci_dma_mapping_error(qdev->pdev, map);
-				if(err) {
-					printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
-					       qdev->ndev->name, err);
+				if (err) {
+					netdev_err(qdev->ndev,
+						   "PCI mapping failed with error: %d\n",
+						   err);
 					dev_kfree_skb(lrg_buf_cb->skb);
 					lrg_buf_cb->skb = NULL;
 					break;
@@ -1915,9 +1820,9 @@
 
 
 				lrg_buf_cb->buf_phy_addr_low =
-				    cpu_to_le32(LS_64BITS(map));
+					cpu_to_le32(LS_64BITS(map));
 				lrg_buf_cb->buf_phy_addr_high =
-				    cpu_to_le32(MS_64BITS(map));
+					cpu_to_le32(MS_64BITS(map));
 				dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
 				dma_unmap_len_set(lrg_buf_cb, maplen,
 						  qdev->lrg_buffer_len -
@@ -1937,7 +1842,9 @@
  */
 static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev)
 {
-	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+	struct ql3xxx_port_registers __iomem *port_regs =
+		qdev->mem_map_registers;
+
 	if (qdev->small_buf_release_cnt >= 16) {
 		while (qdev->small_buf_release_cnt >= 16) {
 			qdev->small_buf_q_producer_index++;
@@ -1961,7 +1868,8 @@
 	struct bufq_addr_element *lrg_buf_q_ele;
 	int i;
 	struct ql_rcv_buf_cb *lrg_buf_cb;
-	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+	struct ql3xxx_port_registers __iomem *port_regs =
+		qdev->mem_map_registers;
 
 	if ((qdev->lrg_buf_free_count >= 8) &&
 	    (qdev->lrg_buf_release_cnt >= 16)) {
@@ -1989,7 +1897,8 @@
 
 			qdev->lrg_buf_q_producer_index++;
 
-			if (qdev->lrg_buf_q_producer_index == qdev->num_lbufq_entries)
+			if (qdev->lrg_buf_q_producer_index ==
+			    qdev->num_lbufq_entries)
 				qdev->lrg_buf_q_producer_index = 0;
 
 			if (qdev->lrg_buf_q_producer_index ==
@@ -2011,23 +1920,26 @@
 	int i;
 	int retval = 0;
 
-	if(mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
-		printk(KERN_WARNING "Frame short but, frame was padded and sent.\n");
+	if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
+		netdev_warn(qdev->ndev,
+			    "Frame too short but it was padded and sent\n");
 	}
 
 	tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];
 
 	/*  Check the transmit response flags for any errors */
-	if(mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
-		printk(KERN_ERR "Frame too short to be legal, frame not sent.\n");
+	if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
+		netdev_err(qdev->ndev,
+			   "Frame too short to be legal, frame not sent\n");
 
 		qdev->ndev->stats.tx_errors++;
 		retval = -EIO;
 		goto frame_not_sent;
 	}
 
-	if(tx_cb->seg_count == 0) {
-		printk(KERN_ERR "tx_cb->seg_count == 0: %d\n", mac_rsp->transaction_id);
+	if (tx_cb->seg_count == 0) {
+		netdev_err(qdev->ndev, "tx_cb->seg_count == 0: %d\n",
+			   mac_rsp->transaction_id);
 
 		qdev->ndev->stats.tx_errors++;
 		retval = -EIO;
@@ -2073,7 +1985,7 @@
 	qdev->lrg_buf_release_cnt++;
 	if (++qdev->lrg_buf_index == qdev->num_large_buffers)
 		qdev->lrg_buf_index = 0;
-	return(lrg_buf_cb);
+	return lrg_buf_cb;
 }
 
 /*
@@ -2177,12 +2089,11 @@
 		if (checksum &
 			(IB_IP_IOCB_RSP_3032_ICE |
 			 IB_IP_IOCB_RSP_3032_CE)) {
-			printk(KERN_ERR
-			       "%s: Bad checksum for this %s packet, checksum = %x.\n",
-			       __func__,
-			       ((checksum &
-				IB_IP_IOCB_RSP_3032_TCP) ? "TCP" :
-				"UDP"),checksum);
+			netdev_err(ndev,
+				   "%s: Bad checksum for this %s packet, checksum = %x\n",
+				   __func__,
+				   ((checksum & IB_IP_IOCB_RSP_3032_TCP) ?
+				    "TCP" : "UDP"), checksum);
 		} else if ((checksum & IB_IP_IOCB_RSP_3032_TCP) ||
 				(checksum & IB_IP_IOCB_RSP_3032_UDP &&
 				!(checksum & IB_IP_IOCB_RSP_3032_NUC))) {
@@ -2215,8 +2126,8 @@
 		net_rsp = qdev->rsp_current;
 		rmb();
 		/*
-		 * Fix 4032 chipe undocumented "feature" where bit-8 is set if the
-		 * inbound completion is for a VLAN.
+		 * Fix 4032 chip's undocumented "feature" where bit-8 is set
+		 * if the inbound completion is for a VLAN.
 		 */
 		if (qdev->device_id == QL3032_DEVICE_ID)
 			net_rsp->opcode &= 0x7f;
@@ -2242,22 +2153,18 @@
 						 net_rsp);
 			(*rx_cleaned)++;
 			break;
-		default:
-			{
-				u32 *tmp = (u32 *) net_rsp;
-				printk(KERN_ERR PFX
-				       "%s: Hit default case, not "
-				       "handled!\n"
-				       "	dropping the packet, opcode = "
-				       "%x.\n",
-				       ndev->name, net_rsp->opcode);
-				printk(KERN_ERR PFX
-				       "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n",
-				       (unsigned long int)tmp[0],
-				       (unsigned long int)tmp[1],
-				       (unsigned long int)tmp[2],
-				       (unsigned long int)tmp[3]);
-			}
+		default: {
+			u32 *tmp = (u32 *)net_rsp;
+			netdev_err(ndev,
+				   "Hit default case, not handled!\n"
+				   "	dropping the packet, opcode = %x\n"
+				   "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n",
+				   net_rsp->opcode,
+				   (unsigned long int)tmp[0],
+				   (unsigned long int)tmp[1],
+				   (unsigned long int)tmp[2],
+				   (unsigned long int)tmp[3]);
+		}
 		}
 
 		qdev->rsp_consumer_index++;
@@ -2280,7 +2187,8 @@
 	struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi);
 	int rx_cleaned = 0, tx_cleaned = 0;
 	unsigned long hw_flags;
-	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+	struct ql3xxx_port_registers __iomem *port_regs =
+		qdev->mem_map_registers;
 
 	ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, budget);
 
@@ -2303,15 +2211,14 @@
 
 	struct net_device *ndev = dev_id;
 	struct ql3_adapter *qdev = netdev_priv(ndev);
-	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+	struct ql3xxx_port_registers __iomem *port_regs =
+		qdev->mem_map_registers;
 	u32 value;
 	int handled = 1;
 	u32 var;
 
-	port_regs = qdev->mem_map_registers;
-
-	value =
-	    ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);
+	value = ql_read_common_reg_l(qdev,
+				     &port_regs->CommonRegs.ispControlStatus);
 
 	if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) {
 		spin_lock(&qdev->adapter_lock);
@@ -2319,7 +2226,7 @@
 		netif_carrier_off(qdev->ndev);
 		ql_disable_interrupts(qdev);
 		qdev->port_link_state = LS_DOWN;
-		set_bit(QL_RESET_ACTIVE,&qdev->flags) ;
+		set_bit(QL_RESET_ACTIVE, &qdev->flags);
 
 		if (value & ISP_CONTROL_FE) {
 			/*
@@ -2328,69 +2235,53 @@
 			var =
 			    ql_read_page0_reg_l(qdev,
 					      &port_regs->PortFatalErrStatus);
-			printk(KERN_WARNING PFX
-			       "%s: Resetting chip. PortFatalErrStatus "
-			       "register = 0x%x\n", ndev->name, var);
-			set_bit(QL_RESET_START,&qdev->flags) ;
+			netdev_warn(ndev,
+				    "Resetting chip. PortFatalErrStatus register = 0x%x\n",
+				    var);
+			set_bit(QL_RESET_START, &qdev->flags);
 		} else {
 			/*
 			 * Soft Reset Requested.
 			 */
-			set_bit(QL_RESET_PER_SCSI,&qdev->flags) ;
-			printk(KERN_ERR PFX
-			       "%s: Another function issued a reset to the "
-			       "chip. ISR value = %x.\n", ndev->name, value);
+			set_bit(QL_RESET_PER_SCSI, &qdev->flags);
+			netdev_err(ndev,
+				   "Another function issued a reset to the chip. ISR value = %x\n",
+				   value);
 		}
 		queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0);
 		spin_unlock(&qdev->adapter_lock);
 	} else if (value & ISP_IMR_DISABLE_CMPL_INT) {
 		ql_disable_interrupts(qdev);
-		if (likely(napi_schedule_prep(&qdev->napi))) {
+		if (likely(napi_schedule_prep(&qdev->napi)))
 			__napi_schedule(&qdev->napi);
-		}
-	} else {
+	} else
 		return IRQ_NONE;
-	}
 
 	return IRQ_RETVAL(handled);
 }
 
 /*
- * Get the total number of segments needed for the
- * given number of fragments.  This is necessary because
- * outbound address lists (OAL) will be used when more than
- * two frags are given.  Each address list has 5 addr/len
- * pairs.  The 5th pair in each AOL is used to  point to
- * the next AOL if more frags are coming.
- * That is why the frags:segment count  ratio is not linear.
+ * Get the total number of segments needed for the given number of fragments.
+ * This is necessary because outbound address lists (OAL) will be used when
+ * more than two frags are given.  Each address list has 5 addr/len pairs.
+ * The 5th pair in each OAL is used to point to the next OAL if more frags
+ * are coming.  That is why the frags:segment count ratio is not linear.
  */
-static int ql_get_seg_count(struct ql3_adapter *qdev,
-			    unsigned short frags)
+static int ql_get_seg_count(struct ql3_adapter *qdev, unsigned short frags)
 {
 	if (qdev->device_id == QL3022_DEVICE_ID)
 		return 1;
 
-	switch(frags) {
-	case 0:	return 1;	/* just the skb->data seg */
-	case 1:	return 2;	/* skb->data + 1 frag */
-	case 2:	return 3;	/* skb->data + 2 frags */
-	case 3:	return 5;	/* skb->data + 1 frag + 1 AOL containting 2 frags */
-	case 4:	return 6;
-	case 5:	return 7;
-	case 6:	return 8;
-	case 7:	return 10;
-	case 8:	return 11;
-	case 9:	return 12;
-	case 10: return 13;
-	case 11: return 15;
-	case 12: return 16;
-	case 13: return 17;
-	case 14: return 18;
-	case 15: return 20;
-	case 16: return 21;
-	case 17: return 22;
-	case 18: return 23;
-	}
+	if (frags <= 2)
+		return frags + 1;
+	else if (frags <= 6)
+		return frags + 2;
+	else if (frags <= 10)
+		return frags + 3;
+	else if (frags <= 14)
+		return frags + 4;
+	else if (frags <= 18)
+		return frags + 5;
 	return -1;
 }
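
The closed form above reproduces the old 19-entry lookup exactly, and it
follows from the geometry spelled out in the comment: three addr/len slots
live inline in the IOCB, and each OAL contributes five more of which the
fifth chains onward, so every four extra fragments cost one extra
bookkeeping segment. Spot checks (illustrative calls):

	ql_get_seg_count(qdev, 2);	/*  2 + 1 =  3: data + 2 frags inline */
	ql_get_seg_count(qdev, 3);	/*  3 + 2 =  5: first OAL needed */
	ql_get_seg_count(qdev, 7);	/*  7 + 3 = 10: second OAL */
	ql_get_seg_count(qdev, 18);	/* 18 + 5 = 23: matches the old table */
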
 
@@ -2413,8 +2304,8 @@
 }
 
 /*
- * Map the buffers for this transmit.  This will return
- * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
+ * Map the buffers for this transmit.
+ * This will return NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
  */
 static int ql_send_map(struct ql3_adapter *qdev,
 				struct ob_mac_iocb_req *mac_iocb_ptr,
@@ -2437,9 +2328,9 @@
 	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
 
 	err = pci_dma_mapping_error(qdev->pdev, map);
-	if(err) {
-		printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
-		       qdev->ndev->name, err);
+	if (err) {
+		netdev_err(qdev->ndev, "PCI mapping failed with error: %d\n",
+			   err);
 
 		return NETDEV_TX_BUSY;
 	}
@@ -2455,65 +2346,67 @@
 	if (seg_cnt == 1) {
 		/* Terminate the last segment. */
 		oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
-	} else {
-		oal = tx_cb->oal;
-		for (completed_segs=0; completed_segs<frag_cnt; completed_segs++,seg++) {
-			skb_frag_t *frag = &skb_shinfo(skb)->frags[completed_segs];
-			oal_entry++;
-			if ((seg == 2 && seg_cnt > 3) ||	/* Check for continuation */
-			    (seg == 7 && seg_cnt > 8) ||	/* requirements. It's strange */
-			    (seg == 12 && seg_cnt > 13) ||	/* but necessary. */
-			    (seg == 17 && seg_cnt > 18)) {
-				/* Continuation entry points to outbound address list. */
-				map = pci_map_single(qdev->pdev, oal,
-						     sizeof(struct oal),
-						     PCI_DMA_TODEVICE);
-
-				err = pci_dma_mapping_error(qdev->pdev, map);
-				if(err) {
-
-					printk(KERN_ERR "%s: PCI mapping outbound address list with error: %d\n",
-					       qdev->ndev->name, err);
-					goto map_error;
-				}
-
-				oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
-				oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
-				oal_entry->len =
-				    cpu_to_le32(sizeof(struct oal) |
-						OAL_CONT_ENTRY);
-				dma_unmap_addr_set(&tx_cb->map[seg], mapaddr,
-						   map);
-				dma_unmap_len_set(&tx_cb->map[seg], maplen,
-						  sizeof(struct oal));
-				oal_entry = (struct oal_entry *)oal;
-				oal++;
-				seg++;
-			}
-
-			map =
-			    pci_map_page(qdev->pdev, frag->page,
-					 frag->page_offset, frag->size,
-					 PCI_DMA_TODEVICE);
+		return NETDEV_TX_OK;
+	}
+	oal = tx_cb->oal;
+	for (completed_segs = 0;
+	     completed_segs < frag_cnt;
+	     completed_segs++, seg++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[completed_segs];
+		oal_entry++;
+		/*
+		 * Check for continuation requirements.
+		 * It's strange but necessary.
+		 * Continuation entry points to outbound address list.
+		 */
+		if ((seg == 2 && seg_cnt > 3) ||
+		    (seg == 7 && seg_cnt > 8) ||
+		    (seg == 12 && seg_cnt > 13) ||
+		    (seg == 17 && seg_cnt > 18)) {
+			map = pci_map_single(qdev->pdev, oal,
+					     sizeof(struct oal),
+					     PCI_DMA_TODEVICE);
 
 			err = pci_dma_mapping_error(qdev->pdev, map);
-			if(err) {
-				printk(KERN_ERR "%s: PCI mapping frags failed with error: %d\n",
-				       qdev->ndev->name, err);
+			if (err) {
+				netdev_err(qdev->ndev,
+					   "PCI mapping outbound address list with error: %d\n",
+					   err);
 				goto map_error;
 			}
 
 			oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
 			oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
-			oal_entry->len = cpu_to_le32(frag->size);
+			oal_entry->len = cpu_to_le32(sizeof(struct oal) |
+						     OAL_CONT_ENTRY);
 			dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
 			dma_unmap_len_set(&tx_cb->map[seg], maplen,
-					  frag->size);
+					  sizeof(struct oal));
+			oal_entry = (struct oal_entry *)oal;
+			oal++;
+			seg++;
 		}
-		/* Terminate the last segment. */
-		oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
-	}
 
+		map = pci_map_page(qdev->pdev, frag->page,
+				   frag->page_offset, frag->size,
+				   PCI_DMA_TODEVICE);
+
+		err = pci_dma_mapping_error(qdev->pdev, map);
+		if (err) {
+			netdev_err(qdev->ndev,
+				   "PCI mapping frags failed with error: %d\n",
+				   err);
+			goto map_error;
+		}
+
+		oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
+		oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
+		oal_entry->len = cpu_to_le32(frag->size);
+		dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
+		dma_unmap_len_set(&tx_cb->map[seg], maplen, frag->size);
+	}
+	/* Terminate the last segment. */
+	oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
 	return NETDEV_TX_OK;
 
 map_error:
@@ -2525,13 +2418,18 @@
 	seg = 1;
 	oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
 	oal = tx_cb->oal;
-	for (i=0; i<completed_segs; i++,seg++) {
+	for (i = 0; i < completed_segs; i++, seg++) {
 		oal_entry++;
 
-		if((seg == 2 && seg_cnt > 3) ||        /* Check for continuation */
-		   (seg == 7 && seg_cnt > 8) ||        /* requirements. It's strange */
-		   (seg == 12 && seg_cnt > 13) ||      /* but necessary. */
-		   (seg == 17 && seg_cnt > 18)) {
+		/*
+		 * Check for continuation requirements.
+		 * It's strange but necessary.
+		 */
+
+		if ((seg == 2 && seg_cnt > 3) ||
+		    (seg == 7 && seg_cnt > 8) ||
+		    (seg == 12 && seg_cnt > 13) ||
+		    (seg == 17 && seg_cnt > 18)) {
 			pci_unmap_single(qdev->pdev,
 				dma_unmap_addr(&tx_cb->map[seg], mapaddr),
 				dma_unmap_len(&tx_cb->map[seg], maplen),
@@ -2570,19 +2468,20 @@
 			       struct net_device *ndev)
 {
 	struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
-	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+	struct ql3xxx_port_registers __iomem *port_regs =
+			qdev->mem_map_registers;
 	struct ql_tx_buf_cb *tx_cb;
 	u32 tot_len = skb->len;
 	struct ob_mac_iocb_req *mac_iocb_ptr;
 
-	if (unlikely(atomic_read(&qdev->tx_count) < 2)) {
+	if (unlikely(atomic_read(&qdev->tx_count) < 2))
 		return NETDEV_TX_BUSY;
-	}
 
-	tx_cb = &qdev->tx_buf[qdev->req_producer_index] ;
-	if((tx_cb->seg_count = ql_get_seg_count(qdev,
-						(skb_shinfo(skb)->nr_frags))) == -1) {
-		printk(KERN_ERR PFX"%s: invalid segment count!\n",__func__);
+	tx_cb = &qdev->tx_buf[qdev->req_producer_index];
+	tx_cb->seg_count = ql_get_seg_count(qdev,
+					     skb_shinfo(skb)->nr_frags);
+	if (tx_cb->seg_count == -1) {
+		netdev_err(ndev, "%s: invalid segment count!\n", __func__);
 		return NETDEV_TX_OK;
 	}
 
@@ -2598,8 +2497,8 @@
 	    skb->ip_summed == CHECKSUM_PARTIAL)
 		ql_hw_csum_setup(skb, mac_iocb_ptr);
 
-	if(ql_send_map(qdev,mac_iocb_ptr,tx_cb,skb) != NETDEV_TX_OK) {
-		printk(KERN_ERR PFX"%s: Could not map the segments!\n",__func__);
+	if (ql_send_map(qdev, mac_iocb_ptr, tx_cb, skb) != NETDEV_TX_OK) {
+		netdev_err(ndev, "%s: Could not map the segments!\n", __func__);
 		return NETDEV_TX_BUSY;
 	}
 
@@ -2612,9 +2511,9 @@
 			    &port_regs->CommonRegs.reqQProducerIndex,
 			    qdev->req_producer_index);
 
-	if (netif_msg_tx_queued(qdev))
-		printk(KERN_DEBUG PFX "%s: tx queued, slot %d, len %d\n",
-		       ndev->name, qdev->req_producer_index, skb->len);
+	netif_printk(qdev, tx_queued, KERN_DEBUG, ndev,
+		     "tx queued, slot %d, len %d\n",
+		     qdev->req_producer_index, skb->len);
 
 	atomic_dec(&qdev->tx_count);
 	return NETDEV_TX_OK;
@@ -2632,8 +2531,7 @@
 
 	if ((qdev->req_q_virt_addr == NULL) ||
 	    LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) {
-		printk(KERN_ERR PFX "%s: reqQ failed.\n",
-		       qdev->ndev->name);
+		netdev_err(qdev->ndev, "reqQ failed\n");
 		return -ENOMEM;
 	}
 
@@ -2646,25 +2544,22 @@
 
 	if ((qdev->rsp_q_virt_addr == NULL) ||
 	    LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) {
-		printk(KERN_ERR PFX
-		       "%s: rspQ allocation failed\n",
-		       qdev->ndev->name);
+		netdev_err(qdev->ndev, "rspQ allocation failed\n");
 		pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size,
 				    qdev->req_q_virt_addr,
 				    qdev->req_q_phy_addr);
 		return -ENOMEM;
 	}
 
-	set_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags);
+	set_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags);
 
 	return 0;
 }
 
 static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev)
 {
-	if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags)) {
-		printk(KERN_INFO PFX
-		       "%s: Already done.\n", qdev->ndev->name);
+	if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags)) {
+		netdev_info(qdev->ndev, "Already done\n");
 		return;
 	}
 
@@ -2680,34 +2575,34 @@
 
 	qdev->rsp_q_virt_addr = NULL;
 
-	clear_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags);
+	clear_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags);
 }
 
 static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
 {
 	/* Create Large Buffer Queue */
 	qdev->lrg_buf_q_size =
-	    qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry);
+		qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry);
 	if (qdev->lrg_buf_q_size < PAGE_SIZE)
 		qdev->lrg_buf_q_alloc_size = PAGE_SIZE;
 	else
 		qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2;
 
-	qdev->lrg_buf = kmalloc(qdev->num_large_buffers * sizeof(struct ql_rcv_buf_cb),GFP_KERNEL);
+	qdev->lrg_buf =
+		kmalloc(qdev->num_large_buffers * sizeof(struct ql_rcv_buf_cb),
+			GFP_KERNEL);
 	if (qdev->lrg_buf == NULL) {
-		printk(KERN_ERR PFX
-		       "%s: qdev->lrg_buf alloc failed.\n", qdev->ndev->name);
+		netdev_err(qdev->ndev, "qdev->lrg_buf alloc failed\n");
 		return -ENOMEM;
 	}
 
 	qdev->lrg_buf_q_alloc_virt_addr =
-	    pci_alloc_consistent(qdev->pdev,
-				 qdev->lrg_buf_q_alloc_size,
-				 &qdev->lrg_buf_q_alloc_phy_addr);
+		pci_alloc_consistent(qdev->pdev,
+				     qdev->lrg_buf_q_alloc_size,
+				     &qdev->lrg_buf_q_alloc_phy_addr);
 
 	if (qdev->lrg_buf_q_alloc_virt_addr == NULL) {
-		printk(KERN_ERR PFX
-		       "%s: lBufQ failed\n", qdev->ndev->name);
+		netdev_err(qdev->ndev, "lBufQ failed\n");
 		return -ENOMEM;
 	}
 	qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr;
@@ -2715,21 +2610,19 @@
 
 	/* Create Small Buffer Queue */
 	qdev->small_buf_q_size =
-	    NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry);
+		NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry);
 	if (qdev->small_buf_q_size < PAGE_SIZE)
 		qdev->small_buf_q_alloc_size = PAGE_SIZE;
 	else
 		qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2;
 
 	qdev->small_buf_q_alloc_virt_addr =
-	    pci_alloc_consistent(qdev->pdev,
-				 qdev->small_buf_q_alloc_size,
-				 &qdev->small_buf_q_alloc_phy_addr);
+		pci_alloc_consistent(qdev->pdev,
+				     qdev->small_buf_q_alloc_size,
+				     &qdev->small_buf_q_alloc_phy_addr);
 
 	if (qdev->small_buf_q_alloc_virt_addr == NULL) {
-		printk(KERN_ERR PFX
-		       "%s: Small Buffer Queue allocation failed.\n",
-		       qdev->ndev->name);
+		netdev_err(qdev->ndev, "Small Buffer Queue allocation failed\n");
 		pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size,
 				    qdev->lrg_buf_q_alloc_virt_addr,
 				    qdev->lrg_buf_q_alloc_phy_addr);
@@ -2738,18 +2631,17 @@
 
 	qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr;
 	qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr;
-	set_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags);
+	set_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
 	return 0;
 }
 
 static void ql_free_buffer_queues(struct ql3_adapter *qdev)
 {
-	if (!test_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags)) {
-		printk(KERN_INFO PFX
-		       "%s: Already done.\n", qdev->ndev->name);
+	if (!test_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags)) {
+		netdev_info(qdev->ndev, "Already done\n");
 		return;
 	}
-	if(qdev->lrg_buf) kfree(qdev->lrg_buf);
+	kfree(qdev->lrg_buf);
 	pci_free_consistent(qdev->pdev,
 			    qdev->lrg_buf_q_alloc_size,
 			    qdev->lrg_buf_q_alloc_virt_addr,
@@ -2764,7 +2656,7 @@
 
 	qdev->small_buf_q_virt_addr = NULL;
 
-	clear_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags);
+	clear_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
 }
 
 static int ql_alloc_small_buffers(struct ql3_adapter *qdev)
@@ -2774,18 +2666,16 @@
 
 	/* Currently we allocate on one of memory and use it for smallbuffers */
 	qdev->small_buf_total_size =
-	    (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES *
-	     QL_SMALL_BUFFER_SIZE);
+		(QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES *
+		 QL_SMALL_BUFFER_SIZE);
 
 	qdev->small_buf_virt_addr =
-	    pci_alloc_consistent(qdev->pdev,
-				 qdev->small_buf_total_size,
-				 &qdev->small_buf_phy_addr);
+		pci_alloc_consistent(qdev->pdev,
+				     qdev->small_buf_total_size,
+				     &qdev->small_buf_phy_addr);
 
 	if (qdev->small_buf_virt_addr == NULL) {
-		printk(KERN_ERR PFX
-		       "%s: Failed to get small buffer memory.\n",
-		       qdev->ndev->name);
+		netdev_err(qdev->ndev, "Failed to get small buffer memory\n");
 		return -ENOMEM;
 	}
 
@@ -2804,15 +2694,14 @@
 		small_buf_q_entry++;
 	}
 	qdev->small_buf_index = 0;
-	set_bit(QL_ALLOC_SMALL_BUF_DONE,&qdev->flags);
+	set_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags);
 	return 0;
 }
 
 static void ql_free_small_buffers(struct ql3_adapter *qdev)
 {
-	if (!test_bit(QL_ALLOC_SMALL_BUF_DONE,&qdev->flags)) {
-		printk(KERN_INFO PFX
-		       "%s: Already done.\n", qdev->ndev->name);
+	if (!test_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags)) {
+		netdev_info(qdev->ndev, "Already done\n");
 		return;
 	}
 	if (qdev->small_buf_virt_addr != NULL) {
@@ -2874,11 +2763,9 @@
 				       qdev->lrg_buffer_len);
 		if (unlikely(!skb)) {
 			/* Better luck next round */
-			printk(KERN_ERR PFX
-			       "%s: large buff alloc failed, "
-			       "for %d bytes at index %d.\n",
-			       qdev->ndev->name,
-			       qdev->lrg_buffer_len * 2, i);
+			netdev_err(qdev->ndev,
+				   "large buff alloc failed for %d bytes at index %d\n",
+				   qdev->lrg_buffer_len * 2, i);
 			ql_free_large_buffers(qdev);
 			return -ENOMEM;
 		} else {
@@ -2899,9 +2786,10 @@
 					     PCI_DMA_FROMDEVICE);
 
 			err = pci_dma_mapping_error(qdev->pdev, map);
-			if(err) {
-				printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
-				       qdev->ndev->name, err);
+			if (err) {
+				netdev_err(qdev->ndev,
+					   "PCI mapping failed with error: %d\n",
+					   err);
 				ql_free_large_buffers(qdev);
 				return -ENOMEM;
 			}
@@ -2926,10 +2814,8 @@
 
 	tx_cb = &qdev->tx_buf[0];
 	for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
-		if (tx_cb->oal) {
-			kfree(tx_cb->oal);
-			tx_cb->oal = NULL;
-		}
+		kfree(tx_cb->oal);
+		tx_cb->oal = NULL;
 		tx_cb++;
 	}
 }
@@ -2938,8 +2824,7 @@
 {
 	struct ql_tx_buf_cb *tx_cb;
 	int i;
-	struct ob_mac_iocb_req *req_q_curr =
-					qdev->req_q_virt_addr;
+	struct ob_mac_iocb_req *req_q_curr = qdev->req_q_virt_addr;
 
 	/* Create free list of transmit buffers */
 	for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
@@ -2960,23 +2845,22 @@
 	if (qdev->ndev->mtu == NORMAL_MTU_SIZE) {
 		qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES;
 		qdev->lrg_buffer_len = NORMAL_MTU_SIZE;
-	}
-	else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) {
+	} else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) {
 		/*
 		 * Bigger buffers, so less of them.
 		 */
 		qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES;
 		qdev->lrg_buffer_len = JUMBO_MTU_SIZE;
 	} else {
-		printk(KERN_ERR PFX
-		       "%s: Invalid mtu size.  Only 1500 and 9000 are accepted.\n",
-		       qdev->ndev->name);
+		netdev_err(qdev->ndev, "Invalid mtu size: %d.  Only %d and %d are accepted.\n",
+			   qdev->ndev->mtu, NORMAL_MTU_SIZE, JUMBO_MTU_SIZE);
 		return -ENOMEM;
 	}
-	qdev->num_large_buffers = qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY;
+	qdev->num_large_buffers =
+		qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY;
 	qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE;
 	qdev->max_frame_size =
-	    (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE;
+		(qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE;
 
 	/*
 	 * First allocate a page of shared memory and use it for shadow
@@ -2984,51 +2868,44 @@
 	 * Network Completion Queue Producer Index Register
 	 */
 	qdev->shadow_reg_virt_addr =
-	    pci_alloc_consistent(qdev->pdev,
-				 PAGE_SIZE, &qdev->shadow_reg_phy_addr);
+		pci_alloc_consistent(qdev->pdev,
+				     PAGE_SIZE, &qdev->shadow_reg_phy_addr);
 
 	if (qdev->shadow_reg_virt_addr != NULL) {
 		qdev->preq_consumer_index = (u16 *) qdev->shadow_reg_virt_addr;
 		qdev->req_consumer_index_phy_addr_high =
-		    MS_64BITS(qdev->shadow_reg_phy_addr);
+			MS_64BITS(qdev->shadow_reg_phy_addr);
 		qdev->req_consumer_index_phy_addr_low =
-		    LS_64BITS(qdev->shadow_reg_phy_addr);
+			LS_64BITS(qdev->shadow_reg_phy_addr);
 
 		qdev->prsp_producer_index =
-		    (__le32 *) (((u8 *) qdev->preq_consumer_index) + 8);
+			(__le32 *) (((u8 *) qdev->preq_consumer_index) + 8);
 		qdev->rsp_producer_index_phy_addr_high =
-		    qdev->req_consumer_index_phy_addr_high;
+			qdev->req_consumer_index_phy_addr_high;
 		qdev->rsp_producer_index_phy_addr_low =
-		    qdev->req_consumer_index_phy_addr_low + 8;
+			qdev->req_consumer_index_phy_addr_low + 8;
 	} else {
-		printk(KERN_ERR PFX
-		       "%s: shadowReg Alloc failed.\n", qdev->ndev->name);
+		netdev_err(qdev->ndev, "shadowReg Alloc failed\n");
 		return -ENOMEM;
 	}
 
 	if (ql_alloc_net_req_rsp_queues(qdev) != 0) {
-		printk(KERN_ERR PFX
-		       "%s: ql_alloc_net_req_rsp_queues failed.\n",
-		       qdev->ndev->name);
+		netdev_err(qdev->ndev, "ql_alloc_net_req_rsp_queues failed\n");
 		goto err_req_rsp;
 	}
 
 	if (ql_alloc_buffer_queues(qdev) != 0) {
-		printk(KERN_ERR PFX
-		       "%s: ql_alloc_buffer_queues failed.\n",
-		       qdev->ndev->name);
+		netdev_err(qdev->ndev, "ql_alloc_buffer_queues failed\n");
 		goto err_buffer_queues;
 	}
 
 	if (ql_alloc_small_buffers(qdev) != 0) {
-		printk(KERN_ERR PFX
-		       "%s: ql_alloc_small_buffers failed\n", qdev->ndev->name);
+		netdev_err(qdev->ndev, "ql_alloc_small_buffers failed\n");
 		goto err_small_buffers;
 	}
 
 	if (ql_alloc_large_buffers(qdev) != 0) {
-		printk(KERN_ERR PFX
-		       "%s: ql_alloc_large_buffers failed\n", qdev->ndev->name);
+		netdev_err(qdev->ndev, "ql_alloc_large_buffers failed\n");
 		goto err_small_buffers;
 	}
 
@@ -3076,7 +2953,7 @@
 	struct ql3xxx_local_ram_registers __iomem *local_ram =
 	    (void __iomem *)qdev->mem_map_registers;
 
-	if(ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK,
+	if (ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK,
 			(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
 			 2) << 4))
 		return -1;
@@ -3132,18 +3009,20 @@
 static int ql_adapter_initialize(struct ql3_adapter *qdev)
 {
 	u32 value;
-	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+	struct ql3xxx_port_registers __iomem *port_regs =
+		qdev->mem_map_registers;
+	u32 __iomem *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
 	struct ql3xxx_host_memory_registers __iomem *hmem_regs =
-						(void __iomem *)port_regs;
+		(void __iomem *)port_regs;
 	u32 delay = 10;
 	int status = 0;
 	unsigned long hw_flags = 0;
 
-	if(ql_mii_setup(qdev))
+	if (ql_mii_setup(qdev))
 		return -1;
 
 	/* Bring out PHY out of reset */
-	ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
+	ql_write_common_reg(qdev, spir,
 			    (ISP_SERIAL_PORT_IF_WE |
 			     (ISP_SERIAL_PORT_IF_WE << 16)));
 	/* Give the PHY time to come out of reset. */
@@ -3152,13 +3031,13 @@
 	netif_carrier_off(qdev->ndev);
 
 	/* V2 chip fix for ARS-39168. */
-	ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
+	ql_write_common_reg(qdev, spir,
 			    (ISP_SERIAL_PORT_IF_SDE |
 			     (ISP_SERIAL_PORT_IF_SDE << 16)));
 
 	/* Request Queue Registers */
-	*((u32 *) (qdev->preq_consumer_index)) = 0;
-	atomic_set(&qdev->tx_count,NUM_REQ_Q_ENTRIES);
+	*((u32 *)(qdev->preq_consumer_index)) = 0;
+	atomic_set(&qdev->tx_count, NUM_REQ_Q_ENTRIES);
 	qdev->req_producer_index = 0;
 
 	ql_write_page1_reg(qdev,
@@ -3208,7 +3087,9 @@
 			   &hmem_regs->rxLargeQBaseAddrLow,
 			   LS_64BITS(qdev->lrg_buf_q_phy_addr));
 
-	ql_write_page1_reg(qdev, &hmem_regs->rxLargeQLength, qdev->num_lbufq_entries);
+	ql_write_page1_reg(qdev,
+			   &hmem_regs->rxLargeQLength,
+			   qdev->num_lbufq_entries);
 
 	ql_write_page1_reg(qdev,
 			   &hmem_regs->rxLargeBufferLength,
@@ -3258,7 +3139,7 @@
 	if ((value & PORT_STATUS_IC) == 0) {
 
 		/* Chip has not been configured yet, so let it rip. */
-		if(ql_init_misc_registers(qdev)) {
+		if (ql_init_misc_registers(qdev)) {
 			status = -1;
 			goto out;
 		}
@@ -3268,7 +3149,7 @@
 
 		value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig;
 
-		if(ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK,
+		if (ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK,
 				(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
 				 * 2) << 13)) {
 			status = -1;
@@ -3291,7 +3172,7 @@
 					   &port_regs->mac0MaxFrameLengthReg,
 					   qdev->max_frame_size);
 
-	if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
 			(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
 			 2) << 7)) {
 		status = -1;
@@ -3353,8 +3234,7 @@
 	} while (--delay);
 
 	if (delay == 0) {
-		printk(KERN_ERR PFX
-		       "%s: Hw Initialization timeout.\n", qdev->ndev->name);
+		netdev_err(qdev->ndev, "Hw Initialization timeout\n");
 		status = -1;
 		goto out;
 	}
@@ -3385,7 +3265,8 @@
  */
 static int ql_adapter_reset(struct ql3_adapter *qdev)
 {
-	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+	struct ql3xxx_port_registers __iomem *port_regs =
+		qdev->mem_map_registers;
 	int status = 0;
 	u16 value;
 	int max_wait_time;
@@ -3396,17 +3277,14 @@
 	/*
 	 * Issue soft reset to chip.
 	 */
-	printk(KERN_DEBUG PFX
-	       "%s: Issue soft reset to chip.\n",
-	       qdev->ndev->name);
+	netdev_printk(KERN_DEBUG, qdev->ndev, "Issue soft reset to chip\n");
 	ql_write_common_reg(qdev,
 			    &port_regs->CommonRegs.ispControlStatus,
 			    ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR));
 
 	/* Wait 3 seconds for reset to complete. */
-	printk(KERN_DEBUG PFX
-	       "%s: Wait 10 milliseconds for reset to complete.\n",
-	       qdev->ndev->name);
+	netdev_printk(KERN_DEBUG, qdev->ndev,
+		      "Wait 10 milliseconds for reset to complete\n");
 
 	/* Wait until the firmware tells us the Soft Reset is done */
 	max_wait_time = 5;
@@ -3427,8 +3305,8 @@
 	value =
 	    ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
 	if (value & ISP_CONTROL_RI) {
-		printk(KERN_DEBUG PFX
-		       "ql_adapter_reset: clearing RI after reset.\n");
+		netdev_printk(KERN_DEBUG, qdev->ndev,
+			      "clearing RI after reset\n");
 		ql_write_common_reg(qdev,
 				    &port_regs->CommonRegs.
 				    ispControlStatus,
@@ -3448,13 +3326,11 @@
 		 */
 		max_wait_time = 5;
 		do {
-			value =
-			    ql_read_common_reg(qdev,
-					       &port_regs->CommonRegs.
-					       ispControlStatus);
-			if ((value & ISP_CONTROL_FSR) == 0) {
+			value = ql_read_common_reg(qdev,
+						   &port_regs->CommonRegs.
+						   ispControlStatus);
+			if ((value & ISP_CONTROL_FSR) == 0)
 				break;
-			}
 			ssleep(1);
 		} while ((--max_wait_time));
 	}
@@ -3468,7 +3344,8 @@
 
 static void ql_set_mac_info(struct ql3_adapter *qdev)
 {
-	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+	struct ql3xxx_port_registers __iomem *port_regs =
+		qdev->mem_map_registers;
 	u32 value, port_status;
 	u8 func_number;
 
@@ -3484,9 +3361,9 @@
 		qdev->mb_bit_mask = FN0_MA_BITS_MASK;
 		qdev->PHYAddr = PORT0_PHY_ADDRESS;
 		if (port_status & PORT_STATUS_SM0)
-			set_bit(QL_LINK_OPTICAL,&qdev->flags);
+			set_bit(QL_LINK_OPTICAL, &qdev->flags);
 		else
-			clear_bit(QL_LINK_OPTICAL,&qdev->flags);
+			clear_bit(QL_LINK_OPTICAL, &qdev->flags);
 		break;
 
 	case ISP_CONTROL_FN1_NET:
@@ -3495,17 +3372,17 @@
 		qdev->mb_bit_mask = FN1_MA_BITS_MASK;
 		qdev->PHYAddr = PORT1_PHY_ADDRESS;
 		if (port_status & PORT_STATUS_SM1)
-			set_bit(QL_LINK_OPTICAL,&qdev->flags);
+			set_bit(QL_LINK_OPTICAL, &qdev->flags);
 		else
-			clear_bit(QL_LINK_OPTICAL,&qdev->flags);
+			clear_bit(QL_LINK_OPTICAL, &qdev->flags);
 		break;
 
 	case ISP_CONTROL_FN0_SCSI:
 	case ISP_CONTROL_FN1_SCSI:
 	default:
-		printk(KERN_DEBUG PFX
-		       "%s: Invalid function number, ispControlStatus = 0x%x\n",
-		       qdev->ndev->name,value);
+		netdev_printk(KERN_DEBUG, qdev->ndev,
+			      "Invalid function number, ispControlStatus = 0x%x\n",
+			      value);
 		break;
 	}
 	qdev->numPorts = qdev->nvram_data.version_and_numPorts >> 8;
@@ -3516,32 +3393,26 @@
 	struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
 	struct pci_dev *pdev = qdev->pdev;
 
-	printk(KERN_INFO PFX
-	       "\n%s Adapter %d RevisionID %d found %s on PCI slot %d.\n",
-	       DRV_NAME, qdev->index, qdev->chip_rev_id,
-	       (qdev->device_id == QL3032_DEVICE_ID) ? "QLA3032" : "QLA3022",
-	       qdev->pci_slot);
-	printk(KERN_INFO PFX
-	       "%s Interface.\n",
-	       test_bit(QL_LINK_OPTICAL,&qdev->flags) ? "OPTICAL" : "COPPER");
+	netdev_info(ndev,
+		    "%s Adapter %d RevisionID %d found %s on PCI slot %d\n",
+		    DRV_NAME, qdev->index, qdev->chip_rev_id,
+		    qdev->device_id == QL3032_DEVICE_ID ? "QLA3032" : "QLA3022",
+		    qdev->pci_slot);
+	netdev_info(ndev, "%s Interface\n",
+		test_bit(QL_LINK_OPTICAL, &qdev->flags) ? "OPTICAL" : "COPPER");
 
 	/*
 	 * Print PCI bus width/type.
 	 */
-	printk(KERN_INFO PFX
-	       "Bus interface is %s %s.\n",
-	       ((qdev->pci_width == 64) ? "64-bit" : "32-bit"),
-	       ((qdev->pci_x) ? "PCI-X" : "PCI"));
+	netdev_info(ndev, "Bus interface is %s %s\n",
+		    ((qdev->pci_width == 64) ? "64-bit" : "32-bit"),
+		    ((qdev->pci_x) ? "PCI-X" : "PCI"));
 
-	printk(KERN_INFO PFX
-	       "mem  IO base address adjusted = 0x%p\n",
-	       qdev->mem_map_registers);
-	printk(KERN_INFO PFX "Interrupt number = %d\n", pdev->irq);
+	netdev_info(ndev, "mem IO base address adjusted = 0x%p\n",
+		    qdev->mem_map_registers);
+	netdev_info(ndev, "Interrupt number = %d\n", pdev->irq);
 
-	if (netif_msg_probe(qdev))
-		printk(KERN_INFO PFX
-		       "%s: MAC address %pM\n",
-		       ndev->name, ndev->dev_addr);
+	netif_info(qdev, probe, ndev, "MAC address %pM\n", ndev->dev_addr);
 }
 
 static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset)
@@ -3552,17 +3423,16 @@
 	netif_stop_queue(ndev);
 	netif_carrier_off(ndev);
 
-	clear_bit(QL_ADAPTER_UP,&qdev->flags);
-	clear_bit(QL_LINK_MASTER,&qdev->flags);
+	clear_bit(QL_ADAPTER_UP, &qdev->flags);
+	clear_bit(QL_LINK_MASTER, &qdev->flags);
 
 	ql_disable_interrupts(qdev);
 
 	free_irq(qdev->pdev->irq, ndev);
 
-	if (qdev->msi && test_bit(QL_MSI_ENABLED,&qdev->flags)) {
-		printk(KERN_INFO PFX
-		       "%s: calling pci_disable_msi().\n", qdev->ndev->name);
-		clear_bit(QL_MSI_ENABLED,&qdev->flags);
+	if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
+		netdev_info(qdev->ndev, "calling pci_disable_msi()\n");
+		clear_bit(QL_MSI_ENABLED, &qdev->flags);
 		pci_disable_msi(qdev->pdev);
 	}
 
@@ -3576,17 +3446,16 @@
 
 		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
 		if (ql_wait_for_drvr_lock(qdev)) {
-			if ((soft_reset = ql_adapter_reset(qdev))) {
-				printk(KERN_ERR PFX
-				       "%s: ql_adapter_reset(%d) FAILED!\n",
-				       ndev->name, qdev->index);
+			soft_reset = ql_adapter_reset(qdev);
+			if (soft_reset) {
+				netdev_err(ndev, "ql_adapter_reset(%d) FAILED!\n",
+					   qdev->index);
 			}
-			printk(KERN_ERR PFX
-				"%s: Releaseing driver lock via chip reset.\n",ndev->name);
+			netdev_err(ndev,
+				   "Releasing driver lock via chip reset\n");
 		} else {
-			printk(KERN_ERR PFX
-			       "%s: Could not acquire driver lock to do "
-			       "reset!\n", ndev->name);
+			netdev_err(ndev,
+				   "Could not acquire driver lock to do reset!\n");
 			retval = -1;
 		}
 		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
@@ -3603,56 +3472,50 @@
 	unsigned long hw_flags;
 
 	if (ql_alloc_mem_resources(qdev)) {
-		printk(KERN_ERR PFX
-		       "%s Unable to  allocate buffers.\n", ndev->name);
+		netdev_err(ndev, "Unable to allocate buffers\n");
 		return -ENOMEM;
 	}
 
 	if (qdev->msi) {
 		if (pci_enable_msi(qdev->pdev)) {
-			printk(KERN_ERR PFX
-			       "%s: User requested MSI, but MSI failed to "
-			       "initialize.  Continuing without MSI.\n",
-			       qdev->ndev->name);
+			netdev_err(ndev,
+				   "User requested MSI, but MSI failed to initialize.  Continuing without MSI.\n");
 			qdev->msi = 0;
 		} else {
-			printk(KERN_INFO PFX "%s: MSI Enabled...\n", qdev->ndev->name);
-			set_bit(QL_MSI_ENABLED,&qdev->flags);
+			netdev_info(ndev, "MSI Enabled...\n");
+			set_bit(QL_MSI_ENABLED, &qdev->flags);
 			irq_flags &= ~IRQF_SHARED;
 		}
 	}
 
-	if ((err = request_irq(qdev->pdev->irq,
-			       ql3xxx_isr,
-			       irq_flags, ndev->name, ndev))) {
-		printk(KERN_ERR PFX
-		       "%s: Failed to reserve interrupt %d already in use.\n",
-		       ndev->name, qdev->pdev->irq);
+	err = request_irq(qdev->pdev->irq, ql3xxx_isr,
+			  irq_flags, ndev->name, ndev);
+	if (err) {
+		netdev_err(ndev,
+			   "Failed to reserve interrupt %d - already in use\n",
+			   qdev->pdev->irq);
 		goto err_irq;
 	}
 
 	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
 
-	if ((err = ql_wait_for_drvr_lock(qdev))) {
-		if ((err = ql_adapter_initialize(qdev))) {
-			printk(KERN_ERR PFX
-			       "%s: Unable to initialize adapter.\n",
-			       ndev->name);
+	err = ql_wait_for_drvr_lock(qdev);
+	if (err) {
+		err = ql_adapter_initialize(qdev);
+		if (err) {
+			netdev_err(ndev, "Unable to initialize adapter\n");
 			goto err_init;
 		}
-		printk(KERN_ERR PFX
-				"%s: Releaseing driver lock.\n",ndev->name);
+		netdev_err(ndev, "Releasing driver lock\n");
 		ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
 	} else {
-		printk(KERN_ERR PFX
-		       "%s: Could not acquire driver lock.\n",
-		       ndev->name);
+		netdev_err(ndev, "Could not acquire driver lock\n");
 		goto err_lock;
 	}
 
 	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 
-	set_bit(QL_ADAPTER_UP,&qdev->flags);
+	set_bit(QL_ADAPTER_UP, &qdev->flags);
 
 	mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
 
@@ -3666,11 +3529,9 @@
 	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 	free_irq(qdev->pdev->irq, ndev);
 err_irq:
-	if (qdev->msi && test_bit(QL_MSI_ENABLED,&qdev->flags)) {
-		printk(KERN_INFO PFX
-		       "%s: calling pci_disable_msi().\n",
-		       qdev->ndev->name);
-		clear_bit(QL_MSI_ENABLED,&qdev->flags);
+	if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
+		netdev_info(ndev, "calling pci_disable_msi()\n");
+		clear_bit(QL_MSI_ENABLED, &qdev->flags);
 		pci_disable_msi(qdev->pdev);
 	}
 	return err;
@@ -3678,10 +3539,9 @@
 
 static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset)
 {
-	if( ql_adapter_down(qdev,reset) || ql_adapter_up(qdev)) {
-		printk(KERN_ERR PFX
-				"%s: Driver up/down cycle failed, "
-				"closing device\n",qdev->ndev->name);
+	if (ql_adapter_down(qdev, reset) || ql_adapter_up(qdev)) {
+		netdev_err(qdev->ndev,
+			   "Driver up/down cycle failed, closing device\n");
 		rtnl_lock();
 		dev_close(qdev->ndev);
 		rtnl_unlock();
@@ -3698,24 +3558,24 @@
 	 * Wait for device to recover from a reset.
 	 * (Rarely happens, but possible.)
 	 */
-	while (!test_bit(QL_ADAPTER_UP,&qdev->flags))
+	while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
 		msleep(50);
 
-	ql_adapter_down(qdev,QL_DO_RESET);
+	ql_adapter_down(qdev, QL_DO_RESET);
 	return 0;
 }
 
 static int ql3xxx_open(struct net_device *ndev)
 {
 	struct ql3_adapter *qdev = netdev_priv(ndev);
-	return (ql_adapter_up(qdev));
+	return ql_adapter_up(qdev);
 }
 
 static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
 {
 	struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
 	struct ql3xxx_port_registers __iomem *port_regs =
-	    		qdev->mem_map_registers;
+			qdev->mem_map_registers;
 	struct sockaddr *addr = p;
 	unsigned long hw_flags;
 
@@ -3750,7 +3610,7 @@
 {
 	struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
 
-	printk(KERN_ERR PFX "%s: Resetting...\n", ndev->name);
+	netdev_err(ndev, "Resetting...\n");
 	/*
 	 * Stop the queues, we've got a problem.
 	 */
@@ -3770,11 +3630,12 @@
 	u32 value;
 	struct ql_tx_buf_cb *tx_cb;
 	int max_wait_time, i;
-	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+	struct ql3xxx_port_registers __iomem *port_regs =
+		qdev->mem_map_registers;
 	unsigned long hw_flags;
 
-	if (test_bit((QL_RESET_PER_SCSI | QL_RESET_START),&qdev->flags)) {
-		clear_bit(QL_LINK_MASTER,&qdev->flags);
+	if (test_bit((QL_RESET_PER_SCSI | QL_RESET_START), &qdev->flags)) {
+		clear_bit(QL_LINK_MASTER, &qdev->flags);
 
 		/*
 		 * Loop through the active list and return the skb.
@@ -3783,17 +3644,19 @@
 			int j;
 			tx_cb = &qdev->tx_buf[i];
 			if (tx_cb->skb) {
-				printk(KERN_DEBUG PFX
-				       "%s: Freeing lost SKB.\n",
-				       qdev->ndev->name);
+				netdev_printk(KERN_DEBUG, ndev,
+					      "Freeing lost SKB\n");
 				pci_unmap_single(qdev->pdev,
-					 dma_unmap_addr(&tx_cb->map[0], mapaddr),
+					 dma_unmap_addr(&tx_cb->map[0],
+							mapaddr),
 					 dma_unmap_len(&tx_cb->map[0], maplen),
 					 PCI_DMA_TODEVICE);
-				for(j=1;j<tx_cb->seg_count;j++) {
+				for (j = 1; j < tx_cb->seg_count; j++) {
 					pci_unmap_page(qdev->pdev,
-					       dma_unmap_addr(&tx_cb->map[j],mapaddr),
-					       dma_unmap_len(&tx_cb->map[j],maplen),
+					       dma_unmap_addr(&tx_cb->map[j],
+							      mapaddr),
+					       dma_unmap_len(&tx_cb->map[j],
+							     maplen),
 					       PCI_DMA_TODEVICE);
 				}
 				dev_kfree_skb(tx_cb->skb);
@@ -3801,8 +3664,7 @@
 			}
 		}
 
-		printk(KERN_ERR PFX
-		       "%s: Clearing NRI after reset.\n", qdev->ndev->name);
+		netdev_err(ndev, "Clearing NRI after reset\n");
 		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
 		ql_write_common_reg(qdev,
 				    &port_regs->CommonRegs.
@@ -3818,16 +3680,14 @@
 
 						   ispControlStatus);
 			if ((value & ISP_CONTROL_SR) == 0) {
-				printk(KERN_DEBUG PFX
-				       "%s: reset completed.\n",
-				       qdev->ndev->name);
+				netdev_printk(KERN_DEBUG, ndev,
+					      "reset completed\n");
 				break;
 			}
 
 			if (value & ISP_CONTROL_RI) {
-				printk(KERN_DEBUG PFX
-				       "%s: clearing NRI after reset.\n",
-				       qdev->ndev->name);
+				netdev_printk(KERN_DEBUG, ndev,
+					      "clearing NRI after reset\n");
 				ql_write_common_reg(qdev,
 						    &port_regs->
 						    CommonRegs.
@@ -3848,21 +3708,19 @@
 			 * Set the reset flags and clear the board again.
 			 * Nothing else to do...
 			 */
-			printk(KERN_ERR PFX
-			       "%s: Timed out waiting for reset to "
-			       "complete.\n", ndev->name);
-			printk(KERN_ERR PFX
-			       "%s: Do a reset.\n", ndev->name);
-			clear_bit(QL_RESET_PER_SCSI,&qdev->flags);
-			clear_bit(QL_RESET_START,&qdev->flags);
-			ql_cycle_adapter(qdev,QL_DO_RESET);
+			netdev_err(ndev,
+				   "Timed out waiting for reset to complete\n");
+			netdev_err(ndev, "Do a reset\n");
+			clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
+			clear_bit(QL_RESET_START, &qdev->flags);
+			ql_cycle_adapter(qdev, QL_DO_RESET);
 			return;
 		}
 
-		clear_bit(QL_RESET_ACTIVE,&qdev->flags);
-		clear_bit(QL_RESET_PER_SCSI,&qdev->flags);
-		clear_bit(QL_RESET_START,&qdev->flags);
-		ql_cycle_adapter(qdev,QL_NO_RESET);
+		clear_bit(QL_RESET_ACTIVE, &qdev->flags);
+		clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
+		clear_bit(QL_RESET_START, &qdev->flags);
+		ql_cycle_adapter(qdev, QL_NO_RESET);
 	}
 }
 
@@ -3876,7 +3734,8 @@
 
 static void ql_get_board_info(struct ql3_adapter *qdev)
 {
-	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+	struct ql3xxx_port_registers __iomem *port_regs =
+		qdev->mem_map_registers;
 	u32 value;
 
 	value = ql_read_page0_reg_l(qdev, &port_regs->portStatus);
@@ -3915,20 +3774,18 @@
 {
 	struct net_device *ndev = NULL;
 	struct ql3_adapter *qdev = NULL;
-	static int cards_found = 0;
+	static int cards_found;
 	int uninitialized_var(pci_using_dac), err;
 
 	err = pci_enable_device(pdev);
 	if (err) {
-		printk(KERN_ERR PFX "%s cannot enable PCI device\n",
-		       pci_name(pdev));
+		pr_err("%s cannot enable PCI device\n", pci_name(pdev));
 		goto err_out;
 	}
 
 	err = pci_request_regions(pdev, DRV_NAME);
 	if (err) {
-		printk(KERN_ERR PFX "%s cannot obtain PCI resources\n",
-		       pci_name(pdev));
+		pr_err("%s cannot obtain PCI resources\n", pci_name(pdev));
 		goto err_out_disable_pdev;
 	}
 
@@ -3943,15 +3800,13 @@
 	}
 
 	if (err) {
-		printk(KERN_ERR PFX "%s no usable DMA configuration\n",
-		       pci_name(pdev));
+		pr_err("%s no usable DMA configuration\n", pci_name(pdev));
 		goto err_out_free_regions;
 	}
 
 	ndev = alloc_etherdev(sizeof(struct ql3_adapter));
 	if (!ndev) {
-		printk(KERN_ERR PFX "%s could not alloc etherdev\n",
-		       pci_name(pdev));
+		pr_err("%s could not alloc etherdev\n", pci_name(pdev));
 		err = -ENOMEM;
 		goto err_out_free_regions;
 	}
@@ -3978,8 +3833,7 @@
 
 	qdev->mem_map_registers = pci_ioremap_bar(pdev, 1);
 	if (!qdev->mem_map_registers) {
-		printk(KERN_ERR PFX "%s: cannot map device registers\n",
-		       pci_name(pdev));
+		pr_err("%s: cannot map device registers\n", pci_name(pdev));
 		err = -EIO;
 		goto err_out_free_ndev;
 	}
@@ -3998,9 +3852,8 @@
 
 	/* make sure the EEPROM is good */
 	if (ql_get_nvram_params(qdev)) {
-		printk(KERN_ALERT PFX
-		       "ql3xxx_probe: Adapter #%d, Invalid NVRAM parameters.\n",
-		       qdev->index);
+		pr_alert("%s: Adapter #%d, Invalid NVRAM parameters\n",
+			 __func__, qdev->index);
 		err = -EIO;
 		goto err_out_iounmap;
 	}
@@ -4026,14 +3879,12 @@
 	 * Set the Maximum Memory Read Byte Count value. We do this to handle
 	 * jumbo frames.
 	 */
-	if (qdev->pci_x) {
+	if (qdev->pci_x)
 		pci_write_config_word(pdev, (int)0x4e, (u16) 0x0036);
-	}
 
 	err = register_netdev(ndev);
 	if (err) {
-		printk(KERN_ERR PFX "%s: cannot register net device\n",
-		       pci_name(pdev));
+		pr_err("%s: cannot register net device\n", pci_name(pdev));
 		goto err_out_iounmap;
 	}
 
@@ -4052,10 +3903,10 @@
 	qdev->adapter_timer.expires = jiffies + HZ * 2;	/* two second delay */
 	qdev->adapter_timer.data = (unsigned long)qdev;
 
-	if(!cards_found) {
-		printk(KERN_ALERT PFX "%s\n", DRV_STRING);
-		printk(KERN_ALERT PFX "Driver name: %s, Version: %s.\n",
-	    	   DRV_NAME, DRV_VERSION);
+	if (!cards_found) {
+		pr_alert("%s\n", DRV_STRING);
+		pr_alert("Driver name: %s, Version: %s\n",
+			 DRV_NAME, DRV_VERSION);
 	}
 	ql_display_dev_info(ndev);
 
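
The qla3xxx conversion above follows the standard netdev message idiom: every
printk(KERN_<level> PFX "%s: ...", ndev->name, ...) becomes
netdev_<level>(ndev, ...), which derives the driver and interface prefix from
the net_device itself. A minimal sketch of the idiom, with report_tx_error()
as a purely illustrative helper (the real macros live in <linux/netdevice.h>):

#include <linux/netdevice.h>
#include <linux/printk.h>

/* Illustrative only -- not a function in this driver. */
static void report_tx_error(struct net_device *ndev, int err)
{
	/* old style: caller hand-builds the "driver: ifname:" prefix */
	printk(KERN_ERR "qla3xxx: %s: TX failed with error: %d\n",
	       ndev->name, err);

	/* new style: netdev_err() adds driver and device name itself */
	netdev_err(ndev, "TX failed with error: %d\n", err);
}
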
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
index e189477..9703893 100644
--- a/drivers/net/qlcnic/qlcnic.h
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -1074,8 +1074,8 @@
 /* Return codes for Error handling */
 #define QL_STATUS_INVALID_PARAM	-1
 
-#define MAX_BW			10000
-#define MIN_BW			100
+#define MAX_BW			100
+#define MIN_BW			1
 #define MAX_VLAN_ID		4095
 #define MIN_VLAN_ID		2
 #define MAX_TX_QUEUES		1
@@ -1083,8 +1083,7 @@
 #define DEFAULT_MAC_LEARN	1
 
 #define IS_VALID_VLAN(vlan)	(vlan >= MIN_VLAN_ID && vlan <= MAX_VLAN_ID)
-#define IS_VALID_BW(bw)		(bw >= MIN_BW && bw <= MAX_BW \
-							&& (bw % 100) == 0)
+#define IS_VALID_BW(bw)		((bw) >= MIN_BW && (bw) <= MAX_BW)
 #define IS_VALID_TX_QUEUES(que)	(que > 0 && que <= MAX_TX_QUEUES)
 #define IS_VALID_RX_QUEUES(que)	(que > 0 && que <= MAX_RX_QUEUES)
 #define IS_VALID_MODE(mode)	(mode == 0 || mode == 1)
@@ -1302,8 +1301,6 @@
 	int (*get_mac_addr) (struct qlcnic_adapter *, u8*);
 	int (*config_bridged_mode) (struct qlcnic_adapter *, u32);
 	int (*config_led) (struct qlcnic_adapter *, u32, u32);
-	int (*set_ilb_mode) (struct qlcnic_adapter *);
-	void (*clear_ilb_mode) (struct qlcnic_adapter *);
 	int (*start_firmware) (struct qlcnic_adapter *);
 };
 
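
The MAX_BW/MIN_BW change above reads as a units change: bandwidth moves from
hundredths of a percent (100..10000, stepped by 100) to whole percent
(1..100), which is why the (bw % 100) == 0 term drops out of IS_VALID_BW. A
standalone sketch of the new check, assuming whole-percent units:

/* Sketch only; assumes bw is now a whole percentage. */
#define MIN_BW	1
#define MAX_BW	100

static int is_valid_bw(int bw)
{
	/* the old form also required bw % 100 == 0 */
	return bw >= MIN_BW && bw <= MAX_BW;
}

/* is_valid_bw(50) -> 1, is_valid_bw(150) -> 0 */
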
diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c
index 7d6558e..9328d59 100644
--- a/drivers/net/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/qlcnic/qlcnic_ethtool.c
@@ -678,6 +678,12 @@
 	int max_sds_rings = adapter->max_sds_rings;
 	int ret;
 
+	if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
+		dev_warn(&adapter->pdev->dev, "Loopback test not supported "
+				"for non-privileged function\n");
+		return 0;
+	}
+
 	if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
 		return -EIO;
 
@@ -685,13 +691,13 @@
 	if (ret)
 		goto clear_it;
 
-	ret = adapter->nic_ops->set_ilb_mode(adapter);
+	ret = qlcnic_set_ilb_mode(adapter);
 	if (ret)
 		goto done;
 
 	ret = qlcnic_do_ilb_test(adapter);
 
-	adapter->nic_ops->clear_ilb_mode(adapter);
+	qlcnic_clear_ilb_mode(adapter);
 
 done:
 	qlcnic_diag_free_res(netdev, max_sds_rings);
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index f1f7acf..b9615bd 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -107,8 +107,6 @@
 static int qlcnic_start_firmware(struct qlcnic_adapter *);
 
 static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *);
-static void qlcnicvf_clear_ilb_mode(struct qlcnic_adapter *);
-static int qlcnicvf_set_ilb_mode(struct qlcnic_adapter *);
 static int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
 static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
 static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
@@ -381,8 +379,6 @@
 	.get_mac_addr = qlcnic_get_mac_address,
 	.config_bridged_mode = qlcnic_config_bridged_mode,
 	.config_led = qlcnic_config_led,
-	.set_ilb_mode = qlcnic_set_ilb_mode,
-	.clear_ilb_mode = qlcnic_clear_ilb_mode,
 	.start_firmware = qlcnic_start_firmware
 };
 
@@ -390,8 +386,6 @@
 	.get_mac_addr = qlcnic_get_mac_address,
 	.config_bridged_mode = qlcnicvf_config_bridged_mode,
 	.config_led = qlcnicvf_config_led,
-	.set_ilb_mode = qlcnicvf_set_ilb_mode,
-	.clear_ilb_mode = qlcnicvf_clear_ilb_mode,
 	.start_firmware = qlcnicvf_start_firmware
 };
 
@@ -1182,6 +1176,7 @@
 	ret = qlcnic_fw_create_ctx(adapter);
 	if (ret) {
 		qlcnic_detach(adapter);
+		netif_device_attach(netdev);
 		return ret;
 	}
 
@@ -2841,18 +2836,6 @@
 	return -EOPNOTSUPP;
 }
 
-static int
-qlcnicvf_set_ilb_mode(struct qlcnic_adapter *adapter)
-{
-	return -EOPNOTSUPP;
-}
-
-static void
-qlcnicvf_clear_ilb_mode(struct qlcnic_adapter *adapter)
-{
-	return;
-}
-
 static ssize_t
 qlcnic_store_bridged_mode(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t len)
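
Dropping set_ilb_mode/clear_ilb_mode from the qlcnic ops tables leaves only
hooks that genuinely differ between the privileged and virtual-function
paths; loopback setup is instead gated at the ethtool call site with the
QLCNIC_NON_PRIV_FUNC check added above. A rough sketch of the shape of that
refactor -- struct adapter, NON_PRIV, and set_ilb_mode() below are stand-ins,
not the driver's real names:

struct adapter { int op_mode; };
enum { PRIV_FUNC, NON_PRIV };

static int set_ilb_mode(struct adapter *ad) { return 0; }	/* stub */

static int loopback_test(struct adapter *ad)
{
	if (ad->op_mode == NON_PRIV)	/* VF: quietly unsupported */
		return 0;
	return set_ilb_mode(ad);	/* PF: direct call, no ops hook */
}
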
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index 06b2188..a478786 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -18,8 +18,6 @@
 #define DRV_STRING 	"QLogic 10 Gigabit PCI-E Ethernet Driver "
 #define DRV_VERSION	"v1.00.00.25.00.00-01"
 
-#define PFX "qlge: "
-
 #define WQ_ADDR_ALIGN	0x3	/* 4 byte alignment */
 
 #define QLGE_VENDOR_ID    0x1077
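
Removing PFX from qlge.h pairs with the pr_fmt() definition added at the top
of qlge_dbg.c below: when pr_fmt is defined before <linux/printk.h> is pulled
in, every pr_err()/pr_info() in that file expands with the module name
prepended, so the hand-written "qlge: " prefix becomes redundant. A small
sketch of the mechanism (KBUILD_MODNAME is supplied by the kernel build
system):

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/printk.h>

static void demo(void)
{
	pr_err("Bad type!!! 0x%08x\n", 0x1234);
	/* logs: "qlge: Bad type!!! 0x00001234" */
}
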
diff --git a/drivers/net/qlge/qlge_dbg.c b/drivers/net/qlge/qlge_dbg.c
index 548e901..4747492 100644
--- a/drivers/net/qlge/qlge_dbg.c
+++ b/drivers/net/qlge/qlge_dbg.c
@@ -1,3 +1,5 @@
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/slab.h>
 
 #include "qlge.h"
@@ -446,7 +448,7 @@
 					MAC_ADDR_TYPE_CAM_MAC, i, value);
 		if (status) {
 			netif_err(qdev, drv, qdev->ndev,
-				  "Failed read of mac index register.\n");
+				  "Failed read of mac index register\n");
 			goto err;
 		}
 		*buf++ = value[0];	/* lower MAC address */
@@ -458,7 +460,7 @@
 					MAC_ADDR_TYPE_MULTI_MAC, i, value);
 		if (status) {
 			netif_err(qdev, drv, qdev->ndev,
-				  "Failed read of mac index register.\n");
+				  "Failed read of mac index register\n");
 			goto err;
 		}
 		*buf++ = value[0];	/* lower Mcast address */
@@ -482,7 +484,7 @@
 		status = ql_get_routing_reg(qdev, i, &value);
 		if (status) {
 			netif_err(qdev, drv, qdev->ndev,
-				  "Failed read of routing index register.\n");
+				  "Failed read of routing index register\n");
 			goto err;
 		} else {
 			*buf++ = value;
@@ -668,7 +670,7 @@
 			max_offset = MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT;
 			break;
 		default:
-			printk(KERN_ERR"Bad type!!! 0x%08x\n", type);
+			pr_err("Bad type!!! 0x%08x\n", type);
 			max_index = 0;
 			max_offset = 0;
 			break;
@@ -738,7 +740,7 @@
 	int i;
 
 	if (!mpi_coredump) {
-		netif_err(qdev, drv, qdev->ndev, "No memory available.\n");
+		netif_err(qdev, drv, qdev->ndev, "No memory available\n");
 		return -ENOMEM;
 	}
 
@@ -1234,7 +1236,7 @@
 
 	if (!netif_running(qdev->ndev)) {
 		netif_err(qdev, ifup, qdev->ndev,
-			  "Force Coredump can only be done from interface that is up.\n");
+			  "Force Coredump can only be done from an interface that is up\n");
 		return;
 	}
 	ql_queue_fw_error(qdev);
@@ -1334,7 +1336,7 @@
 		     "Core is dumping to log file!\n");
 
 	for (i = 0; i < count; i += 8) {
-		printk(KERN_ERR "%.08x: %.08x %.08x %.08x %.08x %.08x "
+		pr_err("%.08x: %.08x %.08x %.08x %.08x %.08x "
 			"%.08x %.08x %.08x\n", i,
 			tmp[i + 0],
 			tmp[i + 1],
@@ -1356,71 +1358,43 @@
 	for (i = 0; i < qdev->intr_count; i++) {
 		ql_write32(qdev, INTR_EN, qdev->intr_context[i].intr_read_mask);
 		value = ql_read32(qdev, INTR_EN);
-		printk(KERN_ERR PFX
-		       "%s: Interrupt %d is %s.\n",
+		pr_err("%s: Interrupt %d is %s\n",
 		       qdev->ndev->name, i,
 		       (value & INTR_EN_EN ? "enabled" : "disabled"));
 	}
 }
 
+#define DUMP_XGMAC(qdev, reg)					\
+do {								\
+	u32 data;						\
+	ql_read_xgmac_reg(qdev, reg, &data);			\
+	pr_err("%s: %s = 0x%.08x\n", qdev->ndev->name, #reg, data); \
+} while (0)
+
 void ql_dump_xgmac_control_regs(struct ql_adapter *qdev)
 {
-	u32 data;
 	if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) {
-		printk(KERN_ERR "%s: Couldn't get xgmac sem.\n", __func__);
+		pr_err("%s: Couldn't get xgmac sem\n", __func__);
 		return;
 	}
-	ql_read_xgmac_reg(qdev, PAUSE_SRC_LO, &data);
-	printk(KERN_ERR PFX "%s: PAUSE_SRC_LO = 0x%.08x.\n", qdev->ndev->name,
-	       data);
-	ql_read_xgmac_reg(qdev, PAUSE_SRC_HI, &data);
-	printk(KERN_ERR PFX "%s: PAUSE_SRC_HI = 0x%.08x.\n", qdev->ndev->name,
-	       data);
-	ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
-	printk(KERN_ERR PFX "%s: GLOBAL_CFG = 0x%.08x.\n", qdev->ndev->name,
-	       data);
-	ql_read_xgmac_reg(qdev, TX_CFG, &data);
-	printk(KERN_ERR PFX "%s: TX_CFG = 0x%.08x.\n", qdev->ndev->name, data);
-	ql_read_xgmac_reg(qdev, RX_CFG, &data);
-	printk(KERN_ERR PFX "%s: RX_CFG = 0x%.08x.\n", qdev->ndev->name, data);
-	ql_read_xgmac_reg(qdev, FLOW_CTL, &data);
-	printk(KERN_ERR PFX "%s: FLOW_CTL = 0x%.08x.\n", qdev->ndev->name,
-	       data);
-	ql_read_xgmac_reg(qdev, PAUSE_OPCODE, &data);
-	printk(KERN_ERR PFX "%s: PAUSE_OPCODE = 0x%.08x.\n", qdev->ndev->name,
-	       data);
-	ql_read_xgmac_reg(qdev, PAUSE_TIMER, &data);
-	printk(KERN_ERR PFX "%s: PAUSE_TIMER = 0x%.08x.\n", qdev->ndev->name,
-	       data);
-	ql_read_xgmac_reg(qdev, PAUSE_FRM_DEST_LO, &data);
-	printk(KERN_ERR PFX "%s: PAUSE_FRM_DEST_LO = 0x%.08x.\n",
-	       qdev->ndev->name, data);
-	ql_read_xgmac_reg(qdev, PAUSE_FRM_DEST_HI, &data);
-	printk(KERN_ERR PFX "%s: PAUSE_FRM_DEST_HI = 0x%.08x.\n",
-	       qdev->ndev->name, data);
-	ql_read_xgmac_reg(qdev, MAC_TX_PARAMS, &data);
-	printk(KERN_ERR PFX "%s: MAC_TX_PARAMS = 0x%.08x.\n", qdev->ndev->name,
-	       data);
-	ql_read_xgmac_reg(qdev, MAC_RX_PARAMS, &data);
-	printk(KERN_ERR PFX "%s: MAC_RX_PARAMS = 0x%.08x.\n", qdev->ndev->name,
-	       data);
-	ql_read_xgmac_reg(qdev, MAC_SYS_INT, &data);
-	printk(KERN_ERR PFX "%s: MAC_SYS_INT = 0x%.08x.\n", qdev->ndev->name,
-	       data);
-	ql_read_xgmac_reg(qdev, MAC_SYS_INT_MASK, &data);
-	printk(KERN_ERR PFX "%s: MAC_SYS_INT_MASK = 0x%.08x.\n",
-	       qdev->ndev->name, data);
-	ql_read_xgmac_reg(qdev, MAC_MGMT_INT, &data);
-	printk(KERN_ERR PFX "%s: MAC_MGMT_INT = 0x%.08x.\n", qdev->ndev->name,
-	       data);
-	ql_read_xgmac_reg(qdev, MAC_MGMT_IN_MASK, &data);
-	printk(KERN_ERR PFX "%s: MAC_MGMT_IN_MASK = 0x%.08x.\n",
-	       qdev->ndev->name, data);
-	ql_read_xgmac_reg(qdev, EXT_ARB_MODE, &data);
-	printk(KERN_ERR PFX "%s: EXT_ARB_MODE = 0x%.08x.\n", qdev->ndev->name,
-	       data);
+	DUMP_XGMAC(qdev, PAUSE_SRC_LO);
+	DUMP_XGMAC(qdev, PAUSE_SRC_HI);
+	DUMP_XGMAC(qdev, GLOBAL_CFG);
+	DUMP_XGMAC(qdev, TX_CFG);
+	DUMP_XGMAC(qdev, RX_CFG);
+	DUMP_XGMAC(qdev, FLOW_CTL);
+	DUMP_XGMAC(qdev, PAUSE_OPCODE);
+	DUMP_XGMAC(qdev, PAUSE_TIMER);
+	DUMP_XGMAC(qdev, PAUSE_FRM_DEST_LO);
+	DUMP_XGMAC(qdev, PAUSE_FRM_DEST_HI);
+	DUMP_XGMAC(qdev, MAC_TX_PARAMS);
+	DUMP_XGMAC(qdev, MAC_RX_PARAMS);
+	DUMP_XGMAC(qdev, MAC_SYS_INT);
+	DUMP_XGMAC(qdev, MAC_SYS_INT_MASK);
+	DUMP_XGMAC(qdev, MAC_MGMT_INT);
+	DUMP_XGMAC(qdev, MAC_MGMT_IN_MASK);
+	DUMP_XGMAC(qdev, EXT_ARB_MODE);
 	ql_sem_unlock(qdev, qdev->xg_sem_mask);
-
 }
 
 static void ql_dump_ets_regs(struct ql_adapter *qdev)
@@ -1437,14 +1411,12 @@
 		return;
 	for (i = 0; i < 4; i++) {
 		if (ql_get_mac_addr_reg(qdev, MAC_ADDR_TYPE_CAM_MAC, i, value)) {
-			printk(KERN_ERR PFX
-			       "%s: Failed read of mac index register.\n",
+			pr_err("%s: Failed read of mac index register\n",
 			       __func__);
 			return;
 		} else {
 			if (value[0])
-				printk(KERN_ERR PFX
-				       "%s: CAM index %d CAM Lookup Lower = 0x%.08x:%.08x, Output = 0x%.08x.\n",
+				pr_err("%s: CAM index %d CAM Lookup Lower = 0x%.08x:%.08x, Output = 0x%.08x\n",
 				       qdev->ndev->name, i, value[1], value[0],
 				       value[2]);
 		}
@@ -1452,14 +1424,12 @@
 	for (i = 0; i < 32; i++) {
 		if (ql_get_mac_addr_reg
 		    (qdev, MAC_ADDR_TYPE_MULTI_MAC, i, value)) {
-			printk(KERN_ERR PFX
-			       "%s: Failed read of mac index register.\n",
+			pr_err("%s: Failed read of mac index register\n",
 			       __func__);
 			return;
 		} else {
 			if (value[0])
-				printk(KERN_ERR PFX
-				       "%s: MCAST index %d CAM Lookup Lower = 0x%.08x:%.08x.\n",
+				pr_err("%s: MCAST index %d CAM Lookup Lower = 0x%.08x:%.08x\n",
 				       qdev->ndev->name, i, value[1], value[0]);
 		}
 	}
@@ -1476,129 +1446,77 @@
 	for (i = 0; i < 16; i++) {
 		value = 0;
 		if (ql_get_routing_reg(qdev, i, &value)) {
-			printk(KERN_ERR PFX
-			       "%s: Failed read of routing index register.\n",
+			pr_err("%s: Failed read of routing index register\n",
 			       __func__);
 			return;
 		} else {
 			if (value)
-				printk(KERN_ERR PFX
-				       "%s: Routing Mask %d = 0x%.08x.\n",
+				pr_err("%s: Routing Mask %d = 0x%.08x\n",
 				       qdev->ndev->name, i, value);
 		}
 	}
 	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
 }
 
+#define DUMP_REG(qdev, reg)			\
+	pr_err("%-32s= 0x%x\n", #reg, ql_read32(qdev, reg))
+
 void ql_dump_regs(struct ql_adapter *qdev)
 {
-	printk(KERN_ERR PFX "reg dump for function #%d.\n", qdev->func);
-	printk(KERN_ERR PFX "SYS	 			= 0x%x.\n",
-	       ql_read32(qdev, SYS));
-	printk(KERN_ERR PFX "RST_FO 			= 0x%x.\n",
-	       ql_read32(qdev, RST_FO));
-	printk(KERN_ERR PFX "FSC 				= 0x%x.\n",
-	       ql_read32(qdev, FSC));
-	printk(KERN_ERR PFX "CSR 				= 0x%x.\n",
-	       ql_read32(qdev, CSR));
-	printk(KERN_ERR PFX "ICB_RID 			= 0x%x.\n",
-	       ql_read32(qdev, ICB_RID));
-	printk(KERN_ERR PFX "ICB_L 				= 0x%x.\n",
-	       ql_read32(qdev, ICB_L));
-	printk(KERN_ERR PFX "ICB_H 				= 0x%x.\n",
-	       ql_read32(qdev, ICB_H));
-	printk(KERN_ERR PFX "CFG 				= 0x%x.\n",
-	       ql_read32(qdev, CFG));
-	printk(KERN_ERR PFX "BIOS_ADDR 			= 0x%x.\n",
-	       ql_read32(qdev, BIOS_ADDR));
-	printk(KERN_ERR PFX "STS 				= 0x%x.\n",
-	       ql_read32(qdev, STS));
-	printk(KERN_ERR PFX "INTR_EN			= 0x%x.\n",
-	       ql_read32(qdev, INTR_EN));
-	printk(KERN_ERR PFX "INTR_MASK 			= 0x%x.\n",
-	       ql_read32(qdev, INTR_MASK));
-	printk(KERN_ERR PFX "ISR1 				= 0x%x.\n",
-	       ql_read32(qdev, ISR1));
-	printk(KERN_ERR PFX "ISR2 				= 0x%x.\n",
-	       ql_read32(qdev, ISR2));
-	printk(KERN_ERR PFX "ISR3 				= 0x%x.\n",
-	       ql_read32(qdev, ISR3));
-	printk(KERN_ERR PFX "ISR4 				= 0x%x.\n",
-	       ql_read32(qdev, ISR4));
-	printk(KERN_ERR PFX "REV_ID 			= 0x%x.\n",
-	       ql_read32(qdev, REV_ID));
-	printk(KERN_ERR PFX "FRC_ECC_ERR 			= 0x%x.\n",
-	       ql_read32(qdev, FRC_ECC_ERR));
-	printk(KERN_ERR PFX "ERR_STS 			= 0x%x.\n",
-	       ql_read32(qdev, ERR_STS));
-	printk(KERN_ERR PFX "RAM_DBG_ADDR 			= 0x%x.\n",
-	       ql_read32(qdev, RAM_DBG_ADDR));
-	printk(KERN_ERR PFX "RAM_DBG_DATA 			= 0x%x.\n",
-	       ql_read32(qdev, RAM_DBG_DATA));
-	printk(KERN_ERR PFX "ECC_ERR_CNT 			= 0x%x.\n",
-	       ql_read32(qdev, ECC_ERR_CNT));
-	printk(KERN_ERR PFX "SEM 				= 0x%x.\n",
-	       ql_read32(qdev, SEM));
-	printk(KERN_ERR PFX "GPIO_1 			= 0x%x.\n",
-	       ql_read32(qdev, GPIO_1));
-	printk(KERN_ERR PFX "GPIO_2 			= 0x%x.\n",
-	       ql_read32(qdev, GPIO_2));
-	printk(KERN_ERR PFX "GPIO_3 			= 0x%x.\n",
-	       ql_read32(qdev, GPIO_3));
-	printk(KERN_ERR PFX "XGMAC_ADDR 			= 0x%x.\n",
-	       ql_read32(qdev, XGMAC_ADDR));
-	printk(KERN_ERR PFX "XGMAC_DATA 			= 0x%x.\n",
-	       ql_read32(qdev, XGMAC_DATA));
-	printk(KERN_ERR PFX "NIC_ETS 			= 0x%x.\n",
-	       ql_read32(qdev, NIC_ETS));
-	printk(KERN_ERR PFX "CNA_ETS 			= 0x%x.\n",
-	       ql_read32(qdev, CNA_ETS));
-	printk(KERN_ERR PFX "FLASH_ADDR 			= 0x%x.\n",
-	       ql_read32(qdev, FLASH_ADDR));
-	printk(KERN_ERR PFX "FLASH_DATA 			= 0x%x.\n",
-	       ql_read32(qdev, FLASH_DATA));
-	printk(KERN_ERR PFX "CQ_STOP 			= 0x%x.\n",
-	       ql_read32(qdev, CQ_STOP));
-	printk(KERN_ERR PFX "PAGE_TBL_RID 			= 0x%x.\n",
-	       ql_read32(qdev, PAGE_TBL_RID));
-	printk(KERN_ERR PFX "WQ_PAGE_TBL_LO 		= 0x%x.\n",
-	       ql_read32(qdev, WQ_PAGE_TBL_LO));
-	printk(KERN_ERR PFX "WQ_PAGE_TBL_HI 		= 0x%x.\n",
-	       ql_read32(qdev, WQ_PAGE_TBL_HI));
-	printk(KERN_ERR PFX "CQ_PAGE_TBL_LO 		= 0x%x.\n",
-	       ql_read32(qdev, CQ_PAGE_TBL_LO));
-	printk(KERN_ERR PFX "CQ_PAGE_TBL_HI 		= 0x%x.\n",
-	       ql_read32(qdev, CQ_PAGE_TBL_HI));
-	printk(KERN_ERR PFX "COS_DFLT_CQ1 			= 0x%x.\n",
-	       ql_read32(qdev, COS_DFLT_CQ1));
-	printk(KERN_ERR PFX "COS_DFLT_CQ2 			= 0x%x.\n",
-	       ql_read32(qdev, COS_DFLT_CQ2));
-	printk(KERN_ERR PFX "SPLT_HDR 			= 0x%x.\n",
-	       ql_read32(qdev, SPLT_HDR));
-	printk(KERN_ERR PFX "FC_PAUSE_THRES 		= 0x%x.\n",
-	       ql_read32(qdev, FC_PAUSE_THRES));
-	printk(KERN_ERR PFX "NIC_PAUSE_THRES 		= 0x%x.\n",
-	       ql_read32(qdev, NIC_PAUSE_THRES));
-	printk(KERN_ERR PFX "FC_ETHERTYPE 			= 0x%x.\n",
-	       ql_read32(qdev, FC_ETHERTYPE));
-	printk(KERN_ERR PFX "FC_RCV_CFG 			= 0x%x.\n",
-	       ql_read32(qdev, FC_RCV_CFG));
-	printk(KERN_ERR PFX "NIC_RCV_CFG 			= 0x%x.\n",
-	       ql_read32(qdev, NIC_RCV_CFG));
-	printk(KERN_ERR PFX "FC_COS_TAGS 			= 0x%x.\n",
-	       ql_read32(qdev, FC_COS_TAGS));
-	printk(KERN_ERR PFX "NIC_COS_TAGS 			= 0x%x.\n",
-	       ql_read32(qdev, NIC_COS_TAGS));
-	printk(KERN_ERR PFX "MGMT_RCV_CFG 			= 0x%x.\n",
-	       ql_read32(qdev, MGMT_RCV_CFG));
-	printk(KERN_ERR PFX "XG_SERDES_ADDR 		= 0x%x.\n",
-	       ql_read32(qdev, XG_SERDES_ADDR));
-	printk(KERN_ERR PFX "XG_SERDES_DATA 		= 0x%x.\n",
-	       ql_read32(qdev, XG_SERDES_DATA));
-	printk(KERN_ERR PFX "PRB_MX_ADDR 			= 0x%x.\n",
-	       ql_read32(qdev, PRB_MX_ADDR));
-	printk(KERN_ERR PFX "PRB_MX_DATA 			= 0x%x.\n",
-	       ql_read32(qdev, PRB_MX_DATA));
+	pr_err("reg dump for function #%d\n", qdev->func);
+	DUMP_REG(qdev, SYS);
+	DUMP_REG(qdev, RST_FO);
+	DUMP_REG(qdev, FSC);
+	DUMP_REG(qdev, CSR);
+	DUMP_REG(qdev, ICB_RID);
+	DUMP_REG(qdev, ICB_L);
+	DUMP_REG(qdev, ICB_H);
+	DUMP_REG(qdev, CFG);
+	DUMP_REG(qdev, BIOS_ADDR);
+	DUMP_REG(qdev, STS);
+	DUMP_REG(qdev, INTR_EN);
+	DUMP_REG(qdev, INTR_MASK);
+	DUMP_REG(qdev, ISR1);
+	DUMP_REG(qdev, ISR2);
+	DUMP_REG(qdev, ISR3);
+	DUMP_REG(qdev, ISR4);
+	DUMP_REG(qdev, REV_ID);
+	DUMP_REG(qdev, FRC_ECC_ERR);
+	DUMP_REG(qdev, ERR_STS);
+	DUMP_REG(qdev, RAM_DBG_ADDR);
+	DUMP_REG(qdev, RAM_DBG_DATA);
+	DUMP_REG(qdev, ECC_ERR_CNT);
+	DUMP_REG(qdev, SEM);
+	DUMP_REG(qdev, GPIO_1);
+	DUMP_REG(qdev, GPIO_2);
+	DUMP_REG(qdev, GPIO_3);
+	DUMP_REG(qdev, XGMAC_ADDR);
+	DUMP_REG(qdev, XGMAC_DATA);
+	DUMP_REG(qdev, NIC_ETS);
+	DUMP_REG(qdev, CNA_ETS);
+	DUMP_REG(qdev, FLASH_ADDR);
+	DUMP_REG(qdev, FLASH_DATA);
+	DUMP_REG(qdev, CQ_STOP);
+	DUMP_REG(qdev, PAGE_TBL_RID);
+	DUMP_REG(qdev, WQ_PAGE_TBL_LO);
+	DUMP_REG(qdev, WQ_PAGE_TBL_HI);
+	DUMP_REG(qdev, CQ_PAGE_TBL_LO);
+	DUMP_REG(qdev, CQ_PAGE_TBL_HI);
+	DUMP_REG(qdev, COS_DFLT_CQ1);
+	DUMP_REG(qdev, COS_DFLT_CQ2);
+	DUMP_REG(qdev, SPLT_HDR);
+	DUMP_REG(qdev, FC_PAUSE_THRES);
+	DUMP_REG(qdev, NIC_PAUSE_THRES);
+	DUMP_REG(qdev, FC_ETHERTYPE);
+	DUMP_REG(qdev, FC_RCV_CFG);
+	DUMP_REG(qdev, NIC_RCV_CFG);
+	DUMP_REG(qdev, FC_COS_TAGS);
+	DUMP_REG(qdev, NIC_COS_TAGS);
+	DUMP_REG(qdev, MGMT_RCV_CFG);
+	DUMP_REG(qdev, XG_SERDES_ADDR);
+	DUMP_REG(qdev, XG_SERDES_DATA);
+	DUMP_REG(qdev, PRB_MX_ADDR);
+	DUMP_REG(qdev, PRB_MX_DATA);
 	ql_dump_intr_states(qdev);
 	ql_dump_xgmac_control_regs(qdev);
 	ql_dump_ets_regs(qdev);
@@ -1608,191 +1526,124 @@
 #endif
 
 #ifdef QL_STAT_DUMP
+
+#define DUMP_STAT(qdev, stat)	\
+	pr_err("%s = %ld\n", #stat, (unsigned long)qdev->nic_stats.stat)
+
 void ql_dump_stat(struct ql_adapter *qdev)
 {
-	printk(KERN_ERR "%s: Enter.\n", __func__);
-	printk(KERN_ERR "tx_pkts = %ld\n",
-	       (unsigned long)qdev->nic_stats.tx_pkts);
-	printk(KERN_ERR "tx_bytes = %ld\n",
-	       (unsigned long)qdev->nic_stats.tx_bytes);
-	printk(KERN_ERR "tx_mcast_pkts = %ld.\n",
-	       (unsigned long)qdev->nic_stats.tx_mcast_pkts);
-	printk(KERN_ERR "tx_bcast_pkts = %ld.\n",
-	       (unsigned long)qdev->nic_stats.tx_bcast_pkts);
-	printk(KERN_ERR "tx_ucast_pkts = %ld.\n",
-	       (unsigned long)qdev->nic_stats.tx_ucast_pkts);
-	printk(KERN_ERR "tx_ctl_pkts = %ld.\n",
-	       (unsigned long)qdev->nic_stats.tx_ctl_pkts);
-	printk(KERN_ERR "tx_pause_pkts = %ld.\n",
-	       (unsigned long)qdev->nic_stats.tx_pause_pkts);
-	printk(KERN_ERR "tx_64_pkt = %ld.\n",
-	       (unsigned long)qdev->nic_stats.tx_64_pkt);
-	printk(KERN_ERR "tx_65_to_127_pkt = %ld.\n",
-	       (unsigned long)qdev->nic_stats.tx_65_to_127_pkt);
-	printk(KERN_ERR "tx_128_to_255_pkt = %ld.\n",
-	       (unsigned long)qdev->nic_stats.tx_128_to_255_pkt);
-	printk(KERN_ERR "tx_256_511_pkt = %ld.\n",
-	       (unsigned long)qdev->nic_stats.tx_256_511_pkt);
-	printk(KERN_ERR "tx_512_to_1023_pkt = %ld.\n",
-	       (unsigned long)qdev->nic_stats.tx_512_to_1023_pkt);
-	printk(KERN_ERR "tx_1024_to_1518_pkt = %ld.\n",
-	       (unsigned long)qdev->nic_stats.tx_1024_to_1518_pkt);
-	printk(KERN_ERR "tx_1519_to_max_pkt = %ld.\n",
-	       (unsigned long)qdev->nic_stats.tx_1519_to_max_pkt);
-	printk(KERN_ERR "tx_undersize_pkt = %ld.\n",
-	       (unsigned long)qdev->nic_stats.tx_undersize_pkt);
-	printk(KERN_ERR "tx_oversize_pkt = %ld.\n",
-	       (unsigned long)qdev->nic_stats.tx_oversize_pkt);
-	printk(KERN_ERR "rx_bytes = %ld.\n",
-	       (unsigned long)qdev->nic_stats.rx_bytes);
-	printk(KERN_ERR "rx_bytes_ok = %ld.\n",
-	       (unsigned long)qdev->nic_stats.rx_bytes_ok);
-	printk(KERN_ERR "rx_pkts = %ld.\n",
-	       (unsigned long)qdev->nic_stats.rx_pkts);
-	printk(KERN_ERR "rx_pkts_ok = %ld.\n",
-	       (unsigned long)qdev->nic_stats.rx_pkts_ok);
-	printk(KERN_ERR "rx_bcast_pkts = %ld.\n",
-	       (unsigned long)qdev->nic_stats.rx_bcast_pkts);
-	printk(KERN_ERR "rx_mcast_pkts = %ld.\n",
-	       (unsigned long)qdev->nic_stats.rx_mcast_pkts);
-	printk(KERN_ERR "rx_ucast_pkts = %ld.\n",
-	       (unsigned long)qdev->nic_stats.rx_ucast_pkts);
-	printk(KERN_ERR "rx_undersize_pkts = %ld.\n",
-	       (unsigned long)qdev->nic_stats.rx_undersize_pkts);
-	printk(KERN_ERR "rx_oversize_pkts = %ld.\n",
-	       (unsigned long)qdev->nic_stats.rx_oversize_pkts);
-	printk(KERN_ERR "rx_jabber_pkts = %ld.\n",
-	       (unsigned long)qdev->nic_stats.rx_jabber_pkts);
-	printk(KERN_ERR "rx_undersize_fcerr_pkts = %ld.\n",
-	       (unsigned long)qdev->nic_stats.rx_undersize_fcerr_pkts);
-	printk(KERN_ERR "rx_drop_events = %ld.\n",
-	       (unsigned long)qdev->nic_stats.rx_drop_events);
-	printk(KERN_ERR "rx_fcerr_pkts = %ld.\n",
-	       (unsigned long)qdev->nic_stats.rx_fcerr_pkts);
-	printk(KERN_ERR "rx_align_err = %ld.\n",
-	       (unsigned long)qdev->nic_stats.rx_align_err);
-	printk(KERN_ERR "rx_symbol_err = %ld.\n",
-	       (unsigned long)qdev->nic_stats.rx_symbol_err);
-	printk(KERN_ERR "rx_mac_err = %ld.\n",
-	       (unsigned long)qdev->nic_stats.rx_mac_err);
-	printk(KERN_ERR "rx_ctl_pkts = %ld.\n",
-	       (unsigned long)qdev->nic_stats.rx_ctl_pkts);
-	printk(KERN_ERR "rx_pause_pkts = %ld.\n",
-	       (unsigned long)qdev->nic_stats.rx_pause_pkts);
-	printk(KERN_ERR "rx_64_pkts = %ld.\n",
-	       (unsigned long)qdev->nic_stats.rx_64_pkts);
-	printk(KERN_ERR "rx_65_to_127_pkts = %ld.\n",
-	       (unsigned long)qdev->nic_stats.rx_65_to_127_pkts);
-	printk(KERN_ERR "rx_128_255_pkts = %ld.\n",
-	       (unsigned long)qdev->nic_stats.rx_128_255_pkts);
-	printk(KERN_ERR "rx_256_511_pkts = %ld.\n",
-	       (unsigned long)qdev->nic_stats.rx_256_511_pkts);
-	printk(KERN_ERR "rx_512_to_1023_pkts = %ld.\n",
-	       (unsigned long)qdev->nic_stats.rx_512_to_1023_pkts);
-	printk(KERN_ERR "rx_1024_to_1518_pkts = %ld.\n",
-	       (unsigned long)qdev->nic_stats.rx_1024_to_1518_pkts);
-	printk(KERN_ERR "rx_1519_to_max_pkts = %ld.\n",
-	       (unsigned long)qdev->nic_stats.rx_1519_to_max_pkts);
-	printk(KERN_ERR "rx_len_err_pkts = %ld.\n",
-	       (unsigned long)qdev->nic_stats.rx_len_err_pkts);
+	pr_err("%s: Enter\n", __func__);
+	DUMP_STAT(qdev, tx_pkts);
+	DUMP_STAT(qdev, tx_bytes);
+	DUMP_STAT(qdev, tx_mcast_pkts);
+	DUMP_STAT(qdev, tx_bcast_pkts);
+	DUMP_STAT(qdev, tx_ucast_pkts);
+	DUMP_STAT(qdev, tx_ctl_pkts);
+	DUMP_STAT(qdev, tx_pause_pkts);
+	DUMP_STAT(qdev, tx_64_pkt);
+	DUMP_STAT(qdev, tx_65_to_127_pkt);
+	DUMP_STAT(qdev, tx_128_to_255_pkt);
+	DUMP_STAT(qdev, tx_256_511_pkt);
+	DUMP_STAT(qdev, tx_512_to_1023_pkt);
+	DUMP_STAT(qdev, tx_1024_to_1518_pkt);
+	DUMP_STAT(qdev, tx_1519_to_max_pkt);
+	DUMP_STAT(qdev, tx_undersize_pkt);
+	DUMP_STAT(qdev, tx_oversize_pkt);
+	DUMP_STAT(qdev, rx_bytes);
+	DUMP_STAT(qdev, rx_bytes_ok);
+	DUMP_STAT(qdev, rx_pkts);
+	DUMP_STAT(qdev, rx_pkts_ok);
+	DUMP_STAT(qdev, rx_bcast_pkts);
+	DUMP_STAT(qdev, rx_mcast_pkts);
+	DUMP_STAT(qdev, rx_ucast_pkts);
+	DUMP_STAT(qdev, rx_undersize_pkts);
+	DUMP_STAT(qdev, rx_oversize_pkts);
+	DUMP_STAT(qdev, rx_jabber_pkts);
+	DUMP_STAT(qdev, rx_undersize_fcerr_pkts);
+	DUMP_STAT(qdev, rx_drop_events);
+	DUMP_STAT(qdev, rx_fcerr_pkts);
+	DUMP_STAT(qdev, rx_align_err);
+	DUMP_STAT(qdev, rx_symbol_err);
+	DUMP_STAT(qdev, rx_mac_err);
+	DUMP_STAT(qdev, rx_ctl_pkts);
+	DUMP_STAT(qdev, rx_pause_pkts);
+	DUMP_STAT(qdev, rx_64_pkts);
+	DUMP_STAT(qdev, rx_65_to_127_pkts);
+	DUMP_STAT(qdev, rx_128_255_pkts);
+	DUMP_STAT(qdev, rx_256_511_pkts);
+	DUMP_STAT(qdev, rx_512_to_1023_pkts);
+	DUMP_STAT(qdev, rx_1024_to_1518_pkts);
+	DUMP_STAT(qdev, rx_1519_to_max_pkts);
+	DUMP_STAT(qdev, rx_len_err_pkts);
 };
 #endif
 
 #ifdef QL_DEV_DUMP
+
+#define DUMP_QDEV_FIELD(qdev, type, field)		\
+	pr_err("qdev->%-24s = " type "\n", #field, qdev->field)
+#define DUMP_QDEV_DMA_FIELD(qdev, field)		\
+	pr_err("qdev->%-24s = %llx\n", #field, (unsigned long long)qdev->field)
+#define DUMP_QDEV_ARRAY(qdev, type, array, index, field) \
+	pr_err("%s[%d].%s = " type "\n",		 \
+	       #array, index, #field, qdev->array[index].field)
 void ql_dump_qdev(struct ql_adapter *qdev)
 {
 	int i;
-	printk(KERN_ERR PFX "qdev->flags 			= %lx.\n",
-	       qdev->flags);
-	printk(KERN_ERR PFX "qdev->vlgrp 			= %p.\n",
-	       qdev->vlgrp);
-	printk(KERN_ERR PFX "qdev->pdev 			= %p.\n",
-	       qdev->pdev);
-	printk(KERN_ERR PFX "qdev->ndev 			= %p.\n",
-	       qdev->ndev);
-	printk(KERN_ERR PFX "qdev->chip_rev_id 		= %d.\n",
-	       qdev->chip_rev_id);
-	printk(KERN_ERR PFX "qdev->reg_base 		= %p.\n",
-	       qdev->reg_base);
-	printk(KERN_ERR PFX "qdev->doorbell_area 	= %p.\n",
-	       qdev->doorbell_area);
-	printk(KERN_ERR PFX "qdev->doorbell_area_size 	= %d.\n",
-	       qdev->doorbell_area_size);
-	printk(KERN_ERR PFX "msg_enable 		= %x.\n",
-	       qdev->msg_enable);
-	printk(KERN_ERR PFX "qdev->rx_ring_shadow_reg_area	= %p.\n",
-	       qdev->rx_ring_shadow_reg_area);
-	printk(KERN_ERR PFX "qdev->rx_ring_shadow_reg_dma 	= %llx.\n",
-	       (unsigned long long) qdev->rx_ring_shadow_reg_dma);
-	printk(KERN_ERR PFX "qdev->tx_ring_shadow_reg_area	= %p.\n",
-	       qdev->tx_ring_shadow_reg_area);
-	printk(KERN_ERR PFX "qdev->tx_ring_shadow_reg_dma	= %llx.\n",
-	       (unsigned long long) qdev->tx_ring_shadow_reg_dma);
-	printk(KERN_ERR PFX "qdev->intr_count 		= %d.\n",
-	       qdev->intr_count);
+	DUMP_QDEV_FIELD(qdev, "%lx", flags);
+	DUMP_QDEV_FIELD(qdev, "%p", vlgrp);
+	DUMP_QDEV_FIELD(qdev, "%p", pdev);
+	DUMP_QDEV_FIELD(qdev, "%p", ndev);
+	DUMP_QDEV_FIELD(qdev, "%d", chip_rev_id);
+	DUMP_QDEV_FIELD(qdev, "%p", reg_base);
+	DUMP_QDEV_FIELD(qdev, "%p", doorbell_area);
+	DUMP_QDEV_FIELD(qdev, "%d", doorbell_area_size);
+	DUMP_QDEV_FIELD(qdev, "%x", msg_enable);
+	DUMP_QDEV_FIELD(qdev, "%p", rx_ring_shadow_reg_area);
+	DUMP_QDEV_DMA_FIELD(qdev, rx_ring_shadow_reg_dma);
+	DUMP_QDEV_FIELD(qdev, "%p", tx_ring_shadow_reg_area);
+	DUMP_QDEV_DMA_FIELD(qdev, tx_ring_shadow_reg_dma);
+	DUMP_QDEV_FIELD(qdev, "%d", intr_count);
 	if (qdev->msi_x_entry)
 		for (i = 0; i < qdev->intr_count; i++) {
-			printk(KERN_ERR PFX
-			       "msi_x_entry.[%d]vector	= %d.\n", i,
-			       qdev->msi_x_entry[i].vector);
-			printk(KERN_ERR PFX
-			       "msi_x_entry.[%d]entry	= %d.\n", i,
-			       qdev->msi_x_entry[i].entry);
+			DUMP_QDEV_ARRAY(qdev, "%d", msi_x_entry, i, vector);
+			DUMP_QDEV_ARRAY(qdev, "%d", msi_x_entry, i, entry);
 		}
 	for (i = 0; i < qdev->intr_count; i++) {
-		printk(KERN_ERR PFX
-		       "intr_context[%d].qdev		= %p.\n", i,
-		       qdev->intr_context[i].qdev);
-		printk(KERN_ERR PFX
-		       "intr_context[%d].intr		= %d.\n", i,
-		       qdev->intr_context[i].intr);
-		printk(KERN_ERR PFX
-		       "intr_context[%d].hooked		= %d.\n", i,
-		       qdev->intr_context[i].hooked);
-		printk(KERN_ERR PFX
-		       "intr_context[%d].intr_en_mask	= 0x%08x.\n", i,
-		       qdev->intr_context[i].intr_en_mask);
-		printk(KERN_ERR PFX
-		       "intr_context[%d].intr_dis_mask	= 0x%08x.\n", i,
-		       qdev->intr_context[i].intr_dis_mask);
-		printk(KERN_ERR PFX
-		       "intr_context[%d].intr_read_mask	= 0x%08x.\n", i,
-		       qdev->intr_context[i].intr_read_mask);
+		DUMP_QDEV_ARRAY(qdev, "%p", intr_context, i, qdev);
+		DUMP_QDEV_ARRAY(qdev, "%d", intr_context, i, intr);
+		DUMP_QDEV_ARRAY(qdev, "%d", intr_context, i, hooked);
+		DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_en_mask);
+		DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_dis_mask);
+		DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_read_mask);
 	}
-	printk(KERN_ERR PFX "qdev->tx_ring_count = %d.\n", qdev->tx_ring_count);
-	printk(KERN_ERR PFX "qdev->rx_ring_count = %d.\n", qdev->rx_ring_count);
-	printk(KERN_ERR PFX "qdev->ring_mem_size = %d.\n", qdev->ring_mem_size);
-	printk(KERN_ERR PFX "qdev->ring_mem 	= %p.\n", qdev->ring_mem);
-	printk(KERN_ERR PFX "qdev->intr_count 	= %d.\n", qdev->intr_count);
-	printk(KERN_ERR PFX "qdev->tx_ring		= %p.\n",
-	       qdev->tx_ring);
-	printk(KERN_ERR PFX "qdev->rss_ring_count 	= %d.\n",
-	       qdev->rss_ring_count);
-	printk(KERN_ERR PFX "qdev->rx_ring	= %p.\n", qdev->rx_ring);
-	printk(KERN_ERR PFX "qdev->default_rx_queue	= %d.\n",
-	       qdev->default_rx_queue);
-	printk(KERN_ERR PFX "qdev->xg_sem_mask		= 0x%08x.\n",
-	       qdev->xg_sem_mask);
-	printk(KERN_ERR PFX "qdev->port_link_up		= 0x%08x.\n",
-	       qdev->port_link_up);
-	printk(KERN_ERR PFX "qdev->port_init		= 0x%08x.\n",
-	       qdev->port_init);
-
+	DUMP_QDEV_FIELD(qdev, "%d", tx_ring_count);
+	DUMP_QDEV_FIELD(qdev, "%d", rx_ring_count);
+	DUMP_QDEV_FIELD(qdev, "%d", ring_mem_size);
+	DUMP_QDEV_FIELD(qdev, "%p", ring_mem);
+	DUMP_QDEV_FIELD(qdev, "%d", intr_count);
+	DUMP_QDEV_FIELD(qdev, "%p", tx_ring);
+	DUMP_QDEV_FIELD(qdev, "%d", rss_ring_count);
+	DUMP_QDEV_FIELD(qdev, "%p", rx_ring);
+	DUMP_QDEV_FIELD(qdev, "%d", default_rx_queue);
+	DUMP_QDEV_FIELD(qdev, "0x%08x", xg_sem_mask);
+	DUMP_QDEV_FIELD(qdev, "0x%08x", port_link_up);
+	DUMP_QDEV_FIELD(qdev, "0x%08x", port_init);
 }
 #endif
 
 #ifdef QL_CB_DUMP
 void ql_dump_wqicb(struct wqicb *wqicb)
 {
-	printk(KERN_ERR PFX "Dumping wqicb stuff...\n");
-	printk(KERN_ERR PFX "wqicb->len = 0x%x.\n", le16_to_cpu(wqicb->len));
-	printk(KERN_ERR PFX "wqicb->flags = %x.\n", le16_to_cpu(wqicb->flags));
-	printk(KERN_ERR PFX "wqicb->cq_id_rss = %d.\n",
+	pr_err("Dumping wqicb stuff...\n");
+	pr_err("wqicb->len = 0x%x\n", le16_to_cpu(wqicb->len));
+	pr_err("wqicb->flags = %x\n", le16_to_cpu(wqicb->flags));
+	pr_err("wqicb->cq_id_rss = %d\n",
 	       le16_to_cpu(wqicb->cq_id_rss));
-	printk(KERN_ERR PFX "wqicb->rid = 0x%x.\n", le16_to_cpu(wqicb->rid));
-	printk(KERN_ERR PFX "wqicb->wq_addr = 0x%llx.\n",
+	pr_err("wqicb->rid = 0x%x\n", le16_to_cpu(wqicb->rid));
+	pr_err("wqicb->wq_addr = 0x%llx\n",
 	       (unsigned long long) le64_to_cpu(wqicb->addr));
-	printk(KERN_ERR PFX "wqicb->wq_cnsmr_idx_addr = 0x%llx.\n",
+	pr_err("wqicb->wq_cnsmr_idx_addr = 0x%llx\n",
 	       (unsigned long long) le64_to_cpu(wqicb->cnsmr_idx_addr));
 }
 
@@ -1800,40 +1651,34 @@
 {
 	if (tx_ring == NULL)
 		return;
-	printk(KERN_ERR PFX
-	       "===================== Dumping tx_ring %d ===============.\n",
+	pr_err("===================== Dumping tx_ring %d ===============\n",
 	       tx_ring->wq_id);
-	printk(KERN_ERR PFX "tx_ring->base = %p.\n", tx_ring->wq_base);
-	printk(KERN_ERR PFX "tx_ring->base_dma = 0x%llx.\n",
+	pr_err("tx_ring->base = %p\n", tx_ring->wq_base);
+	pr_err("tx_ring->base_dma = 0x%llx\n",
 	       (unsigned long long) tx_ring->wq_base_dma);
-	printk(KERN_ERR PFX
-	       "tx_ring->cnsmr_idx_sh_reg, addr = 0x%p, value = %d.\n",
+	pr_err("tx_ring->cnsmr_idx_sh_reg, addr = 0x%p, value = %d\n",
 	       tx_ring->cnsmr_idx_sh_reg,
 	       tx_ring->cnsmr_idx_sh_reg
 			? ql_read_sh_reg(tx_ring->cnsmr_idx_sh_reg) : 0);
-	printk(KERN_ERR PFX "tx_ring->size = %d.\n", tx_ring->wq_size);
-	printk(KERN_ERR PFX "tx_ring->len = %d.\n", tx_ring->wq_len);
-	printk(KERN_ERR PFX "tx_ring->prod_idx_db_reg = %p.\n",
-	       tx_ring->prod_idx_db_reg);
-	printk(KERN_ERR PFX "tx_ring->valid_db_reg = %p.\n",
-	       tx_ring->valid_db_reg);
-	printk(KERN_ERR PFX "tx_ring->prod_idx = %d.\n", tx_ring->prod_idx);
-	printk(KERN_ERR PFX "tx_ring->cq_id = %d.\n", tx_ring->cq_id);
-	printk(KERN_ERR PFX "tx_ring->wq_id = %d.\n", tx_ring->wq_id);
-	printk(KERN_ERR PFX "tx_ring->q = %p.\n", tx_ring->q);
-	printk(KERN_ERR PFX "tx_ring->tx_count = %d.\n",
-	       atomic_read(&tx_ring->tx_count));
+	pr_err("tx_ring->size = %d\n", tx_ring->wq_size);
+	pr_err("tx_ring->len = %d\n", tx_ring->wq_len);
+	pr_err("tx_ring->prod_idx_db_reg = %p\n", tx_ring->prod_idx_db_reg);
+	pr_err("tx_ring->valid_db_reg = %p\n", tx_ring->valid_db_reg);
+	pr_err("tx_ring->prod_idx = %d\n", tx_ring->prod_idx);
+	pr_err("tx_ring->cq_id = %d\n", tx_ring->cq_id);
+	pr_err("tx_ring->wq_id = %d\n", tx_ring->wq_id);
+	pr_err("tx_ring->q = %p\n", tx_ring->q);
+	pr_err("tx_ring->tx_count = %d\n", atomic_read(&tx_ring->tx_count));
 }
 
 void ql_dump_ricb(struct ricb *ricb)
 {
 	int i;
-	printk(KERN_ERR PFX
-	       "===================== Dumping ricb ===============.\n");
-	printk(KERN_ERR PFX "Dumping ricb stuff...\n");
+	pr_err("===================== Dumping ricb ===============\n");
+	pr_err("Dumping ricb stuff...\n");
 
-	printk(KERN_ERR PFX "ricb->base_cq = %d.\n", ricb->base_cq & 0x1f);
-	printk(KERN_ERR PFX "ricb->flags = %s%s%s%s%s%s%s%s%s.\n",
+	pr_err("ricb->base_cq = %d\n", ricb->base_cq & 0x1f);
+	pr_err("ricb->flags = %s%s%s%s%s%s%s%s%s\n",
 	       ricb->base_cq & RSS_L4K ? "RSS_L4K " : "",
 	       ricb->flags & RSS_L6K ? "RSS_L6K " : "",
 	       ricb->flags & RSS_LI ? "RSS_LI " : "",
@@ -1843,44 +1688,44 @@
 	       ricb->flags & RSS_RT4 ? "RSS_RT4 " : "",
 	       ricb->flags & RSS_RI6 ? "RSS_RI6 " : "",
 	       ricb->flags & RSS_RT6 ? "RSS_RT6 " : "");
-	printk(KERN_ERR PFX "ricb->mask = 0x%.04x.\n", le16_to_cpu(ricb->mask));
+	pr_err("ricb->mask = 0x%.04x\n", le16_to_cpu(ricb->mask));
 	for (i = 0; i < 16; i++)
-		printk(KERN_ERR PFX "ricb->hash_cq_id[%d] = 0x%.08x.\n", i,
+		pr_err("ricb->hash_cq_id[%d] = 0x%.08x\n", i,
 		       le32_to_cpu(ricb->hash_cq_id[i]));
 	for (i = 0; i < 10; i++)
-		printk(KERN_ERR PFX "ricb->ipv6_hash_key[%d] = 0x%.08x.\n", i,
+		pr_err("ricb->ipv6_hash_key[%d] = 0x%.08x\n", i,
 		       le32_to_cpu(ricb->ipv6_hash_key[i]));
 	for (i = 0; i < 4; i++)
-		printk(KERN_ERR PFX "ricb->ipv4_hash_key[%d] = 0x%.08x.\n", i,
+		pr_err("ricb->ipv4_hash_key[%d] = 0x%.08x\n", i,
 		       le32_to_cpu(ricb->ipv4_hash_key[i]));
 }
 
 void ql_dump_cqicb(struct cqicb *cqicb)
 {
-	printk(KERN_ERR PFX "Dumping cqicb stuff...\n");
+	pr_err("Dumping cqicb stuff...\n");
 
-	printk(KERN_ERR PFX "cqicb->msix_vect = %d.\n", cqicb->msix_vect);
-	printk(KERN_ERR PFX "cqicb->flags = %x.\n", cqicb->flags);
-	printk(KERN_ERR PFX "cqicb->len = %d.\n", le16_to_cpu(cqicb->len));
-	printk(KERN_ERR PFX "cqicb->addr = 0x%llx.\n",
+	pr_err("cqicb->msix_vect = %d\n", cqicb->msix_vect);
+	pr_err("cqicb->flags = %x\n", cqicb->flags);
+	pr_err("cqicb->len = %d\n", le16_to_cpu(cqicb->len));
+	pr_err("cqicb->addr = 0x%llx\n",
 	       (unsigned long long) le64_to_cpu(cqicb->addr));
-	printk(KERN_ERR PFX "cqicb->prod_idx_addr = 0x%llx.\n",
+	pr_err("cqicb->prod_idx_addr = 0x%llx\n",
 	       (unsigned long long) le64_to_cpu(cqicb->prod_idx_addr));
-	printk(KERN_ERR PFX "cqicb->pkt_delay = 0x%.04x.\n",
+	pr_err("cqicb->pkt_delay = 0x%.04x\n",
 	       le16_to_cpu(cqicb->pkt_delay));
-	printk(KERN_ERR PFX "cqicb->irq_delay = 0x%.04x.\n",
+	pr_err("cqicb->irq_delay = 0x%.04x\n",
 	       le16_to_cpu(cqicb->irq_delay));
-	printk(KERN_ERR PFX "cqicb->lbq_addr = 0x%llx.\n",
+	pr_err("cqicb->lbq_addr = 0x%llx\n",
 	       (unsigned long long) le64_to_cpu(cqicb->lbq_addr));
-	printk(KERN_ERR PFX "cqicb->lbq_buf_size = 0x%.04x.\n",
+	pr_err("cqicb->lbq_buf_size = 0x%.04x\n",
 	       le16_to_cpu(cqicb->lbq_buf_size));
-	printk(KERN_ERR PFX "cqicb->lbq_len = 0x%.04x.\n",
+	pr_err("cqicb->lbq_len = 0x%.04x\n",
 	       le16_to_cpu(cqicb->lbq_len));
-	printk(KERN_ERR PFX "cqicb->sbq_addr = 0x%llx.\n",
+	pr_err("cqicb->sbq_addr = 0x%llx\n",
 	       (unsigned long long) le64_to_cpu(cqicb->sbq_addr));
-	printk(KERN_ERR PFX "cqicb->sbq_buf_size = 0x%.04x.\n",
+	pr_err("cqicb->sbq_buf_size = 0x%.04x\n",
 	       le16_to_cpu(cqicb->sbq_buf_size));
-	printk(KERN_ERR PFX "cqicb->sbq_len = 0x%.04x.\n",
+	pr_err("cqicb->sbq_len = 0x%.04x\n",
 	       le16_to_cpu(cqicb->sbq_len));
 }
 
@@ -1888,100 +1733,85 @@
 {
 	if (rx_ring == NULL)
 		return;
-	printk(KERN_ERR PFX
-	       "===================== Dumping rx_ring %d ===============.\n",
+	pr_err("===================== Dumping rx_ring %d ===============\n",
 	       rx_ring->cq_id);
-	printk(KERN_ERR PFX "Dumping rx_ring %d, type = %s%s%s.\n",
+	pr_err("Dumping rx_ring %d, type = %s%s%s\n",
 	       rx_ring->cq_id, rx_ring->type == DEFAULT_Q ? "DEFAULT" : "",
 	       rx_ring->type == TX_Q ? "OUTBOUND COMPLETIONS" : "",
 	       rx_ring->type == RX_Q ? "INBOUND_COMPLETIONS" : "");
-	printk(KERN_ERR PFX "rx_ring->cqicb = %p.\n", &rx_ring->cqicb);
-	printk(KERN_ERR PFX "rx_ring->cq_base = %p.\n", rx_ring->cq_base);
-	printk(KERN_ERR PFX "rx_ring->cq_base_dma = %llx.\n",
+	pr_err("rx_ring->cqicb = %p\n", &rx_ring->cqicb);
+	pr_err("rx_ring->cq_base = %p\n", rx_ring->cq_base);
+	pr_err("rx_ring->cq_base_dma = %llx\n",
 	       (unsigned long long) rx_ring->cq_base_dma);
-	printk(KERN_ERR PFX "rx_ring->cq_size = %d.\n", rx_ring->cq_size);
-	printk(KERN_ERR PFX "rx_ring->cq_len = %d.\n", rx_ring->cq_len);
-	printk(KERN_ERR PFX
-	       "rx_ring->prod_idx_sh_reg, addr = 0x%p, value = %d.\n",
+	pr_err("rx_ring->cq_size = %d\n", rx_ring->cq_size);
+	pr_err("rx_ring->cq_len = %d\n", rx_ring->cq_len);
+	pr_err("rx_ring->prod_idx_sh_reg, addr = 0x%p, value = %d\n",
 	       rx_ring->prod_idx_sh_reg,
 	       rx_ring->prod_idx_sh_reg
 			? ql_read_sh_reg(rx_ring->prod_idx_sh_reg) : 0);
-	printk(KERN_ERR PFX "rx_ring->prod_idx_sh_reg_dma = %llx.\n",
+	pr_err("rx_ring->prod_idx_sh_reg_dma = %llx\n",
 	       (unsigned long long) rx_ring->prod_idx_sh_reg_dma);
-	printk(KERN_ERR PFX "rx_ring->cnsmr_idx_db_reg = %p.\n",
+	pr_err("rx_ring->cnsmr_idx_db_reg = %p\n",
 	       rx_ring->cnsmr_idx_db_reg);
-	printk(KERN_ERR PFX "rx_ring->cnsmr_idx = %d.\n", rx_ring->cnsmr_idx);
-	printk(KERN_ERR PFX "rx_ring->curr_entry = %p.\n", rx_ring->curr_entry);
-	printk(KERN_ERR PFX "rx_ring->valid_db_reg = %p.\n",
-	       rx_ring->valid_db_reg);
+	pr_err("rx_ring->cnsmr_idx = %d\n", rx_ring->cnsmr_idx);
+	pr_err("rx_ring->curr_entry = %p\n", rx_ring->curr_entry);
+	pr_err("rx_ring->valid_db_reg = %p\n", rx_ring->valid_db_reg);
 
-	printk(KERN_ERR PFX "rx_ring->lbq_base = %p.\n", rx_ring->lbq_base);
-	printk(KERN_ERR PFX "rx_ring->lbq_base_dma = %llx.\n",
+	pr_err("rx_ring->lbq_base = %p\n", rx_ring->lbq_base);
+	pr_err("rx_ring->lbq_base_dma = %llx\n",
 	       (unsigned long long) rx_ring->lbq_base_dma);
-	printk(KERN_ERR PFX "rx_ring->lbq_base_indirect = %p.\n",
+	pr_err("rx_ring->lbq_base_indirect = %p\n",
 	       rx_ring->lbq_base_indirect);
-	printk(KERN_ERR PFX "rx_ring->lbq_base_indirect_dma = %llx.\n",
+	pr_err("rx_ring->lbq_base_indirect_dma = %llx\n",
 	       (unsigned long long) rx_ring->lbq_base_indirect_dma);
-	printk(KERN_ERR PFX "rx_ring->lbq = %p.\n", rx_ring->lbq);
-	printk(KERN_ERR PFX "rx_ring->lbq_len = %d.\n", rx_ring->lbq_len);
-	printk(KERN_ERR PFX "rx_ring->lbq_size = %d.\n", rx_ring->lbq_size);
-	printk(KERN_ERR PFX "rx_ring->lbq_prod_idx_db_reg = %p.\n",
+	pr_err("rx_ring->lbq = %p\n", rx_ring->lbq);
+	pr_err("rx_ring->lbq_len = %d\n", rx_ring->lbq_len);
+	pr_err("rx_ring->lbq_size = %d\n", rx_ring->lbq_size);
+	pr_err("rx_ring->lbq_prod_idx_db_reg = %p\n",
 	       rx_ring->lbq_prod_idx_db_reg);
-	printk(KERN_ERR PFX "rx_ring->lbq_prod_idx = %d.\n",
-	       rx_ring->lbq_prod_idx);
-	printk(KERN_ERR PFX "rx_ring->lbq_curr_idx = %d.\n",
-	       rx_ring->lbq_curr_idx);
-	printk(KERN_ERR PFX "rx_ring->lbq_clean_idx = %d.\n",
-	       rx_ring->lbq_clean_idx);
-	printk(KERN_ERR PFX "rx_ring->lbq_free_cnt = %d.\n",
-	       rx_ring->lbq_free_cnt);
-	printk(KERN_ERR PFX "rx_ring->lbq_buf_size = %d.\n",
-	       rx_ring->lbq_buf_size);
+	pr_err("rx_ring->lbq_prod_idx = %d\n", rx_ring->lbq_prod_idx);
+	pr_err("rx_ring->lbq_curr_idx = %d\n", rx_ring->lbq_curr_idx);
+	pr_err("rx_ring->lbq_clean_idx = %d\n", rx_ring->lbq_clean_idx);
+	pr_err("rx_ring->lbq_free_cnt = %d\n", rx_ring->lbq_free_cnt);
+	pr_err("rx_ring->lbq_buf_size = %d\n", rx_ring->lbq_buf_size);
 
-	printk(KERN_ERR PFX "rx_ring->sbq_base = %p.\n", rx_ring->sbq_base);
-	printk(KERN_ERR PFX "rx_ring->sbq_base_dma = %llx.\n",
+	pr_err("rx_ring->sbq_base = %p\n", rx_ring->sbq_base);
+	pr_err("rx_ring->sbq_base_dma = %llx\n",
 	       (unsigned long long) rx_ring->sbq_base_dma);
-	printk(KERN_ERR PFX "rx_ring->sbq_base_indirect = %p.\n",
+	pr_err("rx_ring->sbq_base_indirect = %p\n",
 	       rx_ring->sbq_base_indirect);
-	printk(KERN_ERR PFX "rx_ring->sbq_base_indirect_dma = %llx.\n",
+	pr_err("rx_ring->sbq_base_indirect_dma = %llx\n",
 	       (unsigned long long) rx_ring->sbq_base_indirect_dma);
-	printk(KERN_ERR PFX "rx_ring->sbq = %p.\n", rx_ring->sbq);
-	printk(KERN_ERR PFX "rx_ring->sbq_len = %d.\n", rx_ring->sbq_len);
-	printk(KERN_ERR PFX "rx_ring->sbq_size = %d.\n", rx_ring->sbq_size);
-	printk(KERN_ERR PFX "rx_ring->sbq_prod_idx_db_reg addr = %p.\n",
+	pr_err("rx_ring->sbq = %p\n", rx_ring->sbq);
+	pr_err("rx_ring->sbq_len = %d\n", rx_ring->sbq_len);
+	pr_err("rx_ring->sbq_size = %d\n", rx_ring->sbq_size);
+	pr_err("rx_ring->sbq_prod_idx_db_reg addr = %p\n",
 	       rx_ring->sbq_prod_idx_db_reg);
-	printk(KERN_ERR PFX "rx_ring->sbq_prod_idx = %d.\n",
-	       rx_ring->sbq_prod_idx);
-	printk(KERN_ERR PFX "rx_ring->sbq_curr_idx = %d.\n",
-	       rx_ring->sbq_curr_idx);
-	printk(KERN_ERR PFX "rx_ring->sbq_clean_idx = %d.\n",
-	       rx_ring->sbq_clean_idx);
-	printk(KERN_ERR PFX "rx_ring->sbq_free_cnt = %d.\n",
-	       rx_ring->sbq_free_cnt);
-	printk(KERN_ERR PFX "rx_ring->sbq_buf_size = %d.\n",
-	       rx_ring->sbq_buf_size);
-	printk(KERN_ERR PFX "rx_ring->cq_id = %d.\n", rx_ring->cq_id);
-	printk(KERN_ERR PFX "rx_ring->irq = %d.\n", rx_ring->irq);
-	printk(KERN_ERR PFX "rx_ring->cpu = %d.\n", rx_ring->cpu);
-	printk(KERN_ERR PFX "rx_ring->qdev = %p.\n", rx_ring->qdev);
+	pr_err("rx_ring->sbq_prod_idx = %d\n", rx_ring->sbq_prod_idx);
+	pr_err("rx_ring->sbq_curr_idx = %d\n", rx_ring->sbq_curr_idx);
+	pr_err("rx_ring->sbq_clean_idx = %d\n", rx_ring->sbq_clean_idx);
+	pr_err("rx_ring->sbq_free_cnt = %d\n", rx_ring->sbq_free_cnt);
+	pr_err("rx_ring->sbq_buf_size = %d\n", rx_ring->sbq_buf_size);
+	pr_err("rx_ring->cq_id = %d\n", rx_ring->cq_id);
+	pr_err("rx_ring->irq = %d\n", rx_ring->irq);
+	pr_err("rx_ring->cpu = %d\n", rx_ring->cpu);
+	pr_err("rx_ring->qdev = %p\n", rx_ring->qdev);
 }
 
 void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id)
 {
 	void *ptr;
 
-	printk(KERN_ERR PFX "%s: Enter.\n", __func__);
+	pr_err("%s: Enter\n", __func__);
 
 	ptr = kmalloc(size, GFP_ATOMIC);
 	if (ptr == NULL) {
-		printk(KERN_ERR PFX "%s: Couldn't allocate a buffer.\n",
-		       __func__);
+		pr_err("%s: Couldn't allocate a buffer\n", __func__);
 		return;
 	}
 
 	if (ql_write_cfg(qdev, ptr, size, bit, q_id)) {
-		printk(KERN_ERR "%s: Failed to upload control block!\n",
-		       __func__);
+		pr_err("%s: Failed to upload control block!\n", __func__);
 		goto fail_it;
 	}
 	switch (bit) {
@@ -1995,8 +1825,7 @@
 		ql_dump_ricb((struct ricb *)ptr);
 		break;
 	default:
-		printk(KERN_ERR PFX "%s: Invalid bit value = %x.\n",
-		       __func__, bit);
+		pr_err("%s: Invalid bit value = %x\n", __func__, bit);
 		break;
 	}
 fail_it:
@@ -2007,27 +1836,27 @@
 #ifdef QL_OB_DUMP
 void ql_dump_tx_desc(struct tx_buf_desc *tbd)
 {
-	printk(KERN_ERR PFX "tbd->addr  = 0x%llx\n",
+	pr_err("tbd->addr  = 0x%llx\n",
 	       le64_to_cpu((u64) tbd->addr));
-	printk(KERN_ERR PFX "tbd->len   = %d\n",
+	pr_err("tbd->len   = %d\n",
 	       le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
-	printk(KERN_ERR PFX "tbd->flags = %s %s\n",
+	pr_err("tbd->flags = %s %s\n",
 	       tbd->len & TX_DESC_C ? "C" : ".",
 	       tbd->len & TX_DESC_E ? "E" : ".");
 	tbd++;
-	printk(KERN_ERR PFX "tbd->addr  = 0x%llx\n",
+	pr_err("tbd->addr  = 0x%llx\n",
 	       le64_to_cpu((u64) tbd->addr));
-	printk(KERN_ERR PFX "tbd->len   = %d\n",
+	pr_err("tbd->len   = %d\n",
 	       le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
-	printk(KERN_ERR PFX "tbd->flags = %s %s\n",
+	pr_err("tbd->flags = %s %s\n",
 	       tbd->len & TX_DESC_C ? "C" : ".",
 	       tbd->len & TX_DESC_E ? "E" : ".");
 	tbd++;
-	printk(KERN_ERR PFX "tbd->addr  = 0x%llx\n",
+	pr_err("tbd->addr  = 0x%llx\n",
 	       le64_to_cpu((u64) tbd->addr));
-	printk(KERN_ERR PFX "tbd->len   = %d\n",
+	pr_err("tbd->len   = %d\n",
 	       le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
-	printk(KERN_ERR PFX "tbd->flags = %s %s\n",
+	pr_err("tbd->flags = %s %s\n",
 	       tbd->len & TX_DESC_C ? "C" : ".",
 	       tbd->len & TX_DESC_E ? "E" : ".");
 
@@ -2040,38 +1869,38 @@
 	struct tx_buf_desc *tbd;
 	u16 frame_len;
 
-	printk(KERN_ERR PFX "%s\n", __func__);
-	printk(KERN_ERR PFX "opcode         = %s\n",
+	pr_err("%s\n", __func__);
+	pr_err("opcode         = %s\n",
 	       (ob_mac_iocb->opcode == OPCODE_OB_MAC_IOCB) ? "MAC" : "TSO");
-	printk(KERN_ERR PFX "flags1          = %s %s %s %s %s\n",
+	pr_err("flags1          = %s %s %s %s %s\n",
 	       ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_OI ? "OI" : "",
 	       ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_I ? "I" : "",
 	       ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_D ? "D" : "",
 	       ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP4 ? "IP4" : "",
 	       ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP6 ? "IP6" : "");
-	printk(KERN_ERR PFX "flags2          = %s %s %s\n",
+	pr_err("flags2          = %s %s %s\n",
 	       ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_LSO ? "LSO" : "",
 	       ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_UC ? "UC" : "",
 	       ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_TC ? "TC" : "");
-	printk(KERN_ERR PFX "flags3          = %s %s %s\n",
+	pr_err("flags3          = %s %s %s\n",
 	       ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_IC ? "IC" : "",
 	       ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_DFP ? "DFP" : "",
 	       ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_V ? "V" : "");
-	printk(KERN_ERR PFX "tid = %x\n", ob_mac_iocb->tid);
-	printk(KERN_ERR PFX "txq_idx = %d\n", ob_mac_iocb->txq_idx);
-	printk(KERN_ERR PFX "vlan_tci      = %x\n", ob_mac_tso_iocb->vlan_tci);
+	pr_err("tid = %x\n", ob_mac_iocb->tid);
+	pr_err("txq_idx = %d\n", ob_mac_iocb->txq_idx);
+	pr_err("vlan_tci      = %x\n", ob_mac_tso_iocb->vlan_tci);
 	if (ob_mac_iocb->opcode == OPCODE_OB_MAC_TSO_IOCB) {
-		printk(KERN_ERR PFX "frame_len      = %d\n",
+		pr_err("frame_len      = %d\n",
 		       le32_to_cpu(ob_mac_tso_iocb->frame_len));
-		printk(KERN_ERR PFX "mss      = %d\n",
+		pr_err("mss      = %d\n",
 		       le16_to_cpu(ob_mac_tso_iocb->mss));
-		printk(KERN_ERR PFX "prot_hdr_len   = %d\n",
+		pr_err("prot_hdr_len   = %d\n",
 		       le16_to_cpu(ob_mac_tso_iocb->total_hdrs_len));
-		printk(KERN_ERR PFX "hdr_offset     = 0x%.04x\n",
+		pr_err("hdr_offset     = 0x%.04x\n",
 		       le16_to_cpu(ob_mac_tso_iocb->net_trans_offset));
 		frame_len = le32_to_cpu(ob_mac_tso_iocb->frame_len);
 	} else {
-		printk(KERN_ERR PFX "frame_len      = %d\n",
+		pr_err("frame_len      = %d\n",
 		       le16_to_cpu(ob_mac_iocb->frame_len));
 		frame_len = le16_to_cpu(ob_mac_iocb->frame_len);
 	}
@@ -2081,9 +1910,9 @@
 
 void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp)
 {
-	printk(KERN_ERR PFX "%s\n", __func__);
-	printk(KERN_ERR PFX "opcode         = %d\n", ob_mac_rsp->opcode);
-	printk(KERN_ERR PFX "flags          = %s %s %s %s %s %s %s\n",
+	pr_err("%s\n", __func__);
+	pr_err("opcode         = %d\n", ob_mac_rsp->opcode);
+	pr_err("flags          = %s %s %s %s %s %s %s\n",
 	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_OI ? "OI" : ".",
 	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_I ? "I" : ".",
 	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_E ? "E" : ".",
@@ -2091,16 +1920,16 @@
 	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_L ? "L" : ".",
 	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_P ? "P" : ".",
 	       ob_mac_rsp->flags2 & OB_MAC_IOCB_RSP_B ? "B" : ".");
-	printk(KERN_ERR PFX "tid = %x\n", ob_mac_rsp->tid);
+	pr_err("tid = %x\n", ob_mac_rsp->tid);
 }
 #endif
 
 #ifdef QL_IB_DUMP
 void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp)
 {
-	printk(KERN_ERR PFX "%s\n", __func__);
-	printk(KERN_ERR PFX "opcode         = 0x%x\n", ib_mac_rsp->opcode);
-	printk(KERN_ERR PFX "flags1 = %s%s%s%s%s%s\n",
+	pr_err("%s\n", __func__);
+	pr_err("opcode         = 0x%x\n", ib_mac_rsp->opcode);
+	pr_err("flags1 = %s%s%s%s%s%s\n",
 	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_OI ? "OI " : "",
 	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_I ? "I " : "",
 	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_TE ? "TE " : "",
@@ -2109,7 +1938,7 @@
 	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_B ? "B " : "");
 
 	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK)
-		printk(KERN_ERR PFX "%s%s%s Multicast.\n",
+		pr_err("%s%s%s Multicast\n",
 		       (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
 		       IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
 		       (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
@@ -2117,7 +1946,7 @@
 		       (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
 		       IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
 
-	printk(KERN_ERR PFX "flags2 = %s%s%s%s%s\n",
+	pr_err("flags2 = %s%s%s%s%s\n",
 	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) ? "P " : "",
 	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? "V " : "",
 	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) ? "U " : "",
@@ -2125,7 +1954,7 @@
 	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_FO) ? "FO " : "");
 
 	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK)
-		printk(KERN_ERR PFX "%s%s%s%s%s error.\n",
+		pr_err("%s%s%s%s%s error\n",
 		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
 		       IB_MAC_IOCB_RSP_ERR_OVERSIZE ? "oversize" : "",
 		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
@@ -2137,12 +1966,12 @@
 		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
 		       IB_MAC_IOCB_RSP_ERR_CRC ? "CRC" : "");
 
-	printk(KERN_ERR PFX "flags3 = %s%s.\n",
+	pr_err("flags3 = %s%s\n",
 	       ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS ? "DS " : "",
 	       ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL ? "DL " : "");
 
 	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
-		printk(KERN_ERR PFX "RSS flags = %s%s%s%s.\n",
+		pr_err("RSS flags = %s%s%s%s\n",
 		       ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
 			IB_MAC_IOCB_RSP_M_IPV4) ? "IPv4 RSS" : "",
 		       ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
@@ -2152,26 +1981,26 @@
 		       ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
 			IB_MAC_IOCB_RSP_M_TCP_V6) ? "TCP/IPv6 RSS" : "");
 
-	printk(KERN_ERR PFX "data_len	= %d\n",
+	pr_err("data_len	= %d\n",
 	       le32_to_cpu(ib_mac_rsp->data_len));
-	printk(KERN_ERR PFX "data_addr    = 0x%llx\n",
+	pr_err("data_addr    = 0x%llx\n",
 	       (unsigned long long) le64_to_cpu(ib_mac_rsp->data_addr));
 	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
-		printk(KERN_ERR PFX "rss    = %x\n",
+		pr_err("rss    = %x\n",
 		       le32_to_cpu(ib_mac_rsp->rss));
 	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)
-		printk(KERN_ERR PFX "vlan_id    = %x\n",
+		pr_err("vlan_id    = %x\n",
 		       le16_to_cpu(ib_mac_rsp->vlan_id));
 
-	printk(KERN_ERR PFX "flags4 = %s%s%s.\n",
+	pr_err("flags4 = %s%s%s\n",
 		ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV ? "HV " : "",
 		ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS ? "HS " : "",
 		ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HL ? "HL " : "");
 
 	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
-		printk(KERN_ERR PFX "hdr length	= %d.\n",
+		pr_err("hdr length	= %d\n",
 		       le32_to_cpu(ib_mac_rsp->hdr_len));
-		printk(KERN_ERR PFX "hdr addr    = 0x%llx.\n",
+		pr_err("hdr addr    = 0x%llx\n",
 		       (unsigned long long) le64_to_cpu(ib_mac_rsp->hdr_addr));
 	}
 }
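
A note on the DUMP_STAT/DUMP_QDEV_FIELD conversion above: the macros lean on two preprocessor features, stringizing (#field turns the argument into the string "field") and adjacent string-literal concatenation (the "type" argument is pasted into the format string at compile time), so one macro covers every field without repeating its name. A minimal standalone sketch, with an illustrative struct rather than the qlge definitions:

/* Stringizing-macro pattern behind DUMP_QDEV_FIELD: #field names the
 * member as a string, and the format "type" is concatenated into the
 * format string by the compiler.
 */
#include <stdio.h>

struct adapter {
	unsigned long flags;
	int intr_count;
};

#define DUMP_FIELD(adap, type, field) \
	printf("adap->%-24s = " type "\n", #field, (adap)->field)

int main(void)
{
	struct adapter a = { .flags = 0x3, .intr_count = 8 };

	DUMP_FIELD(&a, "%lx", flags);		/* adap->flags ... = 3 */
	DUMP_FIELD(&a, "%d", intr_count);	/* adap->intr_count ... = 8 */
	return 0;
}
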
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index 7d482a2..142c381 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -510,7 +510,7 @@
 	if (!lp->phydev)
 		return -EINVAL;
 
-	return phy_mii_ioctl(lp->phydev, if_mii(rq), cmd);
+	return phy_mii_ioctl(lp->phydev, rq, cmd);
 }
 
 static int r6040_rx(struct net_device *dev, int limit)
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index b8b8584..18bc5b7 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -5796,7 +5796,7 @@
 {
 	u8 *vpd_data;
 	u8 data;
-	int i = 0, cnt, fail = 0;
+	int i = 0, cnt, len, fail = 0;
 	int vpd_addr = 0x80;
 	struct swStat *swstats = &nic->mac_control.stats_info->sw_stat;
 
@@ -5837,20 +5837,28 @@
 
 	if (!fail) {
 		/* read serial number of adapter */
-		for (cnt = 0; cnt < 256; cnt++) {
+		for (cnt = 0; cnt < 252; cnt++) {
 			if ((vpd_data[cnt] == 'S') &&
-			    (vpd_data[cnt+1] == 'N') &&
-			    (vpd_data[cnt+2] < VPD_STRING_LEN)) {
-				memset(nic->serial_num, 0, VPD_STRING_LEN);
-				memcpy(nic->serial_num, &vpd_data[cnt + 3],
-				       vpd_data[cnt+2]);
-				break;
+			    (vpd_data[cnt+1] == 'N')) {
+				len = vpd_data[cnt+2];
+				if (len < min(VPD_STRING_LEN, 256-cnt-2)) {
+					memcpy(nic->serial_num,
+					       &vpd_data[cnt + 3],
+					       len);
+					memset(nic->serial_num+len,
+					       0,
+					       VPD_STRING_LEN-len);
+					break;
+				}
 			}
 		}
 	}
 
-	if ((!fail) && (vpd_data[1] < VPD_STRING_LEN))
-		memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
+	if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
+		len = vpd_data[1];
+		memcpy(nic->product_name, &vpd_data[3], len);
+		nic->product_name[len] = 0;
+	}
 	kfree(vpd_data);
 	swstats->mem_freed += 256;
 }
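
The s2io change above replaces an unbounded scan for the VPD "SN" tag with a length-checked copy: the loop stops early enough that tag, length byte, and data always fit in the 256-byte buffer, and the destination is NUL-padded rather than blindly cleared. A userspace sketch of the same guard, with illustrative buffer sizes rather than the driver's:

/* Bounded VPD serial-number parse: reject a length byte that would
 * overrun either the source buffer or the destination string.
 */
#include <stdio.h>
#include <string.h>

#define VPD_LEN    256
#define SERIAL_LEN 24	/* stand-in for VPD_STRING_LEN */

static void parse_serial(const unsigned char *vpd, char *serial)
{
	int cnt, len;

	memset(serial, 0, SERIAL_LEN);
	for (cnt = 0; cnt + 3 < VPD_LEN; cnt++) {
		if (vpd[cnt] != 'S' || vpd[cnt + 1] != 'N')
			continue;
		len = vpd[cnt + 2];
		if (len < SERIAL_LEN && len <= VPD_LEN - cnt - 3) {
			memcpy(serial, &vpd[cnt + 3], len);
			return;	/* serial[len..] already zeroed */
		}
	}
}

int main(void)
{
	unsigned char vpd[VPD_LEN] = "xxSN" "\x04" "ABCD";
	char serial[SERIAL_LEN];

	parse_serial(vpd, serial);
	printf("serial = \"%s\"\n", serial);	/* prints ABCD */
	return 0;
}
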
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 3645fb3..0af0335 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -65,7 +65,7 @@
 
 /* DEBUG message print. */
 #define DBG_PRINT(dbg_level, fmt, args...) do {			\
-	if (dbg_level >= debug_level)				\
+	if (dbg_level <= debug_level)				\
 		pr_info(fmt, ##args);				\
 	} while (0)
 
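The DBG_PRINT fix flips the comparison so a message is emitted only when its own level is at or below the configured verbosity; with the old `>=` test, raising debug_level silenced output instead of enabling it. A compilable sketch with illustrative level names (the driver's actual level constants differ):

/* Corrected verbosity gate: higher debug_level now means more output. */
#include <stdio.h>

enum { ERR_DBG, INIT_DBG, INFO_DBG };	/* illustrative levels */

static int debug_level = INIT_DBG;

#define DBG_PRINT(dbg_level, fmt, ...) do {	\
	if ((dbg_level) <= debug_level)		\
		printf(fmt, ##__VA_ARGS__);	\
} while (0)

int main(void)
{
	DBG_PRINT(ERR_DBG, "printed: %d <= %d\n", ERR_DBG, debug_level);
	DBG_PRINT(INFO_DBG, "suppressed at this setting\n");
	return 0;
}
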
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index c762c6a..194e5cf 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -79,7 +79,7 @@
 
 #define SKY2_EEPROM_MAGIC	0x9955aabb
 
-#define RING_NEXT(x,s)	(((x)+1) & ((s)-1))
+#define RING_NEXT(x, s)	(((x)+1) & ((s)-1))
 
 static const u32 default_msg =
     NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
@@ -172,7 +172,7 @@
 		udelay(10);
 	}
 
-	dev_warn(&hw->pdev->dev,"%s: phy write timeout\n", hw->dev[port]->name);
+	dev_warn(&hw->pdev->dev, "%s: phy write timeout\n", hw->dev[port]->name);
 	return -ETIMEDOUT;
 
 io_error:
@@ -1067,7 +1067,7 @@
 	return le;
 }
 
-static unsigned sky2_get_rx_threshold(struct sky2_port* sky2)
+static unsigned sky2_get_rx_threshold(struct sky2_port *sky2)
 {
 	unsigned size;
 
@@ -1078,7 +1078,7 @@
 	return (size - 8) / sizeof(u32);
 }
 
-static unsigned sky2_get_rx_data_size(struct sky2_port* sky2)
+static unsigned sky2_get_rx_data_size(struct sky2_port *sky2)
 {
 	struct rx_ring_info *re;
 	unsigned size;
@@ -1102,7 +1102,7 @@
 }
 
 /* Build description to hardware for one receive segment */
-static void sky2_rx_add(struct sky2_port *sky2,  u8 op,
+static void sky2_rx_add(struct sky2_port *sky2, u8 op,
 			dma_addr_t map, unsigned len)
 {
 	struct sky2_rx_le *le;
@@ -3014,7 +3014,7 @@
 	hw->chip_id = sky2_read8(hw, B2_CHIP_ID);
 	hw->chip_rev = (sky2_read8(hw, B2_MAC_CFG) & CFG_CHIP_R_MSK) >> 4;
 
-	switch(hw->chip_id) {
+	switch (hw->chip_id) {
 	case CHIP_ID_YUKON_XL:
 		hw->flags = SKY2_HW_GIGABIT | SKY2_HW_NEWER_PHY;
 		if (hw->chip_rev < CHIP_REV_YU_XL_A2)
@@ -3685,7 +3685,7 @@
 	return 0;
 }
 
-static void inline sky2_add_filter(u8 filter[8], const u8 *addr)
+static inline void sky2_add_filter(u8 filter[8], const u8 *addr)
 {
 	u32 bit;
 
@@ -3911,7 +3911,7 @@
 		return -EINVAL;
 	if (ecmd->rx_max_coalesced_frames > RX_MAX_PENDING)
 		return -EINVAL;
-	if (ecmd->rx_max_coalesced_frames_irq >RX_MAX_PENDING)
+	if (ecmd->rx_max_coalesced_frames_irq > RX_MAX_PENDING)
 		return -EINVAL;
 
 	if (ecmd->tx_coalesce_usecs == 0)
@@ -4372,7 +4372,7 @@
 			seq_printf(seq, "%u:", idx);
 		sop = 0;
 
-		switch(le->opcode & ~HW_OWNER) {
+		switch (le->opcode & ~HW_OWNER) {
 		case OP_ADDR64:
 			seq_printf(seq, " %#x:", a);
 			break;
@@ -4441,7 +4441,7 @@
 	if (dev->netdev_ops->ndo_open != sky2_up || !sky2_debug)
 		return NOTIFY_DONE;
 
-	switch(event) {
+	switch (event) {
 	case NETDEV_CHANGENAME:
 		if (sky2->debugfs) {
 			sky2->debugfs = debugfs_rename(sky2_debug, sky2->debugfs,
@@ -4636,7 +4636,7 @@
 	struct pci_dev *pdev = hw->pdev;
 	int err;
 
-	init_waitqueue_head (&hw->msi_wait);
+	init_waitqueue_head(&hw->msi_wait);
 
 	sky2_write32(hw, B0_IMSK, Y2_IS_IRQ_SW);
 
@@ -4753,7 +4753,7 @@
 	 * this driver uses software swapping.
 	 */
 	reg &= ~PCI_REV_DESC;
-	err = pci_write_config_dword(pdev,PCI_DEV_REG2, reg);
+	err = pci_write_config_dword(pdev, PCI_DEV_REG2, reg);
 	if (err) {
 		dev_err(&pdev->dev, "PCI write config failed\n");
 		goto err_out_free_regions;
diff --git a/drivers/net/stmmac/common.h b/drivers/net/stmmac/common.h
index 144f76f..66b9da0 100644
--- a/drivers/net/stmmac/common.h
+++ b/drivers/net/stmmac/common.h
@@ -108,6 +108,7 @@
 	good_frame = 0,
 	discard_frame = 1,
 	csum_none = 2,
+	llc_snap = 4,
 };
 
 enum tx_dma_irq_status {
diff --git a/drivers/net/stmmac/dwmac1000.h b/drivers/net/stmmac/dwmac1000.h
index d8d0f35..8b20b19 100644
--- a/drivers/net/stmmac/dwmac1000.h
+++ b/drivers/net/stmmac/dwmac1000.h
@@ -93,7 +93,7 @@
 #define GMAC_CONTROL_IPC	0x00000400 /* Checksum Offload */
 #define GMAC_CONTROL_DR		0x00000200 /* Disable Retry */
 #define GMAC_CONTROL_LUD	0x00000100 /* Link up/down */
-#define GMAC_CONTROL_ACS	0x00000080 /* Automatic Pad Stripping */
+#define GMAC_CONTROL_ACS	0x00000080 /* Automatic Pad/FCS Stripping */
 #define GMAC_CONTROL_DC		0x00000010 /* Deferral Check */
 #define GMAC_CONTROL_TE		0x00000008 /* Transmitter Enable */
 #define GMAC_CONTROL_RE		0x00000004 /* Receiver Enable */
diff --git a/drivers/net/stmmac/dwmac1000_core.c b/drivers/net/stmmac/dwmac1000_core.c
index 917b4e1..2b2f5c8 100644
--- a/drivers/net/stmmac/dwmac1000_core.c
+++ b/drivers/net/stmmac/dwmac1000_core.c
@@ -220,6 +220,8 @@
 		((uid & 0x0000ff00) >> 8), (uid & 0x000000ff));
 
 	mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
+	if (!mac)
+		return NULL;
 
 	mac->mac = &dwmac1000_ops;
 	mac->dma = &dwmac1000_dma_ops;
diff --git a/drivers/net/stmmac/dwmac100_core.c b/drivers/net/stmmac/dwmac100_core.c
index 6f270a0..2fb165f 100644
--- a/drivers/net/stmmac/dwmac100_core.c
+++ b/drivers/net/stmmac/dwmac100_core.c
@@ -179,6 +179,8 @@
 	struct mac_device_info *mac;
 
 	mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
+	if (!mac)
+		return NULL;
 
 	pr_info("\tDWMAC100\n");
 
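Both dwmac setup fixes above add the missing NULL check after kzalloc(); the stmmac_main.c hunk below additionally moves its check ahead of the first dereference, since testing `device` after writing `device->desc` is too late. The pattern in isolation, with illustrative types rather than the stmmac ones:

/* Validate the allocation before any member access. */
#include <stdlib.h>

struct mac_info {
	const void *desc;	/* descriptor ops, illustrative */
};

static int mac_attach(struct mac_info **out)
{
	struct mac_info *device = calloc(1, sizeof(*device));

	if (!device)		/* bail out before touching the object */
		return -1;	/* stand-in for -ENOMEM */

	device->desc = NULL;	/* safe: allocation already verified */
	*out = device;
	return 0;
}

int main(void)
{
	struct mac_info *mac = NULL;

	if (mac_attach(&mac))
		return 1;
	free(mac);
	return 0;
}
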
diff --git a/drivers/net/stmmac/enh_desc.c b/drivers/net/stmmac/enh_desc.c
index 3c18ebe..f612f98 100644
--- a/drivers/net/stmmac/enh_desc.c
+++ b/drivers/net/stmmac/enh_desc.c
@@ -123,7 +123,7 @@
 	 */
 	if (status == 0x0) {
 		CHIP_DBG(KERN_INFO "RX Des0 status: IEEE 802.3 Type frame.\n");
-		ret = good_frame;
+		ret = llc_snap;
 	} else if (status == 0x4) {
 		CHIP_DBG(KERN_INFO "RX Des0 status: IPv4/6 No CSUM errorS.\n");
 		ret = good_frame;
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index acf0616..bbb7951 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -829,7 +829,6 @@
 	 * In case of failure continue without timer. */
 	if (unlikely((stmmac_open_ext_timer(dev, priv->tm)) < 0)) {
 		pr_warning("stmmaceth: cannot attach the external timer.\n");
-		tmrate = 0;
 		priv->tm->freq = 0;
 		priv->tm->timer_start = stmmac_no_timer_started;
 		priv->tm->timer_stop = stmmac_no_timer_stopped;
@@ -1217,9 +1216,13 @@
 			priv->dev->stats.rx_errors++;
 		else {
 			struct sk_buff *skb;
-			/* Length should omit the CRC */
-			int frame_len = priv->hw->desc->get_rx_frame_len(p) - 4;
+			int frame_len;
 
+			frame_len = priv->hw->desc->get_rx_frame_len(p);
+			/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
+			 * Type frames (LLC/LLC-SNAP) */
+			if (unlikely(status != llc_snap))
+				frame_len -= ETH_FCS_LEN;
 #ifdef STMMAC_RX_DEBUG
 			if (frame_len > ETH_FRAME_LEN)
 				pr_debug("\tRX frame size %d, COE status: %d\n",
@@ -1558,15 +1561,15 @@
 	else
 		device = dwmac100_setup(ioaddr);
 
+	if (!device)
+		return -ENOMEM;
+
 	if (priv->enh_desc) {
 		device->desc = &enh_desc_ops;
 		pr_info("\tEnhanced descriptor structure\n");
 	} else
 		device->desc = &ndesc_ops;
 
-	if (!device)
-		return -ENOMEM;
-
 	priv->hw = device;
 
 	priv->wolenabled = priv->hw->pmt;	/* PMT supported */
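
The receive-path hunk above stops subtracting the 4-byte FCS when the descriptor status reports an IEEE 802.3 (LLC/SNAP) frame: with automatic pad/FCS stripping (ACS) enabled, the GMAC core has already removed those bytes, so subtracting again would truncate the payload. The adjustment in isolation; the status values mirror the enum added to common.h, the rest is illustrative:

/* Frame-length fixup under ACS: only frames whose FCS the hardware
 * left in place need the 4-byte correction.
 */
#define ETH_FCS_LEN 4

enum rx_frame_status { good_frame = 0, discard_frame = 1,
		       csum_none = 2, llc_snap = 4 };

static int adjusted_rx_len(int desc_len, enum rx_frame_status status)
{
	if (status != llc_snap)	/* FCS still present: strip it */
		desc_len -= ETH_FCS_LEN;
	return desc_len;
}

int main(void)
{
	return adjusted_rx_len(64, good_frame) == 60 ? 0 : 1;
}
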
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index b26a577..bc3af78 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -69,10 +69,10 @@
 
 #define DRV_MODULE_NAME		"tg3"
 #define TG3_MAJ_NUM			3
-#define TG3_MIN_NUM			112
+#define TG3_MIN_NUM			113
 #define DRV_MODULE_VERSION	\
 	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE	"July 11, 2010"
+#define DRV_MODULE_RELDATE	"August 2, 2010"
 
 #define TG3_DEF_MAC_MODE	0
 #define TG3_DEF_RX_MODE		0
@@ -221,12 +221,9 @@
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
-	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
-	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
-	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
@@ -882,7 +879,7 @@
 	unsigned int loops;
 	int ret;
 
-	if ((tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) &&
+	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
 	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
 		return 0;
 
@@ -1178,7 +1175,7 @@
 	case PHY_ID_BCMAC131:
 		phydev->interface = PHY_INTERFACE_MODE_MII;
 		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
-		tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET;
+		tp->phy_flags |= TG3_PHYFLG_IS_FET;
 		break;
 	}
 
@@ -1271,7 +1268,7 @@
 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
 
 	val = 0;
-	if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
+	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
 		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
 			val = reg << 16;
 		if (!tg3_readphy(tp, MII_STAT1000, &reg))
@@ -1379,7 +1376,7 @@
 
 	if (autoneg == AUTONEG_ENABLE &&
 	    (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
-		if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
+		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
 			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
 		else
 			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
@@ -1493,7 +1490,7 @@
 {
 	struct phy_device *phydev;
 
-	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
+	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
 		return 0;
 
 	/* Bring the PHY back to a known state. */
@@ -1513,7 +1510,7 @@
 	switch (phydev->interface) {
 	case PHY_INTERFACE_MODE_GMII:
 	case PHY_INTERFACE_MODE_RGMII:
-		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
+		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
 			phydev->supported &= (PHY_GBIT_FEATURES |
 					      SUPPORTED_Pause |
 					      SUPPORTED_Asym_Pause);
@@ -1530,7 +1527,7 @@
 		return -EINVAL;
 	}
 
-	tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;
+	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
 
 	phydev->advertising = phydev->supported;
 
@@ -1541,13 +1538,13 @@
 {
 	struct phy_device *phydev;
 
-	if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
+	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
 		return;
 
 	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
 
-	if (tp->link_config.phy_is_low_power) {
-		tp->link_config.phy_is_low_power = 0;
+	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
+		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
 		phydev->speed = tp->link_config.orig_speed;
 		phydev->duplex = tp->link_config.orig_duplex;
 		phydev->autoneg = tp->link_config.orig_autoneg;
@@ -1561,7 +1558,7 @@
 
 static void tg3_phy_stop(struct tg3 *tp)
 {
-	if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
+	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
 		return;
 
 	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
@@ -1569,16 +1566,21 @@
 
 static void tg3_phy_fini(struct tg3 *tp)
 {
-	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
+	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
 		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
-		tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
+		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
 	}
 }
 
-static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
+static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
 {
-	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
-	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
+	int err;
+
+	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
+	if (!err)
+		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
+
+	return err;
 }
 
 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
@@ -1608,10 +1610,10 @@
 	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
 	    ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
 	      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) &&
-	     (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
+	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
 		return;
 
-	if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
+	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
 		tg3_phy_fet_toggle_apd(tp, enable);
 		return;
 	}
@@ -1642,10 +1644,10 @@
 	u32 phy;
 
 	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
-	    (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
+	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
 		return;
 
-	if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
+	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
 		u32 ephy;
 
 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
@@ -1681,7 +1683,7 @@
 {
 	u32 val;
 
-	if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
+	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
 		return;
 
 	if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
@@ -1740,7 +1742,7 @@
 	while (limit--) {
 		u32 tmp32;
 
-		if (!tg3_readphy(tp, 0x16, &tmp32)) {
+		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
 			if ((tmp32 & 0x1000) == 0)
 				break;
 		}
@@ -1766,13 +1768,13 @@
 
 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
 			     (chan * 0x2000) | 0x0200);
-		tg3_writephy(tp, 0x16, 0x0002);
+		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
 
 		for (i = 0; i < 6; i++)
 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
 				     test_pat[chan][i]);
 
-		tg3_writephy(tp, 0x16, 0x0202);
+		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
 		if (tg3_wait_macro_done(tp)) {
 			*resetp = 1;
 			return -EBUSY;
@@ -1780,13 +1782,13 @@
 
 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
 			     (chan * 0x2000) | 0x0200);
-		tg3_writephy(tp, 0x16, 0x0082);
+		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
 		if (tg3_wait_macro_done(tp)) {
 			*resetp = 1;
 			return -EBUSY;
 		}
 
-		tg3_writephy(tp, 0x16, 0x0802);
+		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
 		if (tg3_wait_macro_done(tp)) {
 			*resetp = 1;
 			return -EBUSY;
@@ -1826,10 +1828,10 @@
 
 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
 			     (chan * 0x2000) | 0x0200);
-		tg3_writephy(tp, 0x16, 0x0002);
+		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
 		for (i = 0; i < 6; i++)
 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
-		tg3_writephy(tp, 0x16, 0x0202);
+		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
 		if (tg3_wait_macro_done(tp))
 			return -EBUSY;
 	}
@@ -1875,8 +1877,7 @@
 		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
 
 		/* Block the PHY control access.  */
-		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
-		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
+		tg3_phydsp_write(tp, 0x8005, 0x0800);
 
 		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
 		if (!err)
@@ -1887,11 +1888,10 @@
 	if (err)
 		return err;
 
-	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
-	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
+	tg3_phydsp_write(tp, 0x8005, 0x0000);
 
 	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
-	tg3_writephy(tp, 0x16, 0x0000);
+	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
 
 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
@@ -1984,42 +1984,37 @@
 
 	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
 	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) &&
-	    (tp->tg3_flags2 & TG3_FLG2_MII_SERDES))
+	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
 		return 0;
 
 	tg3_phy_apply_otp(tp);
 
-	if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
+	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
 		tg3_phy_toggle_apd(tp, true);
 	else
 		tg3_phy_toggle_apd(tp, false);
 
 out:
-	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
+	if (tp->phy_flags & TG3_PHYFLG_ADC_BUG) {
 		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
-		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
-		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
-		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
-		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
+		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
+		tg3_phydsp_write(tp, 0x000a, 0x0323);
 		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
 	}
-	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
-		tg3_writephy(tp, 0x1c, 0x8d68);
-		tg3_writephy(tp, 0x1c, 0x8d68);
+	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
+		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
+		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
 	}
-	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
+	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
 		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
-		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
-		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
-		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
-		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
-		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
-		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
+		tg3_phydsp_write(tp, 0x000a, 0x310b);
+		tg3_phydsp_write(tp, 0x201f, 0x9506);
+		tg3_phydsp_write(tp, 0x401f, 0x14e2);
 		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
-	} else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
+	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
 		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
-		if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
+		if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
 			tg3_writephy(tp, MII_TG3_TEST1,
 				     MII_TG3_TEST1_TRIM_EN | 0x4);
@@ -2204,7 +2199,7 @@
 {
 	u32 val;
 
-	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
+	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
 			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
 			u32 serdes_cfg = tr32(MAC_SERDES_CFG);
@@ -2223,7 +2218,7 @@
 		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
 		udelay(40);
 		return;
-	} else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
+	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
 		u32 phytest;
 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
 			u32 phy;
@@ -2260,7 +2255,7 @@
 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
-	     (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
+	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
 		return;
 
 	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
@@ -2563,14 +2558,14 @@
 
 	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
 		do_low_power = false;
-		if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
-		    !tp->link_config.phy_is_low_power) {
+		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
+		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
 			struct phy_device *phydev;
 			u32 phyid, advertising;
 
 			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
 
-			tp->link_config.phy_is_low_power = 1;
+			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
 
 			tp->link_config.orig_speed = phydev->speed;
 			tp->link_config.orig_duplex = phydev->duplex;
@@ -2609,14 +2604,14 @@
 	} else {
 		do_low_power = true;
 
-		if (tp->link_config.phy_is_low_power == 0) {
-			tp->link_config.phy_is_low_power = 1;
+		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
+			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
 			tp->link_config.orig_speed = tp->link_config.speed;
 			tp->link_config.orig_duplex = tp->link_config.duplex;
 			tp->link_config.orig_autoneg = tp->link_config.autoneg;
 		}
 
-		if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
+		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
 			tp->link_config.speed = SPEED_10;
 			tp->link_config.duplex = DUPLEX_HALF;
 			tp->link_config.autoneg = AUTONEG_ENABLE;
@@ -2649,13 +2644,13 @@
 	if (device_should_wake) {
 		u32 mac_mode;
 
-		if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
+		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
 			if (do_low_power) {
 				tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
 				udelay(40);
 			}
 
-			if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
+			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
 				mac_mode = MAC_MODE_PORT_MODE_GMII;
 			else
 				mac_mode = MAC_MODE_PORT_MODE_MII;
@@ -2823,7 +2818,7 @@
 		break;
 
 	default:
-		if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
+		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
 			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
 				 SPEED_10;
 			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
@@ -2841,7 +2836,7 @@
 	u32 new_adv;
 	int i;
 
-	if (tp->link_config.phy_is_low_power) {
+	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
 		/* Entering low power mode.  Disable gigabit and
 		 * 100baseT advertisements.
 		 */
@@ -2854,7 +2849,7 @@
 
 		tg3_writephy(tp, MII_ADVERTISE, new_adv);
 	} else if (tp->link_config.speed == SPEED_INVALID) {
-		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
+		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
 			tp->link_config.advertising &=
 				~(ADVERTISED_1000baseT_Half |
 				  ADVERTISED_1000baseT_Full);
@@ -2880,7 +2875,7 @@
 				new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
 			if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
 				new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
-			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
+			if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY) &&
 			    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
 			     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
 				new_adv |= (MII_TG3_CTRL_AS_MASTER |
@@ -2982,20 +2977,11 @@
 	/* Set Extended packet length bit */
 	err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
 
-	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
-	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
-
-	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
-	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
-
-	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
-	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
-
-	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
-	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
-
-	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
-	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
+	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
+	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
+	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
+	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
+	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
 
 	udelay(40);
 
@@ -3020,7 +3006,7 @@
 
 	if ((adv_reg & all_mask) != all_mask)
 		return 0;
-	if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
+	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
 		u32 tg3_ctrl;
 
 		all_mask = 0;
@@ -3148,18 +3134,18 @@
 		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
 		/* 5701 {A0,B0} CRC bug workaround */
 		tg3_writephy(tp, 0x15, 0x0a75);
-		tg3_writephy(tp, 0x1c, 0x8c68);
-		tg3_writephy(tp, 0x1c, 0x8d68);
-		tg3_writephy(tp, 0x1c, 0x8c68);
+		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
+		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
+		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
 	}
 
 	/* Clear pending interrupts... */
 	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
 	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
 
-	if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
+	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
 		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
-	else if (!(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET))
+	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
 		tg3_writephy(tp, MII_TG3_IMASK, ~0);
 
 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
@@ -3175,7 +3161,7 @@
 	current_speed = SPEED_INVALID;
 	current_duplex = DUPLEX_INVALID;
 
-	if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
+	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
 		u32 val;
 
 		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
@@ -3251,7 +3237,7 @@
 	}
 
 relink:
-	if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
+	if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
 		u32 tmp;
 
 		tg3_phy_copper_begin(tp);
@@ -3269,7 +3255,7 @@
 			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
 		else
 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
-	} else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)
+	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
 		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
 	else
 		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
@@ -3820,7 +3806,7 @@
 		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
 
 	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
-		if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
+		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
 		    tp->serdes_counter &&
 		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
 				    MAC_STATUS_RCVD_CFG)) ==
@@ -3837,7 +3823,7 @@
 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
 
 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
-		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
+		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
 	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
 				 MAC_STATUS_SIGNAL_DET)) {
 		sg_dig_status = tr32(SG_DIG_STATUS);
@@ -3860,7 +3846,7 @@
 			tg3_setup_flow_control(tp, local_adv, remote_adv);
 			current_link_up = 1;
 			tp->serdes_counter = 0;
-			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
+			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
 		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
 			if (tp->serdes_counter)
 				tp->serdes_counter--;
@@ -3887,8 +3873,8 @@
 				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
 					tg3_setup_flow_control(tp, 0, 0);
 					current_link_up = 1;
-					tp->tg3_flags2 |=
-						TG3_FLG2_PARALLEL_DETECT;
+					tp->phy_flags |=
+						TG3_PHYFLG_PARALLEL_DETECT;
 					tp->serdes_counter =
 						SERDES_PARALLEL_DET_TIMEOUT;
 				} else
@@ -3897,7 +3883,7 @@
 		}
 	} else {
 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
-		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
+		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
 	}
 
 out:
@@ -4114,7 +4100,7 @@
 	err |= tg3_readphy(tp, MII_BMCR, &bmcr);
 
 	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
-	    (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
+	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
 		/* do nothing, just check for link up at the end */
 	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
 		u32 adv, new_adv;
@@ -4139,7 +4125,7 @@
 
 			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
 			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
-			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
+			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
 
 			return err;
 		}
@@ -4184,7 +4170,7 @@
 				else
 					bmsr &= ~BMSR_LSTATUS;
 			}
-			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
+			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
 		}
 	}
 
@@ -4239,7 +4225,7 @@
 			netif_carrier_on(tp->dev);
 		else {
 			netif_carrier_off(tp->dev);
-			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
+			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
 		}
 		tg3_link_report(tp);
 	}
@@ -4263,13 +4249,14 @@
 			u32 phy1, phy2;
 
 			/* Select shadow register 0x1f */
-			tg3_writephy(tp, 0x1c, 0x7c00);
-			tg3_readphy(tp, 0x1c, &phy1);
+			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
+			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
 
 			/* Select expansion interrupt status register */
-			tg3_writephy(tp, 0x17, 0x0f01);
-			tg3_readphy(tp, 0x15, &phy2);
-			tg3_readphy(tp, 0x15, &phy2);
+			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
+					 MII_TG3_DSP_EXP1_INT_STAT);
+			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
+			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
 
 			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
 				/* We have signal detect and not receiving
@@ -4280,17 +4267,18 @@
 				bmcr &= ~BMCR_ANENABLE;
 				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
 				tg3_writephy(tp, MII_BMCR, bmcr);
-				tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
+				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
 			}
 		}
 	} else if (netif_carrier_ok(tp->dev) &&
 		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
-		   (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
+		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
 		u32 phy2;
 
 		/* Select expansion interrupt status register */
-		tg3_writephy(tp, 0x17, 0x0f01);
-		tg3_readphy(tp, 0x15, &phy2);
+		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
+				 MII_TG3_DSP_EXP1_INT_STAT);
+		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
 		if (phy2 & 0x20) {
 			u32 bmcr;
 
@@ -4298,7 +4286,7 @@
 			tg3_readphy(tp, MII_BMCR, &bmcr);
 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
 
-			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
+			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
 
 		}
 	}
@@ -4308,9 +4296,9 @@
 {
 	int err;
 
-	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
+	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
 		err = tg3_setup_fiber_phy(tp, force_reset);
-	else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
+	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
 		err = tg3_setup_fiber_mii_phy(tp, force_reset);
 	else
 		err = tg3_setup_copper_phy(tp, force_reset);
@@ -4389,7 +4377,8 @@
 
 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
 {
-	smp_mb();
+	/* Tell compiler to fetch tx indices from memory. */
+	barrier();
 	return tnapi->tx_pending -
 	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
 }
@@ -5673,6 +5662,13 @@
 	tnapi->tx_prod = entry;
 	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
 		netif_tx_stop_queue(txq);
+
+		/* netif_tx_stop_queue() must be done before checking
+		 * tx index in tg3_tx_avail() below, because in
+		 * tg3_tx(), we update tx index before checking for
+		 * netif_tx_queue_stopped().
+		 */
+		smp_mb();
 		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
 			netif_tx_wake_queue(txq);
 	}
@@ -5718,6 +5714,13 @@
 	/* Estimate the number of fragments in the worst case */
 	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
 		netif_stop_queue(tp->dev);
+
+		/* netif_tx_stop_queue() must be done before checking
+		 * tx index in tg3_tx_avail() below, because in
+		 * tg3_tx(), we update tx index before checking for
+		 * netif_tx_queue_stopped().
+		 */
+		smp_mb();
 		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
 			return NETDEV_TX_BUSY;
 
@@ -5953,6 +5956,13 @@
 	tnapi->tx_prod = entry;
 	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
 		netif_tx_stop_queue(txq);
+
+		/* netif_tx_stop_queue() must be done before checking
+		 * tx index in tg3_tx_avail() below, because in
+		 * tg3_tx(), we update tx index before checking for
+		 * netif_tx_queue_stopped().
+		 */
+		smp_mb();
 		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
 			netif_tx_wake_queue(txq);
 	}
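
The three hunks above are the standard lockless stop/wake pairing between a transmit path and its completion handler. tg3_tx_avail() itself was relaxed to a compiler-only barrier(); the full smp_mb() is now issued explicitly after stopping the queue, so the re-check of the ring indices cannot be reordered ahead of the stop becoming visible to tg3_tx(). A minimal sketch of the pattern, with hypothetical names (ring_space(), LOW_WATER, HIGH_WATER) rather than the driver's own:

	/* Producer (xmit path), sketched: */
	if (unlikely(ring_space(r) <= LOW_WATER)) {
		netif_tx_stop_queue(txq);
		smp_mb();			/* publish "stopped" before re-reading indices */
		if (ring_space(r) > HIGH_WATER)	/* completion freed space meanwhile */
			netif_tx_wake_queue(txq);
	}

	/* Consumer (completion path, cf. tg3_tx()), sketched: */
	r->cons = new_cons;			/* publish freed space first... */
	smp_mb();
	if (netif_tx_queue_stopped(txq) &&	/* ...then test the stopped flag */
	    ring_space(r) > HIGH_WATER)
		netif_tx_wake_queue(txq);

Without a barrier on each side, both CPUs can act on stale values and the queue can remain stopped with no one left to wake it.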
@@ -6929,9 +6939,13 @@
 	val = GRC_MISC_CFG_CORECLK_RESET;
 
 	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
-		if (tr32(0x7e2c) == 0x60) {
-			tw32(0x7e2c, 0x20);
-		}
+		/* Force PCIe 1.0a mode */
+		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
+		    !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
+		    tr32(TG3_PCIE_PHY_TSTCTL) ==
+		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
+			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
+
 		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
 			tw32(GRC_MISC_CFG, (1 << 29));
 			val |= (1 << 29);
@@ -6944,8 +6958,11 @@
 		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
 	}
 
-	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
+	/* Manage gphy power for all CPMU-absent PCIe devices. */
+	if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
+	    !(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT))
 		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
+
 	tw32(GRC_MISC_CFG, val);
 
 	/* restore 5701 hardware bug workaround write method */
@@ -7002,8 +7019,7 @@
 		 * Older PCIe devices only support the 128 byte
 		 * MPS setting.  Enforce the restriction.
 		 */
-		if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
-		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784))
+		if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT))
 			val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
 		pci_write_config_word(tp->pdev,
 				      tp->pcie_cap + PCI_EXP_DEVCTL,
@@ -7050,10 +7066,10 @@
 		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
 	}
 
-	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
+	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
 		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
 		tw32_f(MAC_MODE, tp->mac_mode);
-	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
+	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
 		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
 		tw32_f(MAC_MODE, tp->mac_mode);
 	} else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
@@ -7076,9 +7092,7 @@
 	if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
 	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
-	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
-	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719 &&
-	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) {
+	    !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
 		val = tr32(0x7c00);
 
 		tw32(0x7c00, val | (1 << 25));
@@ -7751,9 +7765,7 @@
 	if (err)
 		return err;
 
-	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
-	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
-	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
+	if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
 		val = tr32(TG3PCI_DMA_RW_CTRL) &
 		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
 		if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
@@ -7916,9 +7928,7 @@
 			     BDINFO_FLAGS_DISABLED);
 		}
 
-		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
-		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
-		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+		if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
 			val = (RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT) |
 			      (TG3_RX_STD_DMA_SZ << 2);
 		else
@@ -7935,9 +7945,7 @@
 			  tp->rx_jumbo_pending : 0;
 	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
 
-	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
-	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
-	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
+	if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
 		tw32(STD_REPLENISH_LWM, 32);
 		tw32(JMB_REPLENISH_LWM, 16);
 	}
@@ -8065,8 +8073,8 @@
 	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
 		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
 
-	if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
-		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
+	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
+		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
 		/* reset to prevent losing 1st rx packet intermittently */
 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
 		udelay(10);
@@ -8079,7 +8087,7 @@
 	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
 		MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
 	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
-	    !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
+	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
 		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
 	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
@@ -8264,16 +8272,16 @@
 	tw32(MAC_LED_CTRL, tp->led_ctrl);
 
 	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
-	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
+	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
 		udelay(10);
 	}
 	tw32_f(MAC_RX_MODE, tp->rx_mode);
 	udelay(10);
 
-	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
+	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
 		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
-			!(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
+			!(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
 			/* Set drive transmission level to 1.2V  */
 			/* only if the signal pre-emphasis bit is not set  */
 			val = tr32(MAC_SERDES_CFG);
@@ -8295,12 +8303,12 @@
 	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
 
 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
-	    (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
+	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
 		/* Use hardware link auto-negotiation */
 		tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
 	}
 
-	if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
+	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
 		u32 tmp;
 
@@ -8312,8 +8320,8 @@
 	}
 
 	if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
-		if (tp->link_config.phy_is_low_power) {
-			tp->link_config.phy_is_low_power = 0;
+		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
+			tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
 			tp->link_config.speed = tp->link_config.orig_speed;
 			tp->link_config.duplex = tp->link_config.orig_duplex;
 			tp->link_config.autoneg = tp->link_config.orig_autoneg;
@@ -8323,15 +8331,15 @@
 		if (err)
 			return err;
 
-		if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
-		    !(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)) {
+		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
+		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
 			u32 tmp;
 
 			/* Clear CRC stats. */
 			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
 				tg3_writephy(tp, MII_TG3_TEST1,
 					     tmp | MII_TG3_TEST1_CRC_EN);
-				tg3_readphy(tp, 0x14, &tmp);
+				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
 			}
 		}
 	}
@@ -8499,7 +8507,7 @@
 			mac_stat = tr32(MAC_STATUS);
 
 			phy_event = 0;
-			if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
+			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
 				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
 					phy_event = 1;
 			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
@@ -8531,7 +8539,7 @@
 				}
 				tg3_setup_phy(tp, 0);
 			}
-		} else if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
+		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
 			   (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
 			tg3_serdes_parallel_detect(tp);
 		}
@@ -8627,9 +8635,7 @@
 	 * Turn off MSI one shot mode.  Otherwise this test has no
 	 * observable way to know whether the interrupt was delivered.
 	 */
-	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
-	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
-	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
+	if ((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
 	    (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
 		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
 		tw32(MSGINT_MODE, val);
@@ -8672,9 +8678,7 @@
 
 	if (intr_ok) {
 		/* Reenable MSI one shot mode. */
-		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
-		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
-		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
+		if ((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
 		    (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
 			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
 			tw32(MSGINT_MODE, val);
@@ -8865,7 +8869,7 @@
 	else if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
 		pci_disable_msi(tp->pdev);
 	tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI_OR_MSIX;
-	tp->tg3_flags3 &= ~TG3_FLG3_ENABLE_RSS;
+	tp->tg3_flags3 &= ~(TG3_FLG3_ENABLE_RSS | TG3_FLG3_ENABLE_TSS);
 }
 
 static int tg3_open(struct net_device *dev)
@@ -8969,11 +8973,8 @@
 			goto err_out2;
 		}
 
-		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
-		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719 &&
-		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
-		    (tp->tg3_flags2 & TG3_FLG2_USING_MSI) &&
-		    (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)) {
+		if (!(tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
+		    (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
 			u32 val = tr32(PCIE_TRANSACTION_CFG);
 
 			tw32(PCIE_TRANSACTION_CFG,
@@ -9068,7 +9069,7 @@
 {
 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
 
-	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
+	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
 	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
 		u32 val;
@@ -9077,7 +9078,7 @@
 		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
 			tg3_writephy(tp, MII_TG3_TEST1,
 				     val | MII_TG3_TEST1_CRC_EN);
-			tg3_readphy(tp, 0x14, &val);
+			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
 		} else
 			val = 0;
 		spin_unlock_bh(&tp->lock);
@@ -9367,7 +9368,7 @@
 
 	memset(p, 0, TG3_REGDUMP_LEN);
 
-	if (tp->link_config.phy_is_low_power)
+	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
 		return;
 
 	tg3_full_lock(tp, 0);
@@ -9446,7 +9447,7 @@
 	if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
 		return -EINVAL;
 
-	if (tp->link_config.phy_is_low_power)
+	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
 		return -EAGAIN;
 
 	offset = eeprom->offset;
@@ -9508,7 +9509,7 @@
 	u8 *buf;
 	__be32 start, end;
 
-	if (tp->link_config.phy_is_low_power)
+	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
 		return -EAGAIN;
 
 	if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
@@ -9565,7 +9566,7 @@
 
 	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
 		struct phy_device *phydev;
-		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
+		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
 			return -EAGAIN;
 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
 		return phy_ethtool_gset(phydev, cmd);
@@ -9573,11 +9574,11 @@
 
 	cmd->supported = (SUPPORTED_Autoneg);
 
-	if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
+	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
 		cmd->supported |= (SUPPORTED_1000baseT_Half |
 				   SUPPORTED_1000baseT_Full);
 
-	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
+	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
 		cmd->supported |= (SUPPORTED_100baseT_Half |
 				  SUPPORTED_100baseT_Full |
 				  SUPPORTED_10baseT_Half |
@@ -9608,7 +9609,7 @@
 
 	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
 		struct phy_device *phydev;
-		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
+		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
 			return -EAGAIN;
 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
 		return phy_ethtool_sset(phydev, cmd);
@@ -9628,11 +9629,11 @@
 			   ADVERTISED_Pause |
 			   ADVERTISED_Asym_Pause;
 
-		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
+		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
 			mask |= ADVERTISED_1000baseT_Half |
 				ADVERTISED_1000baseT_Full;
 
-		if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
+		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
 			mask |= ADVERTISED_100baseT_Half |
 				ADVERTISED_100baseT_Full |
 				ADVERTISED_10baseT_Half |
@@ -9653,7 +9654,7 @@
 
 		cmd->advertising &= mask;
 	} else {
-		if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
+		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
 			if (cmd->speed != SPEED_1000)
 				return -EINVAL;
 
@@ -9789,11 +9790,11 @@
 	if (!netif_running(dev))
 		return -EAGAIN;
 
-	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
+	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
 		return -EINVAL;
 
 	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
-		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
+		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
 			return -EAGAIN;
 		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
 	} else {
@@ -9804,7 +9805,7 @@
 		tg3_readphy(tp, MII_BMCR, &bmcr);
 		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
 		    ((bmcr & BMCR_ANENABLE) ||
-		     (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
+		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
 						   BMCR_ANENABLE);
 			r = 0;
@@ -9939,7 +9940,7 @@
 		else
 			tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
 
-		if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
+		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
 			u32 oldadv = phydev->advertising &
 				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
 			if (oldadv != newadv) {
@@ -10268,7 +10269,7 @@
 	if (!netif_running(tp->dev))
 		return -ENODEV;
 
-	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
+	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
 		max = TG3_SERDES_TIMEOUT_SEC;
 	else
 		max = TG3_COPPER_TIMEOUT_SEC;
@@ -10630,7 +10631,7 @@
 			   MAC_MODE_PORT_INT_LPBACK;
 		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
 			mac_mode |= MAC_MODE_LINK_POLARITY;
-		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
+		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
 			mac_mode |= MAC_MODE_PORT_MODE_MII;
 		else
 			mac_mode |= MAC_MODE_PORT_MODE_GMII;
@@ -10638,7 +10639,7 @@
 	} else if (loopback_mode == TG3_PHY_LOOPBACK) {
 		u32 val;
 
-		if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
+		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
 			tg3_phy_fet_toggle_apd(tp, false);
 			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
 		} else
@@ -10650,7 +10651,7 @@
 		udelay(40);
 
 		mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
-		if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
+		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
 			tg3_writephy(tp, MII_TG3_FET_PTEST,
 				     MII_TG3_FET_PTEST_FRC_TX_LINK |
 				     MII_TG3_FET_PTEST_FRC_TX_LOCK);
@@ -10662,7 +10663,7 @@
 			mac_mode |= MAC_MODE_PORT_MODE_GMII;
 
 		/* reset to prevent losing 1st rx packet intermittently */
-		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
+		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
 			tw32_f(MAC_RX_MODE, RX_MODE_RESET);
 			udelay(10);
 			tw32_f(MAC_RX_MODE, tp->rx_mode);
@@ -10793,7 +10794,7 @@
 		return TG3_LOOPBACK_FAILED;
 
 	/* Turn off gphy autopowerdown. */
-	if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
+	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
 		tg3_phy_toggle_apd(tp, false);
 
 	if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
@@ -10830,14 +10831,14 @@
 		tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
 	}
 
-	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
+	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
 	    !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
 		if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
 			err |= TG3_PHY_LOOPBACK_FAILED;
 	}
 
 	/* Re-enable gphy autopowerdown. */
-	if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
+	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
 		tg3_phy_toggle_apd(tp, true);
 
 	return err;
@@ -10848,7 +10849,7 @@
 {
 	struct tg3 *tp = netdev_priv(dev);
 
-	if (tp->link_config.phy_is_low_power)
+	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
 		tg3_set_power_state(tp, PCI_D0);
 
 	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
@@ -10880,7 +10881,7 @@
 		if (!err)
 			tg3_nvram_unlock(tp);
 
-		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
+		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
 			tg3_phy_reset(tp);
 
 		if (tg3_test_registers(tp) != 0) {
@@ -10916,7 +10917,7 @@
 		if (irq_sync && !err2)
 			tg3_phy_start(tp);
 	}
-	if (tp->link_config.phy_is_low_power)
+	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
 		tg3_set_power_state(tp, PCI_D3hot);
 
 }
@@ -10929,7 +10930,7 @@
 
 	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
 		struct phy_device *phydev;
-		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
+		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
 			return -EAGAIN;
 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
 		return phy_mii_ioctl(phydev, ifr, cmd);
@@ -10943,10 +10944,10 @@
 	case SIOCGMIIREG: {
 		u32 mii_regval;
 
-		if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
+		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
 			break;			/* We have no PHY */
 
-		if (tp->link_config.phy_is_low_power)
+		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
 			return -EAGAIN;
 
 		spin_lock_bh(&tp->lock);
@@ -10959,10 +10960,10 @@
 	}
 
 	case SIOCSMIIREG:
-		if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
+		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
 			break;			/* We have no PHY */
 
-		if (tp->link_config.phy_is_low_power)
+		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
 			return -EAGAIN;
 
 		spin_lock_bh(&tp->lock);
@@ -12090,9 +12091,9 @@
 		tp->phy_id = eeprom_phy_id;
 		if (eeprom_phy_serdes) {
 			if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
-				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
+				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
 			else
-				tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
+				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
 		}
 
 		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
@@ -12176,7 +12177,7 @@
 			(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
 			tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
 
-		if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
+		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
 		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
 			tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
 
@@ -12185,19 +12186,21 @@
 			tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
 
 		if (cfg2 & (1 << 17))
-			tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
+			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
 
 		/* serdes signal pre-emphasis in register 0x590 set by */
 		/* bootcode if bit 18 is set */
 		if (cfg2 & (1 << 18))
-			tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
+			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
 
 		if (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
 		      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
 		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
-			tp->tg3_flags3 |= TG3_FLG3_PHY_ENABLE_APD;
+			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
 
-		if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
+		if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
+		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
+		    !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
 			u32 cfg3;
 
 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
@@ -12302,9 +12305,9 @@
 	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
 		tp->phy_id = hw_phy_id;
 		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
-			tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
+			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
 		else
-			tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
+			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
 	} else {
 		if (tp->phy_id != TG3_PHY_ID_INVALID) {
 			/* Do nothing, phy ID already set up in
@@ -12323,11 +12326,11 @@
 			tp->phy_id = p->phy_id;
 			if (!tp->phy_id ||
 			    tp->phy_id == TG3_PHY_ID_BCM8002)
-				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
+				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
 		}
 	}
 
-	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
+	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
 	    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
 	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
 		u32 bmsr, adv_reg, tg3_ctrl, mask;
@@ -12345,7 +12348,7 @@
 			   ADVERTISE_100HALF | ADVERTISE_100FULL |
 			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
 		tg3_ctrl = 0;
-		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
+		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
 			tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
 				    MII_TG3_CTRL_ADV_1000_FULL);
 			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
@@ -12360,7 +12363,7 @@
 		if (!tg3_copper_is_advertising_all(tp, mask)) {
 			tg3_writephy(tp, MII_ADVERTISE, adv_reg);
 
-			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
+			if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
 				tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
 
 			tg3_writephy(tp, MII_BMCR,
@@ -12369,7 +12372,7 @@
 		tg3_phy_set_wirespeed(tp);
 
 		tg3_writephy(tp, MII_ADVERTISE, adv_reg);
-		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
+		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
 			tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
 	}
 
@@ -12382,13 +12385,13 @@
 		err = tg3_init_5401phy_dsp(tp);
 	}
 
-	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
+	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
 		tp->link_config.advertising =
 			(ADVERTISED_1000baseT_Half |
 			 ADVERTISED_1000baseT_Full |
 			 ADVERTISED_Autoneg |
 			 ADVERTISED_FIBRE);
-	if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
+	if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
 		tp->link_config.advertising &=
 			~(ADVERTISED_1000baseT_Half |
 			  ADVERTISED_1000baseT_Full);
@@ -12717,6 +12720,7 @@
 {
 	int vlen;
 	u32 apedata;
+	char *fwtype;
 
 	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) ||
 	    !(tp->tg3_flags  & TG3_FLAG_ENABLE_ASF))
@@ -12732,9 +12736,15 @@
 
 	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
 
+	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
+		fwtype = "NCSI";
+	else
+		fwtype = "DASH";
+
 	vlen = strlen(tp->fw_ver);
 
-	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " DASH v%d.%d.%d.%d",
+	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
+		 fwtype,
 		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
 		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
 		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
@@ -12988,6 +12998,11 @@
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
 		tp->pdev_peer = tg3_find_peer(tp);
 
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+		tp->tg3_flags3 |= TG3_FLG3_5717_PLUS;
+
 	/* Intentionally exclude ASIC_REV_5906 */
 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
@@ -12995,9 +13010,7 @@
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
-	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
-	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
-	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+	    (tp->tg3_flags3 & TG3_FLG3_5717_PLUS))
 		tp->tg3_flags3 |= TG3_FLG3_5755_PLUS;
 
 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
@@ -13027,9 +13040,7 @@
 	}
 
 	/* Determine TSO capabilities */
-	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
-	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
-	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+	if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
 		tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3;
 	else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
 		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
@@ -13065,9 +13076,7 @@
 			tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
 		}
 
-		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
-		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
-		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
+		if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
 			tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX;
 			tp->irq_max = TG3_IRQ_MAX_VECS;
 		}
@@ -13082,9 +13091,7 @@
 		tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG;
 	}
 
-	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
-	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
-	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+	if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
 		tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG;
 
 	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
@@ -13285,9 +13292,7 @@
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
-	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
-	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
-	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+	    (tp->tg3_flags3 & TG3_FLG3_5717_PLUS))
 		tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
 
 	/* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
@@ -13345,41 +13350,39 @@
 	}
 
 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
-		tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET;
+		tp->phy_flags |= TG3_PHYFLG_IS_FET;
 
 	/* A few boards don't want Ethernet@WireSpeed phy feature */
 	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
 	    ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
 	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
 	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
-	    (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) ||
-	    (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
-		tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
+	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
+	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
+		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
 
 	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
 	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
-		tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
+		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
 	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
-		tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
+		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
 
 	if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
-	    !(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) &&
+	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
-	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
-	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719 &&
-	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) {
+	    !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
 			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
 			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
-				tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
+				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
 			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
-				tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
+				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
 		} else
-			tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
+			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
 	}
 
 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
@@ -13492,8 +13495,8 @@
 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
-	    (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET))
-		tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
+	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
+		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
 
 	err = tg3_phy_probe(tp);
 	if (err) {
@@ -13505,13 +13508,13 @@
 	tg3_read_vpd(tp);
 	tg3_read_fw_ver(tp);
 
-	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
-		tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
+	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
+		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
 	} else {
 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
-			tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
+			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
 		else
-			tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
+			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
 	}
 
 	/* 5700 {AX,BX} chips have a broken status block link
@@ -13529,13 +13532,13 @@
 	 */
 	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
-	    !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
-		tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
-				  TG3_FLAG_USE_LINKCHG_REG);
+	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
+		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
+		tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
 	}
 
 	/* For all SERDES we poll the MAC status register. */
-	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
+	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
 		tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
 	else
 		tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
@@ -13705,9 +13708,7 @@
 #endif
 #endif
 
-	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
-	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
-	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
+	if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
 		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
 		goto out;
 	}
@@ -13918,9 +13919,7 @@
 
 	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
 
-	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
-	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
-	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+	if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
 		goto out;
 
 	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
@@ -14110,7 +14109,6 @@
 	tp->link_config.autoneg = AUTONEG_ENABLE;
 	tp->link_config.active_speed = SPEED_INVALID;
 	tp->link_config.active_duplex = DUPLEX_INVALID;
-	tp->link_config.phy_is_low_power = 0;
 	tp->link_config.orig_speed = SPEED_INVALID;
 	tp->link_config.orig_duplex = DUPLEX_INVALID;
 	tp->link_config.orig_autoneg = AUTONEG_INVALID;
@@ -14118,9 +14116,7 @@
 
 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
 {
-	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
-	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
-	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
+	if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
 		tp->bufmgr_config.mbuf_read_dma_low_water =
 			DEFAULT_MB_RDMA_LOW_WATER_5705;
 		tp->bufmgr_config.mbuf_mac_rx_low_water =
@@ -14645,24 +14641,31 @@
 		    tg3_bus_string(tp, str),
 		    dev->dev_addr);
 
-	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
+	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
 		struct phy_device *phydev;
 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
 		netdev_info(dev,
 			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
 			    phydev->drv->name, dev_name(&phydev->dev));
-	} else
+	} else {
+		char *ethtype;
+
+		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
+			ethtype = "10/100Base-TX";
+		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
+			ethtype = "1000Base-SX";
+		else
+			ethtype = "10/100/1000Base-T";
+
 		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
-			    "(WireSpeed[%d])\n", tg3_phy_string(tp),
-			    ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
-			     ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
-			      "10/100/1000Base-T")),
-			    (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0);
+			    "(WireSpeed[%d])\n", tg3_phy_string(tp), ethtype,
+			  (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0);
+	}
 
 	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
 		    (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
 		    (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
-		    (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
+		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
 		    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
 		    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
 	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 0432399..4937bd1 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -1844,6 +1844,10 @@
 #define  TG3_PCIE_LNKCTL_L1_PLL_PD_DIS	 0x00000080
 /* 0x7d58 --> 0x7e70 unused */
 
+#define TG3_PCIE_PHY_TSTCTL		0x00007e2c
+#define  TG3_PCIE_PHY_TSTCTL_PCIE10	 0x00000040
+#define  TG3_PCIE_PHY_TSTCTL_PSCRAM	 0x00000020
+
 #define TG3_PCIE_EIDLE_DELAY		0x00007e70
 #define  TG3_PCIE_EIDLE_DELAY_MASK	 0x0000001f
 #define  TG3_PCIE_EIDLE_DELAY_13_CLKS	 0x0000000c
@@ -2053,8 +2057,9 @@
 #define MII_TG3_EXT_STAT		0x11 /* Extended status register */
 #define  MII_TG3_EXT_STAT_LPASS		0x0100
 
+#define MII_TG3_RXR_COUNTERS		0x14 /* Local/Remote Receiver Counts */
 #define MII_TG3_DSP_RW_PORT		0x15 /* DSP coefficient read/write port */
-
+#define MII_TG3_DSP_CONTROL		0x16 /* DSP control register */
 #define MII_TG3_DSP_ADDRESS		0x17 /* DSP address register */
 
 #define MII_TG3_DSP_TAP1		0x0001
@@ -2062,6 +2067,7 @@
 #define MII_TG3_DSP_AADJ1CH0		0x001f
 #define MII_TG3_DSP_AADJ1CH3		0x601f
 #define  MII_TG3_DSP_AADJ1CH3_ADCCKADJ	0x0002
+#define MII_TG3_DSP_EXP1_INT_STAT	0x0f01
 #define MII_TG3_DSP_EXP8		0x0f08
 #define  MII_TG3_DSP_EXP8_REJ2MHz	0x0001
 #define  MII_TG3_DSP_EXP8_AEDW		0x0200
@@ -2157,6 +2163,8 @@
 /* APE shared memory.  Accessible through BAR1 */
 #define TG3_APE_FW_STATUS		0x400c
 #define  APE_FW_STATUS_READY		 0x00000100
+#define TG3_APE_FW_FEATURES		0x4010
+#define  TG3_APE_FW_FEATURE_NCSI	 0x00000002
 #define TG3_APE_FW_VERSION		0x4018
 #define  APE_FW_VERSION_MAJMSK		 0xff000000
 #define  APE_FW_VERSION_MAJSFT		 24
@@ -2526,7 +2534,6 @@
 	/* When we go in and out of low power mode we need
 	 * to swap with this state.
 	 */
-	int				phy_is_low_power;
 	u16				orig_speed;
 	u8				orig_duplex;
 	u8				orig_autoneg;
@@ -2767,7 +2774,6 @@
 #define TG3_FLAG_TXD_MBOX_HWBUG		0x00000002
 #define TG3_FLAG_RX_CHECKSUMS		0x00000004
 #define TG3_FLAG_USE_LINKCHG_REG	0x00000008
-#define TG3_FLAG_USE_MI_INTERRUPT	0x00000010
 #define TG3_FLAG_ENABLE_ASF		0x00000020
 #define TG3_FLAG_ASPM_WORKAROUND	0x00000040
 #define TG3_FLAG_POLL_SERDES		0x00000080
@@ -2789,7 +2795,6 @@
 #define TG3_FLAG_TX_RECOVERY_PENDING	0x00200000
 #define TG3_FLAG_WOL_CAP		0x00400000
 #define TG3_FLAG_JUMBO_RING_ENABLE	0x00800000
-#define TG3_FLAG_10_100_ONLY		0x01000000
 #define TG3_FLAG_PAUSE_AUTONEG		0x02000000
 #define TG3_FLAG_CPMU_PRESENT		0x04000000
 #define TG3_FLAG_40BIT_DMA_BUG		0x08000000
@@ -2800,22 +2805,15 @@
 	u32				tg3_flags2;
 #define TG3_FLG2_RESTART_TIMER		0x00000001
 #define TG3_FLG2_TSO_BUG		0x00000002
-#define TG3_FLG2_NO_ETH_WIRE_SPEED	0x00000004
 #define TG3_FLG2_IS_5788		0x00000008
 #define TG3_FLG2_MAX_RXPEND_64		0x00000010
 #define TG3_FLG2_TSO_CAPABLE		0x00000020
-#define TG3_FLG2_PHY_ADC_BUG		0x00000040
-#define TG3_FLG2_PHY_5704_A0_BUG	0x00000080
-#define TG3_FLG2_PHY_BER_BUG		0x00000100
 #define TG3_FLG2_PCI_EXPRESS		0x00000200
 #define TG3_FLG2_ASF_NEW_HANDSHAKE	0x00000400
 #define TG3_FLG2_HW_AUTONEG		0x00000800
 #define TG3_FLG2_IS_NIC			0x00001000
-#define TG3_FLG2_PHY_SERDES		0x00002000
-#define TG3_FLG2_CAPACITIVE_COUPLING	0x00004000
 #define TG3_FLG2_FLASH			0x00008000
 #define TG3_FLG2_HW_TSO_1		0x00010000
-#define TG3_FLG2_SERDES_PREEMPHASIS	0x00020000
 #define TG3_FLG2_5705_PLUS		0x00040000
 #define TG3_FLG2_5750_PLUS		0x00080000
 #define TG3_FLG2_HW_TSO_3		0x00100000
@@ -2823,10 +2821,6 @@
 #define TG3_FLG2_USING_MSIX		0x00400000
 #define TG3_FLG2_USING_MSI_OR_MSIX	(TG3_FLG2_USING_MSI | \
 					TG3_FLG2_USING_MSIX)
-#define TG3_FLG2_MII_SERDES		0x00800000
-#define TG3_FLG2_ANY_SERDES		(TG3_FLG2_PHY_SERDES |	\
-					TG3_FLG2_MII_SERDES)
-#define TG3_FLG2_PARALLEL_DETECT	0x01000000
 #define TG3_FLG2_ICH_WORKAROUND		0x02000000
 #define TG3_FLG2_5780_CLASS		0x04000000
 #define TG3_FLG2_HW_TSO_2		0x08000000
@@ -2834,9 +2828,7 @@
 					 TG3_FLG2_HW_TSO_2 | \
 					 TG3_FLG2_HW_TSO_3)
 #define TG3_FLG2_1SHOT_MSI		0x10000000
-#define TG3_FLG2_PHY_JITTER_BUG		0x20000000
 #define TG3_FLG2_NO_FWARE_REPORTED	0x40000000
-#define TG3_FLG2_PHY_ADJUST_TRIM	0x80000000
 	u32				tg3_flags3;
 #define TG3_FLG3_NO_NVRAM_ADDR_TRANS	0x00000001
 #define TG3_FLG3_ENABLE_APE		0x00000002
@@ -2844,15 +2836,12 @@
 #define TG3_FLG3_5701_DMA_BUG		0x00000008
 #define TG3_FLG3_USE_PHYLIB		0x00000010
 #define TG3_FLG3_MDIOBUS_INITED		0x00000020
-#define TG3_FLG3_PHY_CONNECTED		0x00000080
 #define TG3_FLG3_RGMII_INBAND_DISABLE	0x00000100
 #define TG3_FLG3_RGMII_EXT_IBND_RX_EN	0x00000200
 #define TG3_FLG3_RGMII_EXT_IBND_TX_EN	0x00000400
 #define TG3_FLG3_CLKREQ_BUG		0x00000800
-#define TG3_FLG3_PHY_ENABLE_APD		0x00001000
 #define TG3_FLG3_5755_PLUS		0x00002000
 #define TG3_FLG3_NO_NVRAM		0x00004000
-#define TG3_FLG3_PHY_IS_FET		0x00010000
 #define TG3_FLG3_ENABLE_RSS		0x00020000
 #define TG3_FLG3_ENABLE_TSS		0x00040000
 #define TG3_FLG3_4G_DMA_BNDRY_BUG	0x00080000
@@ -2860,6 +2849,7 @@
 #define TG3_FLG3_SHORT_DMA_BUG		0x00200000
 #define TG3_FLG3_USE_JUMBO_BDFLAG	0x00400000
 #define TG3_FLG3_L1PLLPD_EN		0x00800000
+#define TG3_FLG3_5717_PLUS		0x01000000
 
 	struct timer_list		timer;
 	u16				timer_counter;
@@ -2956,6 +2946,27 @@
 	 (X) == TG3_PHY_ID_BCM57765 || (X) == TG3_PHY_ID_BCM5719C || \
 	 (X) == TG3_PHY_ID_BCM8002)
 
+	u32				phy_flags;
+#define TG3_PHYFLG_IS_LOW_POWER		0x00000001
+#define TG3_PHYFLG_IS_CONNECTED		0x00000002
+#define TG3_PHYFLG_USE_MI_INTERRUPT	0x00000004
+#define TG3_PHYFLG_PHY_SERDES		0x00000010
+#define TG3_PHYFLG_MII_SERDES		0x00000020
+#define TG3_PHYFLG_ANY_SERDES		(TG3_PHYFLG_PHY_SERDES |	\
+					TG3_PHYFLG_MII_SERDES)
+#define TG3_PHYFLG_IS_FET		0x00000040
+#define TG3_PHYFLG_10_100_ONLY		0x00000080
+#define TG3_PHYFLG_ENABLE_APD		0x00000100
+#define TG3_PHYFLG_CAPACITIVE_COUPLING	0x00000200
+#define TG3_PHYFLG_NO_ETH_WIRE_SPEED	0x00000400
+#define TG3_PHYFLG_JITTER_BUG		0x00000800
+#define TG3_PHYFLG_ADJUST_TRIM		0x00001000
+#define TG3_PHYFLG_ADC_BUG		0x00002000
+#define TG3_PHYFLG_5704_A0_BUG		0x00004000
+#define TG3_PHYFLG_BER_BUG		0x00008000
+#define TG3_PHYFLG_SERDES_PREEMPHASIS	0x00010000
+#define TG3_PHYFLG_PARALLEL_DETECT	0x00020000
+
 	u32				led_ctrl;
 	u32				phy_otp;
 
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 14e5312..3a8d7ef 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -1341,6 +1341,12 @@
         if (pdev->subsystem_vendor == PCI_VENDOR_ID_LMC) {
 		pr_err(PFX "skipping LMC card\n");
 		return -ENODEV;
+	} else if (pdev->subsystem_vendor == PCI_VENDOR_ID_SBE &&
+		   (pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_T3E3 ||
+		    pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P0 ||
+		    pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P1)) {
+		pr_err(PFX "skipping SBE T3E3 port\n");
+		return -ENODEV;
 	}
 
 	/*
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 6ad6fe7..55f3a3e 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -149,6 +149,7 @@
 	tfile->tun = tun;
 	tun->tfile = tfile;
 	tun->socket.file = file;
+	netif_carrier_on(tun->dev);
 	dev_hold(tun->dev);
 	sock_hold(tun->socket.sk);
 	atomic_inc(&tfile->count);
@@ -162,6 +163,7 @@
 {
 	/* Detach from net device */
 	netif_tx_lock_bh(tun->dev);
+	netif_carrier_off(tun->dev);
 	tun->tfile = NULL;
 	tun->socket.file = NULL;
 	netif_tx_unlock_bh(tun->dev);
@@ -736,8 +738,18 @@
 				gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
 			else if (sinfo->gso_type & SKB_GSO_UDP)
 				gso.gso_type = VIRTIO_NET_HDR_GSO_UDP;
-			else
-				BUG();
+			else {
+				printk(KERN_ERR "tun: unexpected GSO type: "
+				       "0x%x, gso_size %d, hdr_len %d\n",
+				       sinfo->gso_type, gso.gso_size,
+				       gso.hdr_len);
+				print_hex_dump(KERN_ERR, "tun: ",
+					       DUMP_PREFIX_NONE,
+					       16, 1, skb->head,
+					       min((int)gso.hdr_len, 64), true);
+				WARN_ON_ONCE(1);
+				return -EINVAL;
+			}
 			if (sinfo->gso_type & SKB_GSO_TCP_ECN)
 				gso.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
 		} else
@@ -1564,12 +1576,6 @@
 #endif
 }
 
-static u32 tun_get_link(struct net_device *dev)
-{
-	struct tun_struct *tun = netdev_priv(dev);
-	return !!tun->tfile;
-}
-
 static u32 tun_get_rx_csum(struct net_device *dev)
 {
 	struct tun_struct *tun = netdev_priv(dev);
@@ -1591,7 +1597,7 @@
 	.get_drvinfo	= tun_get_drvinfo,
 	.get_msglevel	= tun_get_msglevel,
 	.set_msglevel	= tun_set_msglevel,
-	.get_link	= tun_get_link,
+	.get_link	= ethtool_op_get_link,
 	.get_rx_csum	= tun_get_rx_csum,
 	.set_rx_csum	= tun_set_rx_csum
 };
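
For context: ethtool_op_get_link() is the stock helper, behaving essentially as sketched below, which is why the earlier hunks start toggling the carrier in the attach/detach paths — once the carrier state is kept accurate, the private tun_get_link() wrapper is redundant.

	/* The stock helper (net/core/ethtool.c), essentially: */
	u32 ethtool_op_get_link(struct net_device *dev)
	{
		return netif_carrier_ok(dev) ? 1 : 0;
	}

A side effect is that anything watching carrier state (e.g. rtnetlink link events) now also sees tun attach/detach, not just ethtool queries.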
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index e17dd74..8d532f9 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -594,7 +594,7 @@
 {
 	int i;
 
-	ugeth_info("UCC%d Geth registers:", ugeth->ug_info->uf_info.ucc_num);
+	ugeth_info("UCC%d Geth registers:", ugeth->ug_info->uf_info.ucc_num + 1);
 	ugeth_info("Base address: 0x%08x", (u32) ugeth->ug_regs);
 
 	ugeth_info("maccfg1    : addr - 0x%08x, val - 0x%08x",
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index a3a684c..6efca66 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -477,6 +477,7 @@
 	{USB_DEVICE(0x0af0, 0x8600)},
 	{USB_DEVICE(0x0af0, 0x8800)},
 	{USB_DEVICE(0x0af0, 0x8900)},
+	{USB_DEVICE(0x0af0, 0x9000)},
 	{USB_DEVICE(0x0af0, 0xd035)},
 	{USB_DEVICE(0x0af0, 0xd055)},
 	{USB_DEVICE(0x0af0, 0xd155)},
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 7eab407..3b03794 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -44,6 +44,7 @@
 #include <linux/usb.h>
 #include <linux/usb/usbnet.h>
 #include <linux/slab.h>
+#include <linux/kernel.h>
 
 #define DRIVER_VERSION		"22-Aug-2005"
 
@@ -158,16 +159,6 @@
 }
 EXPORT_SYMBOL_GPL(usbnet_get_endpoints);
 
-static u8 nibble(unsigned char c)
-{
-	if (likely(isdigit(c)))
-		return c - '0';
-	c = toupper(c);
-	if (likely(isxdigit(c)))
-		return 10 + c - 'A';
-	return 0;
-}
-
 int usbnet_get_ethernet_addr(struct usbnet *dev, int iMACAddress)
 {
 	int 		tmp, i;
@@ -183,7 +174,7 @@
 	}
 	for (i = tmp = 0; i < 6; i++, tmp += 2)
 		dev->net->dev_addr [i] =
-			(nibble(buf [tmp]) << 4) + nibble(buf [tmp + 1]);
+			(hex_to_bin(buf[tmp]) << 4) + hex_to_bin(buf[tmp + 1]);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(usbnet_get_ethernet_addr);
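
hex_to_bin() returns the value of a single hex digit, or -1 for any other character, so two calls assemble one address byte. Note the loop above, like the removed nibble() helper, assumes the descriptor string is well-formed; a standalone parser would check the return values. A sketch with a hypothetical helper (parse_mac):

	#include <linux/errno.h>
	#include <linux/kernel.h>	/* hex_to_bin() */

	/* Hypothetical: parse "001122334455" into a 6-byte MAC. */
	static int parse_mac(const char *hex, unsigned char mac[6])
	{
		int i, hi, lo;

		for (i = 0; i < 6; i++) {
			hi = hex_to_bin(hex[2 * i]);
			lo = hex_to_bin(hex[2 * i + 1]);
			if (hi < 0 || lo < 0)
				return -EINVAL;	/* not a hex digit */
			mac[i] = (hi << 4) | lo;
		}
		return 0;
	}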
@@ -624,7 +615,7 @@
 	while (!skb_queue_empty(&dev->rxq)
 		&& !skb_queue_empty(&dev->txq)
 		&& !skb_queue_empty(&dev->done)) {
-			schedule_timeout(UNLINK_TIMEOUT_MS);
+			schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
 			set_current_state(TASK_UNINTERRUPTIBLE);
 			netif_dbg(dev, ifdown, dev->net,
 				  "waited for %d urb completions\n", temp);
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 9d64186..abe0ff5 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -664,8 +664,13 @@
 	while (len) {
 		u32 buf_size;
 
-		buf_size = len > VMXNET3_MAX_TX_BUF_SIZE ?
-			   VMXNET3_MAX_TX_BUF_SIZE : len;
+		if (len < VMXNET3_MAX_TX_BUF_SIZE) {
+			buf_size = len;
+			dw2 |= len;
+		} else {
+			buf_size = VMXNET3_MAX_TX_BUF_SIZE;
+			/* spec says that for TxDesc.len, 0 == 2^14 */
+		}
 
 		tbi = tq->buf_info + tq->tx_ring.next2fill;
 		tbi->map_type = VMXNET3_MAP_SINGLE;
@@ -673,13 +678,13 @@
 				skb->data + buf_offset, buf_size,
 				PCI_DMA_TODEVICE);
 
-		tbi->len = buf_size; /* this automatically convert 2^14 to 0 */
+		tbi->len = buf_size;
 
 		gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
 		BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
 
 		gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
-		gdesc->dword[2] = cpu_to_le32(dw2 | buf_size);
+		gdesc->dword[2] = cpu_to_le32(dw2);
 		gdesc->dword[3] = 0;
 
 		dev_dbg(&adapter->netdev->dev,
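
The descriptor's length field is 14 bits wide and, per the spec note in the comment, the value 0 encodes 2^14 (16384) bytes. The rewrite therefore leaves the length bits in dw2 at zero for a full-size buffer, instead of OR-ing in a buf_size of 16384, which would set a bit outside the 14-bit field. Sketched abstractly, with hypothetical names:

	/* Hypothetical 14-bit length field where 0 encodes 2^14. */
	#define LEN_FIELD_MAX	(1u << 14)

	static inline u32 encode_len(u32 len)		/* caller ensures 1 <= len <= LEN_FIELD_MAX */
	{
		return len & (LEN_FIELD_MAX - 1);	/* exactly LEN_FIELD_MAX becomes 0 */
	}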
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 762a6a7..2121c73 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -68,10 +68,10 @@
 /*
  * Version numbers
  */
-#define VMXNET3_DRIVER_VERSION_STRING   "1.0.13.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING   "1.0.14.0-k"
 
 /* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM      0x01000B00
+#define VMXNET3_DRIVER_VERSION_NUM      0x01000E00
 
 
 /*
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index 94d87e8..c7c5605 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -41,6 +41,8 @@
 *
 ******************************************************************************/
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/if_vlan.h>
 #include <linux/pci.h>
 #include <linux/slab.h>
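
Defining pr_fmt before the includes makes every pr_*() call in the file expand with the module-name prefix, which is what lets the later hunks drop the hand-written "%s: " / VXGE_DRIVER_NAME arguments. Roughly:

	/* With the pr_fmt above in effect (see include/linux/kernel.h): */
	pr_info("Driver version: %s\n", DRV_VERSION);
	/* expands to: */
	printk(KERN_INFO KBUILD_MODNAME ": " "Driver version: %s\n", DRV_VERSION);

The define must precede any header that would otherwise supply the default pr_fmt, hence its position above the #include block.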
@@ -144,7 +146,7 @@
 
 	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
 		vdev->ndev->name, __func__, __LINE__);
-	printk(KERN_NOTICE "%s: Link Up\n", vdev->ndev->name);
+	netdev_notice(vdev->ndev, "Link Up\n");
 	vdev->stats.link_up++;
 
 	netif_carrier_on(vdev->ndev);
@@ -168,7 +170,7 @@
 
 	vxge_debug_entryexit(VXGE_TRACE,
 		"%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
-	printk(KERN_NOTICE "%s: Link Down\n", vdev->ndev->name);
+	netdev_notice(vdev->ndev, "Link Down\n");
 
 	vdev->stats.link_down++;
 	netif_carrier_off(vdev->ndev);
@@ -2679,7 +2681,7 @@
 
 	if (vxge_hw_device_link_state_get(vdev->devh) == VXGE_HW_LINK_UP) {
 		netif_carrier_on(vdev->ndev);
-		printk(KERN_NOTICE "%s: Link Up\n", vdev->ndev->name);
+		netdev_notice(vdev->ndev, "Link Up\n");
 		vdev->stats.link_up++;
 	}
 
@@ -2817,7 +2819,7 @@
 	}
 
 	netif_carrier_off(vdev->ndev);
-	printk(KERN_NOTICE "%s: Link Down\n", vdev->ndev->name);
+	netdev_notice(vdev->ndev, "Link Down\n");
 	netif_tx_stop_all_queues(vdev->ndev);
 
 	/* Note that at this point xmit() is stopped by upper layer */
@@ -3844,9 +3846,7 @@
 	struct vxgedev *vdev = netdev_priv(netdev);
 
 	if (pci_enable_device(pdev)) {
-		printk(KERN_ERR "%s: "
-			"Cannot re-enable device after reset\n",
-			VXGE_DRIVER_NAME);
+		netdev_err(netdev, "Cannot re-enable device after reset\n");
 		return PCI_ERS_RESULT_DISCONNECT;
 	}
 
@@ -3871,9 +3871,8 @@
 
 	if (netif_running(netdev)) {
 		if (vxge_open(netdev)) {
-			printk(KERN_ERR "%s: "
-				"Can't bring device back up after reset\n",
-				VXGE_DRIVER_NAME);
+			netdev_err(netdev,
+				   "Can't bring device back up after reset\n");
 			return;
 		}
 	}
@@ -4430,13 +4429,9 @@
 vxge_starter(void)
 {
 	int ret = 0;
-	char version[32];
-	snprintf(version, 32, "%s", DRV_VERSION);
 
-	printk(KERN_INFO "%s: Copyright(c) 2002-2010 Exar Corp.\n",
-		VXGE_DRIVER_NAME);
-	printk(KERN_INFO "%s: Driver version: %s\n",
-			VXGE_DRIVER_NAME, version);
+	pr_info("Copyright(c) 2002-2010 Exar Corp.\n");
+	pr_info("Driver version: %s\n", DRV_VERSION);
 
 	verify_bandwidth();
 
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 43b7727..ad7719f 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -15,6 +15,8 @@
  *      Maintainer:  Kevin Curtis  <kevin.curtis@farsite.co.uk>
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/version.h>
@@ -511,21 +513,19 @@
  * support variable numbers of macro parameters. The inverted if prevents us
  * eating someone else's else clause.
  */
-#define dbg(F,fmt,A...) if ( ! ( fst_debug_mask & (F))) \
-                                ; \
-                        else \
-                                printk ( KERN_DEBUG FST_NAME ": " fmt, ## A )
-
+#define dbg(F, fmt, args...)					\
+do {								\
+	if (fst_debug_mask & (F))				\
+		printk(KERN_DEBUG pr_fmt(fmt), ##args);		\
+} while (0)
 #else
-#define dbg(X...)		/* NOP */
+#define dbg(F, fmt, args...)					\
+do {								\
+	if (0)							\
+		printk(KERN_DEBUG pr_fmt(fmt), ##args);		\
+} while (0)
 #endif
 
-/*      Printing short cuts
- */
-#define printk_err(fmt,A...)    printk ( KERN_ERR     FST_NAME ": " fmt, ## A )
-#define printk_warn(fmt,A...)   printk ( KERN_WARNING FST_NAME ": " fmt, ## A )
-#define printk_info(fmt,A...)   printk ( KERN_INFO    FST_NAME ": " fmt, ## A )
-
 /*
  *      PCI ID lookup table
  */
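
The do { ... } while (0) wrapper is the usual way to make a multi-statement macro behave as a single statement: it cannot swallow a following else (the hazard the surviving comment above still describes in terms of the old inverted-if trick), and it still requires the caller's trailing semicolon. Keeping if (0) printk(...) in the disabled variant means the format string and arguments stay type-checked, then compile away. For illustration, with a hypothetical naive macro:

	#define dbg_naive(fmt, args...)	if (debug) printk(fmt, ##args)

	if (err)
		dbg_naive("failed\n");
	else
		retry();	/* BUG: this else binds to the macro's hidden if */

	#define dbg_safe(fmt, args...)	do { if (debug) printk(fmt, ##args); } while (0)
	/* dbg_safe() expands to a single statement, so the else binds as written. */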
@@ -961,7 +961,7 @@
 		spin_lock_irqsave(&card->card_lock, flags);
 
 		if (++safety > 2000) {
-			printk_err("Mailbox safety timeout\n");
+			pr_err("Mailbox safety timeout\n");
 			break;
 		}
 
@@ -1241,8 +1241,8 @@
 		 * This seems to happen on the TE1 interface sometimes
 		 * so throw the frame away and log the event.
 		 */
-		printk_err("Frame received with 0 length. Card %d Port %d\n",
-			   card->card_no, port->index);
+		pr_err("Frame received with 0 length. Card %d Port %d\n",
+		       card->card_no, port->index);
 		/* Return descriptor to card */
 		FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN);
 
@@ -1486,9 +1486,8 @@
 	 */
 	dbg(DBG_INTR, "intr: %d %p\n", card->irq, card);
 	if (card->state != FST_RUNNING) {
-		printk_err
-		    ("Interrupt received for card %d in a non running state (%d)\n",
-		     card->card_no, card->state);
+		pr_err("Interrupt received for card %d in a non running state (%d)\n",
+		       card->card_no, card->state);
 
 		/* 
 		 * It is possible to really be running, i.e. we have re-loaded
@@ -1614,8 +1613,7 @@
 			break;
 
 		default:
-			printk_err("intr: unknown card event %d. ignored\n",
-				   event);
+			pr_err("intr: unknown card event %d. ignored\n", event);
 			break;
 		}
 
@@ -1637,13 +1635,13 @@
 
 	/* Check structure version and end marker */
 	if (FST_RDW(card, smcVersion) != SMC_VERSION) {
-		printk_err("Bad shared memory version %d expected %d\n",
-			   FST_RDW(card, smcVersion), SMC_VERSION);
+		pr_err("Bad shared memory version %d expected %d\n",
+		       FST_RDW(card, smcVersion), SMC_VERSION);
 		card->state = FST_BADVERSION;
 		return;
 	}
 	if (FST_RDL(card, endOfSmcSignature) != END_SIG) {
-		printk_err("Missing shared memory signature\n");
+		pr_err("Missing shared memory signature\n");
 		card->state = FST_BADVERSION;
 		return;
 	}
@@ -1651,11 +1649,11 @@
 	if ((i = FST_RDB(card, taskStatus)) == 0x01) {
 		card->state = FST_RUNNING;
 	} else if (i == 0xFF) {
-		printk_err("Firmware initialisation failed. Card halted\n");
+		pr_err("Firmware initialisation failed. Card halted\n");
 		card->state = FST_HALTED;
 		return;
 	} else if (i != 0x00) {
-		printk_err("Unknown firmware status 0x%x\n", i);
+		pr_err("Unknown firmware status 0x%x\n", i);
 		card->state = FST_HALTED;
 		return;
 	}
@@ -1665,9 +1663,10 @@
 	 * existing firmware etc so we just report it for the moment.
 	 */
 	if (FST_RDL(card, numberOfPorts) != card->nports) {
-		printk_warn("Port count mismatch on card %d."
-			    " Firmware thinks %d we say %d\n", card->card_no,
-			    FST_RDL(card, numberOfPorts), card->nports);
+		pr_warning("Port count mismatch on card %d. "
+			   "Firmware thinks %d we say %d\n",
+			   card->card_no,
+			   FST_RDL(card, numberOfPorts), card->nports);
 	}
 }
 
@@ -2090,9 +2089,8 @@
 		 */
 
 		if (card->state != FST_RUNNING) {
-			printk_err
-			    ("Attempt to configure card %d in non-running state (%d)\n",
-			     card->card_no, card->state);
+			pr_err("Attempt to configure card %d in non-running state (%d)\n",
+			       card->card_no, card->state);
 			return -EIO;
 		}
 		if (copy_from_user(&info, ifr->ifr_data, sizeof (info))) {
@@ -2384,8 +2382,8 @@
                 err = register_hdlc_device(card->ports[i].dev);
                 if (err < 0) {
 			int j;
-                        printk_err ("Cannot register HDLC device for port %d"
-                                    " (errno %d)\n", i, -err );
+			pr_err("Cannot register HDLC device for port %d (errno %d)\n",
+			       i, -err);
 			for (j = i; j < card->nports; j++) {
 				free_netdev(card->ports[j].dev);
 				card->ports[j].dev = NULL;
@@ -2395,10 +2393,10 @@
                 }
 	}
 
-	printk_info("%s-%s: %s IRQ%d, %d ports\n",
-	       port_to_dev(&card->ports[0])->name,
-	       port_to_dev(&card->ports[card->nports - 1])->name,
-	       type_strings[card->type], card->irq, card->nports);
+	pr_info("%s-%s: %s IRQ%d, %d ports\n",
+		port_to_dev(&card->ports[0])->name,
+		port_to_dev(&card->ports[card->nports - 1])->name,
+		type_strings[card->type], card->irq, card->nports);
 }
 
 static const struct net_device_ops fst_ops = {
@@ -2417,19 +2415,17 @@
 static int __devinit
 fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
-	static int firsttime_done = 0;
 	static int no_of_cards_added = 0;
 	struct fst_card_info *card;
 	int err = 0;
 	int i;
 
-	if (!firsttime_done) {
-		printk_info("FarSync WAN driver " FST_USER_VERSION
-		       " (c) 2001-2004 FarSite Communications Ltd.\n");
-		firsttime_done = 1;
-		dbg(DBG_ASS, "The value of debug mask is %x\n", fst_debug_mask);
-	}
-
+	printk_once(KERN_INFO
+		    pr_fmt("FarSync WAN driver " FST_USER_VERSION
+			   " (c) 2001-2004 FarSite Communications Ltd.\n"));
+#if FST_DEBUG
+	dbg(DBG_ASS, "The value of debug mask is %x\n", fst_debug_mask);
+#endif
 	/*
 	 * We are going to be clever and allow certain cards not to be
 	 * configured.  An exclude list can be provided in /etc/modules.conf
@@ -2441,8 +2437,8 @@
 		 */
 		for (i = 0; i < fst_excluded_cards; i++) {
 			if ((pdev->devfn) >> 3 == fst_excluded_list[i]) {
-				printk_info("FarSync PCI device %d not assigned\n",
-				       (pdev->devfn) >> 3);
+				pr_info("FarSync PCI device %d not assigned\n",
+					(pdev->devfn) >> 3);
 				return -EBUSY;
 			}
 		}
@@ -2451,20 +2447,19 @@
 	/* Allocate driver private data */
 	card = kzalloc(sizeof (struct fst_card_info), GFP_KERNEL);
 	if (card == NULL) {
-		printk_err("FarSync card found but insufficient memory for"
-			   " driver storage\n");
+		pr_err("FarSync card found but insufficient memory for driver storage\n");
 		return -ENOMEM;
 	}
 
 	/* Try to enable the device */
 	if ((err = pci_enable_device(pdev)) != 0) {
-		printk_err("Failed to enable card. Err %d\n", -err);
+		pr_err("Failed to enable card. Err %d\n", -err);
 		kfree(card);
 		return err;
 	}
 
 	if ((err = pci_request_regions(pdev, "FarSync")) !=0) {
-	        printk_err("Failed to allocate regions. Err %d\n", -err);
+		pr_err("Failed to allocate regions. Err %d\n", -err);
 		pci_disable_device(pdev);
 		kfree(card);
 	        return err;
@@ -2475,14 +2470,14 @@
 	card->phys_mem = pci_resource_start(pdev, 2);
 	card->phys_ctlmem = pci_resource_start(pdev, 3);
 	if ((card->mem = ioremap(card->phys_mem, FST_MEMSIZE)) == NULL) {
-		printk_err("Physical memory remap failed\n");
+		pr_err("Physical memory remap failed\n");
 		pci_release_regions(pdev);
 		pci_disable_device(pdev);
 		kfree(card);
 		return -ENODEV;
 	}
 	if ((card->ctlmem = ioremap(card->phys_ctlmem, 0x10)) == NULL) {
-		printk_err("Control memory remap failed\n");
+		pr_err("Control memory remap failed\n");
 		pci_release_regions(pdev);
 		pci_disable_device(pdev);
 		kfree(card);
@@ -2492,7 +2487,7 @@
 
 	/* Register the interrupt handler */
 	if (request_irq(pdev->irq, fst_intr, IRQF_SHARED, FST_DEV_NAME, card)) {
-		printk_err("Unable to register interrupt %d\n", card->irq);
+		pr_err("Unable to register interrupt %d\n", card->irq);
 		pci_release_regions(pdev);
 		pci_disable_device(pdev);
 		iounmap(card->ctlmem);
@@ -2523,7 +2518,7 @@
 		if (!dev) {
 			while (i--)
 				free_netdev(card->ports[i].dev);
-			printk_err ("FarSync: out of memory\n");
+			pr_err("FarSync: out of memory\n");
                         free_irq(card->irq, card);
                         pci_release_regions(pdev);
                         pci_disable_device(pdev);
@@ -2587,7 +2582,7 @@
 		    pci_alloc_consistent(card->device, FST_MAX_MTU,
 					 &card->rx_dma_handle_card);
 		if (card->rx_dma_handle_host == NULL) {
-			printk_err("Could not allocate rx dma buffer\n");
+			pr_err("Could not allocate rx dma buffer\n");
 			fst_disable_intr(card);
 			pci_release_regions(pdev);
 			pci_disable_device(pdev);
@@ -2600,7 +2595,7 @@
 		    pci_alloc_consistent(card->device, FST_MAX_MTU,
 					 &card->tx_dma_handle_card);
 		if (card->tx_dma_handle_host == NULL) {
-			printk_err("Could not allocate tx dma buffer\n");
+			pr_err("Could not allocate tx dma buffer\n");
 			fst_disable_intr(card);
 			pci_release_regions(pdev);
 			pci_disable_device(pdev);
@@ -2672,7 +2667,7 @@
 static void __exit
 fst_cleanup_module(void)
 {
-	printk_info("FarSync WAN driver unloading\n");
+	pr_info("FarSync WAN driver unloading\n");
 	pci_unregister_driver(&fst_driver);
 }
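
The farsync changes above replace the driver's private printk_err/printk_info
wrappers with the generic pr_err()/pr_info() helpers. Those helpers pick up a
per-module prefix from the pr_fmt() macro, which lives near the top of the file
and is not visible in these hunks; a minimal sketch of the pattern, with the
prefix assumed:

	/* pr_fmt() must be defined before the first printk-related include;
	 * KBUILD_MODNAME expands to the module name, e.g. "farsync". */
	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
	#include <linux/kernel.h>

	static void sketch(int err)
	{
		/* prints: "farsync: Failed to enable card. Err -22" */
		pr_err("Failed to enable card. Err %d\n", err);
	}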
 
diff --git a/drivers/net/wimax/i2400m/i2400m-usb.h b/drivers/net/wimax/i2400m/i2400m-usb.h
index 2d7c96d..eb80243 100644
--- a/drivers/net/wimax/i2400m/i2400m-usb.h
+++ b/drivers/net/wimax/i2400m/i2400m-usb.h
@@ -152,6 +152,7 @@
 	/* Device IDs */
 	USB_DEVICE_ID_I6050 = 0x0186,
 	USB_DEVICE_ID_I6050_2 = 0x0188,
+	USB_DEVICE_ID_I6250 = 0x0187,
 };
 
 
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
index 0d5081d..d3365ac 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -491,6 +491,7 @@
 	switch (id->idProduct) {
 	case USB_DEVICE_ID_I6050:
 	case USB_DEVICE_ID_I6050_2:
+	case USB_DEVICE_ID_I6250:
 		i2400mu->i6050 = 1;
 		break;
 	default:
@@ -739,6 +740,7 @@
 struct usb_device_id i2400mu_id_table[] = {
 	{ USB_DEVICE(0x8086, USB_DEVICE_ID_I6050) },
 	{ USB_DEVICE(0x8086, USB_DEVICE_ID_I6050_2) },
+	{ USB_DEVICE(0x8086, USB_DEVICE_ID_I6250) },
 	{ USB_DEVICE(0x8086, 0x0181) },
 	{ USB_DEVICE(0x8086, 0x1403) },
 	{ USB_DEVICE(0x8086, 0x1405) },
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
index dabafb8..fe7418a 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -63,6 +63,7 @@
 				      u8 rxchainmask,
 				      struct ath9k_cal_list *currCal)
 {
+	struct ath9k_hw_cal_data *caldata = ah->caldata;
 	bool iscaldone = false;
 
 	if (currCal->calState == CAL_RUNNING) {
@@ -81,14 +82,14 @@
 				}
 
 				currCal->calData->calPostProc(ah, numChains);
-				ichan->CalValid |= currCal->calData->calType;
+				caldata->CalValid |= currCal->calData->calType;
 				currCal->calState = CAL_DONE;
 				iscaldone = true;
 			} else {
 				ar9002_hw_setup_calibration(ah, currCal);
 			}
 		}
-	} else if (!(ichan->CalValid & currCal->calData->calType)) {
+	} else if (!(caldata->CalValid & currCal->calData->calType)) {
 		ath9k_hw_reset_calibration(ah, currCal);
 	}
 
@@ -686,8 +687,13 @@
 {
 	bool iscaldone = true;
 	struct ath9k_cal_list *currCal = ah->cal_list_curr;
+	bool nfcal, nfcal_pending = false;
 
-	if (currCal &&
+	nfcal = !!(REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF);
+	if (ah->caldata)
+		nfcal_pending = ah->caldata->nfcal_pending;
+
+	if (currCal && !nfcal &&
 	    (currCal->calState == CAL_RUNNING ||
 	     currCal->calState == CAL_WAITING)) {
 		iscaldone = ar9002_hw_per_calibration(ah, chan,
@@ -703,7 +709,7 @@
 	}
 
 	/* Do NF cal only at longer intervals */
-	if (longcal) {
+	if (longcal || nfcal_pending) {
 		/* Do periodic PAOffset Cal */
 		ar9002_hw_pa_cal(ah, false);
 		ar9002_hw_olc_temp_compensation(ah);
@@ -712,16 +718,18 @@
 		 * Get the value from the previous NF cal and update
 		 * history buffer.
 		 */
-		ath9k_hw_getnf(ah, chan);
+		if (ath9k_hw_getnf(ah, chan)) {
+			/*
+			 * Load the NF from history buffer of the current
+			 * channel.
+			 * NF is slow time-variant, so it is OK to use a
+			 * historical value.
+			 */
+			ath9k_hw_loadnf(ah, ah->curchan);
+		}
 
-		/*
-		 * Load the NF from history buffer of the current channel.
-		 * NF is slow time-variant, so it is OK to use a historical
-		 * value.
-		 */
-		ath9k_hw_loadnf(ah, ah->curchan);
-
-		ath9k_hw_start_nfcal(ah);
+		if (longcal)
+			ath9k_hw_start_nfcal(ah, false);
 	}
 
 	return iscaldone;
@@ -869,8 +877,10 @@
 	ar9002_hw_pa_cal(ah, true);
 
 	/* Do NF Calibration after DC offset and other calibrations */
-	REG_WRITE(ah, AR_PHY_AGC_CONTROL,
-		  REG_READ(ah, AR_PHY_AGC_CONTROL) | AR_PHY_AGC_CONTROL_NF);
+	ath9k_hw_start_nfcal(ah, true);
+
+	if (ah->caldata)
+		ah->caldata->nfcal_pending = true;
 
 	ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL;
 
@@ -901,7 +911,8 @@
 			ath9k_hw_reset_calibration(ah, ah->cal_list_curr);
 	}
 
-	chan->CalValid = 0;
+	if (ah->caldata)
+		ah->caldata->CalValid = 0;
 
 	return true;
 }
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index 5a06503..4674ea8 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -68,6 +68,7 @@
 				      u8 rxchainmask,
 				      struct ath9k_cal_list *currCal)
 {
+	struct ath9k_hw_cal_data *caldata = ah->caldata;
 	/* Cal is assumed not done until explicitly set below */
 	bool iscaldone = false;
 
@@ -95,7 +96,7 @@
 				currCal->calData->calPostProc(ah, numChains);
 
 				/* Calibration has finished. */
-				ichan->CalValid |= currCal->calData->calType;
+				caldata->CalValid |= currCal->calData->calType;
 				currCal->calState = CAL_DONE;
 				iscaldone = true;
 			} else {
@@ -106,7 +107,7 @@
 			ar9003_hw_setup_calibration(ah, currCal);
 			}
 		}
-	} else if (!(ichan->CalValid & currCal->calData->calType)) {
+	} else if (!(caldata->CalValid & currCal->calData->calType)) {
 		/* If current cal is marked invalid in channel, kick it off */
 		ath9k_hw_reset_calibration(ah, currCal);
 	}
@@ -149,6 +150,12 @@
 	/* Do NF cal only at longer intervals */
 	if (longcal) {
 		/*
+		 * Get the value from the previous NF cal and update
+		 * history buffer.
+		 */
+		ath9k_hw_getnf(ah, chan);
+
+		/*
 		 * Load the NF from history buffer of the current channel.
 		 * NF is slow time-variant, so it is OK to use a historical
 		 * value.
@@ -156,7 +163,7 @@
 		ath9k_hw_loadnf(ah, ah->curchan);
 
 		/* start NF calibration, without updating BB NF register */
-		ath9k_hw_start_nfcal(ah);
+		ath9k_hw_start_nfcal(ah, false);
 	}
 
 	return iscaldone;
@@ -762,6 +769,8 @@
 	/* Revert chainmasks to their original values before NF cal */
 	ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask);
 
+	ath9k_hw_start_nfcal(ah, true);
+
 	/* Initialize list pointers */
 	ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL;
 
@@ -785,7 +794,8 @@
 	if (ah->cal_list_curr)
 		ath9k_hw_reset_calibration(ah, ah->cal_list_curr);
 
-	chan->CalValid = 0;
+	if (ah->caldata)
+		ah->caldata->CalValid = 0;
 
 	return true;
 }
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index ace8d26..b883b17 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -41,6 +41,20 @@
 #define LE16(x) __constant_cpu_to_le16(x)
 #define LE32(x) __constant_cpu_to_le32(x)
 
+/* Local defines to distinguish between extension and control CTL's */
+#define EXT_ADDITIVE (0x8000)
+#define CTL_11A_EXT (CTL_11A | EXT_ADDITIVE)
+#define CTL_11G_EXT (CTL_11G | EXT_ADDITIVE)
+#define CTL_11B_EXT (CTL_11B | EXT_ADDITIVE)
+#define REDUCE_SCALED_POWER_BY_TWO_CHAIN     6  /* 10*log10(2)*2 */
+#define REDUCE_SCALED_POWER_BY_THREE_CHAIN   9  /* 10*log10(3)*2 */
+#define PWRINCR_3_TO_1_CHAIN      9             /* 10*log(3)*2 */
+#define PWRINCR_3_TO_2_CHAIN      3             /* floor(10*log(3/2)*2) */
+#define PWRINCR_2_TO_1_CHAIN      6             /* 10*log(2)*2 */
+
+#define SUB_NUM_CTL_MODES_AT_5G_40 2    /* excluding HT40, EXT-OFDM */
+#define SUB_NUM_CTL_MODES_AT_2G_40 3    /* excluding HT40, EXT-OFDM, EXT-CCK */
+
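
The power constants above are expressed in half-dB units (twice the dB value),
which is why each comment multiplies by two; the arithmetic behind the chosen
values:

$$2 \cdot 10\log_{10} 2 \approx 6.02 \rightarrow 6, \qquad
  2 \cdot 10\log_{10} 3 \approx 9.54 \rightarrow 9, \qquad
  \lfloor 2 \cdot 10\log_{10}(3/2) \rfloor = \lfloor 3.52 \rfloor = 3$$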
 static const struct ar9300_eeprom ar9300_default = {
 	.eepromVersion = 2,
 	.templateVersion = 2,
@@ -609,6 +623,14 @@
 	 }
 };
 
+static u16 ath9k_hw_fbin2freq(u8 fbin, bool is2GHz)
+{
+	if (fbin == AR9300_BCHAN_UNUSED)
+		return fbin;
+
+	return (u16) ((is2GHz) ? (2300 + fbin) : (4800 + 5 * fbin));
+}
+
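
A couple of sample conversions through the helper above, for orientation
(channel values chosen for illustration):

	u16 f2 = ath9k_hw_fbin2freq(112, true);	/* 2300 + 112  = 2412 MHz */
	u16 f5 = ath9k_hw_fbin2freq(76, false);	/* 4800 + 5*76 = 5180 MHz */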
 static int ath9k_hw_ar9300_check_eeprom(struct ath_hw *ah)
 {
 	return 0;
@@ -1417,9 +1439,9 @@
 #undef POW_SM
 }
 
-static void ar9003_hw_set_target_power_eeprom(struct ath_hw *ah, u16 freq)
+static void ar9003_hw_set_target_power_eeprom(struct ath_hw *ah, u16 freq,
+					      u8 *targetPowerValT2)
 {
-	u8 targetPowerValT2[ar9300RateSize];
 	/* XXX: hard code for now, need to get from eeprom struct */
 	u8 ht40PowerIncForPdadc = 0;
 	bool is2GHz = false;
@@ -1553,9 +1575,6 @@
 			  "TPC[%02d] 0x%08x\n", i, targetPowerValT2[i]);
 		i++;
 	}
-
-	/* Write target power array to registers */
-	ar9003_hw_tx_power_regwrite(ah, targetPowerValT2);
 }
 
 static int ar9003_hw_cal_pier_get(struct ath_hw *ah,
@@ -1799,14 +1818,369 @@
 	return 0;
 }
 
+static u16 ar9003_hw_get_direct_edge_power(struct ar9300_eeprom *eep,
+					   int idx,
+					   int edge,
+					   bool is2GHz)
+{
+	struct cal_ctl_data_2g *ctl_2g = eep->ctlPowerData_2G;
+	struct cal_ctl_data_5g *ctl_5g = eep->ctlPowerData_5G;
+
+	if (is2GHz)
+		return ctl_2g[idx].ctlEdges[edge].tPower;
+	else
+		return ctl_5g[idx].ctlEdges[edge].tPower;
+}
+
+static u16 ar9003_hw_get_indirect_edge_power(struct ar9300_eeprom *eep,
+					     int idx,
+					     unsigned int edge,
+					     u16 freq,
+					     bool is2GHz)
+{
+	struct cal_ctl_data_2g *ctl_2g = eep->ctlPowerData_2G;
+	struct cal_ctl_data_5g *ctl_5g = eep->ctlPowerData_5G;
+
+	u8 *ctl_freqbin = is2GHz ?
+		&eep->ctl_freqbin_2G[idx][0] :
+		&eep->ctl_freqbin_5G[idx][0];
+
+	if (is2GHz) {
+		if (ath9k_hw_fbin2freq(ctl_freqbin[edge - 1], 1) < freq &&
+		    ctl_2g[idx].ctlEdges[edge - 1].flag)
+			return ctl_2g[idx].ctlEdges[edge - 1].tPower;
+	} else {
+		if (ath9k_hw_fbin2freq(ctl_freqbin[edge - 1], 0) < freq &&
+		    ctl_5g[idx].ctlEdges[edge - 1].flag)
+			return ctl_5g[idx].ctlEdges[edge - 1].tPower;
+	}
+
+	return AR9300_MAX_RATE_POWER;
+}
+
+/*
+ * Find the maximum conformance test limit for the given channel and CTL info
+ */
+static u16 ar9003_hw_get_max_edge_power(struct ar9300_eeprom *eep,
+					u16 freq, int idx, bool is2GHz)
+{
+	u16 twiceMaxEdgePower = AR9300_MAX_RATE_POWER;
+	u8 *ctl_freqbin = is2GHz ?
+		&eep->ctl_freqbin_2G[idx][0] :
+		&eep->ctl_freqbin_5G[idx][0];
+	u16 num_edges = is2GHz ?
+		AR9300_NUM_BAND_EDGES_2G : AR9300_NUM_BAND_EDGES_5G;
+	unsigned int edge;
+
+	/* Get the edge power */
+	for (edge = 0;
+	     (edge < num_edges) && (ctl_freqbin[edge] != AR9300_BCHAN_UNUSED);
+	     edge++) {
+		/*
+		 * If there's an exact channel match or an inband flag set
+		 * on the lower channel use the given rdEdgePower
+		 */
+		if (freq == ath9k_hw_fbin2freq(ctl_freqbin[edge], is2GHz)) {
+			twiceMaxEdgePower =
+				ar9003_hw_get_direct_edge_power(eep, idx,
+								edge, is2GHz);
+			break;
+		} else if ((edge > 0) &&
+			   (freq < ath9k_hw_fbin2freq(ctl_freqbin[edge],
+						      is2GHz))) {
+			twiceMaxEdgePower =
+				ar9003_hw_get_indirect_edge_power(eep, idx,
+								  edge, freq,
+								  is2GHz);
+			/*
+			 * Leave loop - no more affecting edges possible in
+			 * this monotonic increasing list
+			 */
+			break;
+		}
+	}
+	return twiceMaxEdgePower;
+}
+
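
To make the scan above concrete, a hedged example with made-up band edges:

	/* Say the CTL row stores edges at 2412, 2437 and 2462 MHz.
	 *  - freq == 2437: exact match, the direct edge power is used;
	 *  - freq == 2450: falls between edges, so the indirect lookup
	 *    consults the edge below (2437) and uses its power only if
	 *    its inband flag is set, otherwise the AR9300_MAX_RATE_POWER
	 *    default (no cap) remains.
	 * The edge list is monotonically increasing, hence the break. */
	u16 cap = ar9003_hw_get_max_edge_power(pEepData, 2450, i, true);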
+static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah,
+					       struct ath9k_channel *chan,
+					       u8 *pPwrArray, u16 cfgCtl,
+					       u8 twiceAntennaReduction,
+					       u8 twiceMaxRegulatoryPower,
+					       u16 powerLimit)
+{
+	struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
+	struct ath_common *common = ath9k_hw_common(ah);
+	struct ar9300_eeprom *pEepData = &ah->eeprom.ar9300_eep;
+	u16 twiceMaxEdgePower = AR9300_MAX_RATE_POWER;
+	static const u16 tpScaleReductionTable[5] = {
+		0, 3, 6, 9, AR9300_MAX_RATE_POWER
+	};
+	int i;
+	int16_t  twiceLargestAntenna;
+	u16 scaledPower = 0, minCtlPower, maxRegAllowedPower;
+	u16 ctlModesFor11a[] = {
+		CTL_11A, CTL_5GHT20, CTL_11A_EXT, CTL_5GHT40
+	};
+	u16 ctlModesFor11g[] = {
+		CTL_11B, CTL_11G, CTL_2GHT20, CTL_11B_EXT,
+		CTL_11G_EXT, CTL_2GHT40
+	};
+	u16 numCtlModes, *pCtlMode, ctlMode, freq;
+	struct chan_centers centers;
+	u8 *ctlIndex;
+	u8 ctlNum;
+	u16 twiceMinEdgePower;
+	bool is2ghz = IS_CHAN_2GHZ(chan);
+
+	ath9k_hw_get_channel_centers(ah, chan, &centers);
+
+	/* Compute TxPower reduction due to Antenna Gain */
+	if (is2ghz)
+		twiceLargestAntenna = pEepData->modalHeader2G.antennaGain;
+	else
+		twiceLargestAntenna = pEepData->modalHeader5G.antennaGain;
+
+	twiceLargestAntenna = (int16_t)min((twiceAntennaReduction) -
+				twiceLargestAntenna, 0);
+
+	/*
+	 * scaledPower is the minimum of the user input power level
+	 * and the regulatory allowed power level
+	 */
+	maxRegAllowedPower = twiceMaxRegulatoryPower + twiceLargestAntenna;
+
+	if (regulatory->tp_scale != ATH9K_TP_SCALE_MAX) {
+		maxRegAllowedPower -=
+			(tpScaleReductionTable[(regulatory->tp_scale)] * 2);
+	}
+
+	scaledPower = min(powerLimit, maxRegAllowedPower);
+
+	/*
+	 * Reduce scaled Power by number of chains active to get
+	 * to per chain tx power level
+	 */
+	switch (ar5416_get_ntxchains(ah->txchainmask)) {
+	case 1:
+		break;
+	case 2:
+		scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN;
+		break;
+	case 3:
+		scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN;
+		break;
+	}
+
+	scaledPower = max((u16)0, scaledPower);
+
+	/*
+	 * Get target powers from EEPROM - our baseline for TX Power
+	 */
+	if (is2ghz) {
+		/* Setup for CTL modes */
+		/* CTL_11B, CTL_11G, CTL_2GHT20 */
+		numCtlModes =
+			ARRAY_SIZE(ctlModesFor11g) -
+				   SUB_NUM_CTL_MODES_AT_2G_40;
+		pCtlMode = ctlModesFor11g;
+		if (IS_CHAN_HT40(chan))
+			/* All 2G CTL's */
+			numCtlModes = ARRAY_SIZE(ctlModesFor11g);
+	} else {
+		/* Setup for CTL modes */
+		/* CTL_11A, CTL_5GHT20 */
+		numCtlModes = ARRAY_SIZE(ctlModesFor11a) -
+					 SUB_NUM_CTL_MODES_AT_5G_40;
+		pCtlMode = ctlModesFor11a;
+		if (IS_CHAN_HT40(chan))
+			/* All 5G CTL's */
+			numCtlModes = ARRAY_SIZE(ctlModesFor11a);
+	}
+
+	/*
+	 * For MIMO, need to apply regulatory caps individually across
+	 * dynamically running modes: CCK, OFDM, HT20, HT40
+	 *
+	 * The outer loop walks through each possible applicable runtime mode.
+	 * The inner loop walks through each ctlIndex entry in EEPROM.
+	 * The ctl value is encoded as [7:4] == test group, [3:0] == test mode.
+	 */
+	for (ctlMode = 0; ctlMode < numCtlModes; ctlMode++) {
+		bool isHt40CtlMode = (pCtlMode[ctlMode] == CTL_5GHT40) ||
+			(pCtlMode[ctlMode] == CTL_2GHT40);
+		if (isHt40CtlMode)
+			freq = centers.synth_center;
+		else if (pCtlMode[ctlMode] & EXT_ADDITIVE)
+			freq = centers.ext_center;
+		else
+			freq = centers.ctl_center;
+
+		ath_print(common, ATH_DBG_REGULATORY,
+			  "LOOP-Mode ctlMode %d < %d, isHt40CtlMode %d, "
+			  "EXT_ADDITIVE %d\n",
+			  ctlMode, numCtlModes, isHt40CtlMode,
+			  (pCtlMode[ctlMode] & EXT_ADDITIVE));
+
+		/* walk through each CTL index stored in EEPROM */
+		if (is2ghz) {
+			ctlIndex = pEepData->ctlIndex_2G;
+			ctlNum = AR9300_NUM_CTLS_2G;
+		} else {
+			ctlIndex = pEepData->ctlIndex_5G;
+			ctlNum = AR9300_NUM_CTLS_5G;
+		}
+
+		for (i = 0; (i < ctlNum) && ctlIndex[i]; i++) {
+			ath_print(common, ATH_DBG_REGULATORY,
+				  "LOOP-Ctlidx %d: cfgCtl 0x%2.2x "
+				  "pCtlMode 0x%2.2x ctlIndex 0x%2.2x "
+				  "chan %d\n",
+				  i, cfgCtl, pCtlMode[ctlMode], ctlIndex[i],
+				  chan->channel);
+
+			/*
+			 * Compare the test group from the regulatory
+			 * channel list with the test mode from the
+			 * pCtlMode list
+			 */
+			if ((((cfgCtl & ~CTL_MODE_M) |
+			      (pCtlMode[ctlMode] & CTL_MODE_M)) ==
+			     ctlIndex[i]) ||
+			    (((cfgCtl & ~CTL_MODE_M) |
+			      (pCtlMode[ctlMode] & CTL_MODE_M)) ==
+			     ((ctlIndex[i] & CTL_MODE_M) |
+			      SD_NO_CTL))) {
+				twiceMinEdgePower =
+					ar9003_hw_get_max_edge_power(pEepData,
+								     freq, i,
+								     is2ghz);
+
+				if ((cfgCtl & ~CTL_MODE_M) == SD_NO_CTL) {
+					/*
+					 * Find the minimum of all CTL
+					 * edge powers that apply to
+					 * this channel
+					 */
+					twiceMaxEdgePower =
+						min(twiceMaxEdgePower,
+						    twiceMinEdgePower);
+				} else {
+					/* specific */
+					twiceMaxEdgePower =
+						twiceMinEdgePower;
+					break;
+				}
+			}
+		}
+
+		minCtlPower = (u8)min(twiceMaxEdgePower, scaledPower);
+
+		ath_print(common, ATH_DBG_REGULATORY,
+			  "SEL-Min ctlMode %d pCtlMode %d 2xMaxEdge %d "
+			  "sP %d minCtlPwr %d\n",
+			  ctlMode, pCtlMode[ctlMode], twiceMaxEdgePower,
+			  scaledPower, minCtlPower);
+
+		/* Apply ctl mode to the correct target power set */
+		switch (pCtlMode[ctlMode]) {
+		case CTL_11B:
+			for (i = ALL_TARGET_LEGACY_1L_5L;
+			     i <= ALL_TARGET_LEGACY_11S; i++)
+				pPwrArray[i] =
+					(u8)min((u16)pPwrArray[i],
+						minCtlPower);
+			break;
+		case CTL_11A:
+		case CTL_11G:
+			for (i = ALL_TARGET_LEGACY_6_24;
+			     i <= ALL_TARGET_LEGACY_54; i++)
+				pPwrArray[i] =
+					(u8)min((u16)pPwrArray[i],
+						minCtlPower);
+			break;
+		case CTL_5GHT20:
+		case CTL_2GHT20:
+			for (i = ALL_TARGET_HT20_0_8_16;
+			     i <= ALL_TARGET_HT20_21; i++)
+				pPwrArray[i] =
+					(u8)min((u16)pPwrArray[i],
+						minCtlPower);
+			pPwrArray[ALL_TARGET_HT20_22] =
+				(u8)min((u16)pPwrArray[ALL_TARGET_HT20_22],
+					minCtlPower);
+			pPwrArray[ALL_TARGET_HT20_23] =
+				(u8)min((u16)pPwrArray[ALL_TARGET_HT20_23],
+					minCtlPower);
+			break;
+		case CTL_5GHT40:
+		case CTL_2GHT40:
+			for (i = ALL_TARGET_HT40_0_8_16;
+			     i <= ALL_TARGET_HT40_23; i++)
+				pPwrArray[i] =
+					(u8)min((u16)pPwrArray[i],
+						minCtlPower);
+			break;
+		default:
+			break;
+		}
+	} /* end ctl mode checking */
+}
+
 static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah,
 					struct ath9k_channel *chan, u16 cfgCtl,
 					u8 twiceAntennaReduction,
 					u8 twiceMaxRegulatoryPower,
 					u8 powerLimit)
 {
-	ah->txpower_limit = powerLimit;
-	ar9003_hw_set_target_power_eeprom(ah, chan->channel);
+	struct ath_common *common = ath9k_hw_common(ah);
+	u8 targetPowerValT2[ar9300RateSize];
+	unsigned int i = 0;
+
+	ar9003_hw_set_target_power_eeprom(ah, chan->channel, targetPowerValT2);
+	ar9003_hw_set_power_per_rate_table(ah, chan,
+					   targetPowerValT2, cfgCtl,
+					   twiceAntennaReduction,
+					   twiceMaxRegulatoryPower,
+					   powerLimit);
+
+	/* Four entries per debug line; ar9300RateSize is assumed to be a
+	 * multiple of four, as the unrolled increments below rely on. */
+	for (i = 0; i < ar9300RateSize; i += 4) {
+		ath_print(common, ATH_DBG_EEPROM,
+			  "TPC[%02d] 0x%08x TPC[%02d] 0x%08x "
+			  "TPC[%02d] 0x%08x TPC[%02d] 0x%08x\n\n",
+			  i, targetPowerValT2[i],
+			  i + 1, targetPowerValT2[i + 1],
+			  i + 2, targetPowerValT2[i + 2],
+			  i + 3, targetPowerValT2[i + 3]);
+	}
+
+	/* Write target power array to registers */
+	ar9003_hw_tx_power_regwrite(ah, targetPowerValT2);
+
+	/*
+	 * This is the TX power we send back to the driver core,
+	 * which it can pass on to userspace to display the
+	 * currently configured TX power setting.
+	 *
+	 * Since power is rate dependent, use one of the indices
+	 * from the AR9300_Rates enum to select an entry from
+	 * targetPowerValT2[] to report. Currently we return the
+	 * power for HT40 MCS 0, HT20 MCS 0, or OFDM 6 Mbps,
+	 * as CCK power is less interesting (?).
+	 */
+	i = ALL_TARGET_LEGACY_6_24; /* legacy */
+	if (IS_CHAN_HT40(chan))
+		i = ALL_TARGET_HT40_0_8_16; /* ht40 */
+	else if (IS_CHAN_HT20(chan))
+		i = ALL_TARGET_HT20_0_8_16; /* ht20 */
+
+	ah->txpower_limit = targetPowerValT2[i];
+
 	ar9003_hw_calibration_apply(ah, chan->channel);
 }
 
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
index 49e0c86..7c38229 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
@@ -577,10 +577,11 @@
 }
 
 void ar9003_paprd_populate_single_table(struct ath_hw *ah,
-					struct ath9k_channel *chan, int chain)
+					struct ath9k_hw_cal_data *caldata,
+					int chain)
 {
-	u32 *paprd_table_val = chan->pa_table[chain];
-	u32 small_signal_gain = chan->small_signal_gain[chain];
+	u32 *paprd_table_val = caldata->pa_table[chain];
+	u32 small_signal_gain = caldata->small_signal_gain[chain];
 	u32 training_power;
 	u32 reg = 0;
 	int i;
@@ -654,17 +655,17 @@
 }
 EXPORT_SYMBOL(ar9003_paprd_setup_gain_table);
 
-int ar9003_paprd_create_curve(struct ath_hw *ah, struct ath9k_channel *chan,
-			      int chain)
+int ar9003_paprd_create_curve(struct ath_hw *ah,
+			      struct ath9k_hw_cal_data *caldata, int chain)
 {
-	u16 *small_signal_gain = &chan->small_signal_gain[chain];
-	u32 *pa_table = chan->pa_table[chain];
+	u16 *small_signal_gain = &caldata->small_signal_gain[chain];
+	u32 *pa_table = caldata->pa_table[chain];
 	u32 *data_L, *data_U;
 	int i, status = 0;
 	u32 *buf;
 	u32 reg;
 
-	memset(chan->pa_table[chain], 0, sizeof(chan->pa_table[chain]));
+	memset(caldata->pa_table[chain], 0, sizeof(caldata->pa_table[chain]));
 
 	buf = kmalloc(2 * 48 * sizeof(u32), GFP_ATOMIC);
 	if (!buf)
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index a753a43..a491854 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -542,7 +542,11 @@
 		u32 reg = INI_RA(iniArr, i, 0);
 		u32 val = INI_RA(iniArr, i, column);
 
-		REG_WRITE(ah, reg, val);
+		if (reg >= 0x16000 && reg < 0x17000)
+			ath9k_hw_analog_shift_regwrite(ah, reg, val);
+		else
+			REG_WRITE(ah, reg, val);
+
 		DO_DELAY(regWrites);
 	}
 }
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 998ae2c..07f26ee 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -510,7 +510,7 @@
 #define SC_OP_BEACONS                BIT(1)
 #define SC_OP_RXAGGR                 BIT(2)
 #define SC_OP_TXAGGR                 BIT(3)
-#define SC_OP_FULL_RESET             BIT(4)
+#define SC_OP_OFFCHANNEL             BIT(4)
 #define SC_OP_PREAMBLE_SHORT         BIT(5)
 #define SC_OP_PROTECT_ENABLE         BIT(6)
 #define SC_OP_RXFLUSH                BIT(7)
@@ -609,6 +609,7 @@
 struct ath_wiphy {
 	struct ath_softc *sc; /* shared for all virtual wiphys */
 	struct ieee80211_hw *hw;
+	struct ath9k_hw_cal_data caldata;
 	enum ath_wiphy_state {
 		ATH_WIPHY_INACTIVE,
 		ATH_WIPHY_ACTIVE,
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index 139289e..4520869 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -22,23 +22,6 @@
 /* We can tune this as we go by monitoring really low values */
 #define ATH9K_NF_TOO_LOW	-60
 
-/* AR5416 may return very high value (like -31 dBm), in those cases the nf
- * is incorrect and we should use the static NF value. Later we can try to
- * find out why they are reporting these values */
-
-static bool ath9k_hw_nf_in_range(struct ath_hw *ah, s16 nf)
-{
-	if (nf > ATH9K_NF_TOO_LOW) {
-		ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
-			  "noise floor value detected (%d) is "
-			  "lower than what we think is a "
-			  "reasonable value (%d)\n",
-			  nf, ATH9K_NF_TOO_LOW);
-		return false;
-	}
-	return true;
-}
-
 static int16_t ath9k_hw_get_nf_hist_mid(int16_t *nfCalBuffer)
 {
 	int16_t nfval;
@@ -121,6 +104,19 @@
 	ah->cal_samples = 0;
 }
 
+static s16 ath9k_hw_get_default_nf(struct ath_hw *ah,
+				   struct ath9k_channel *chan)
+{
+	struct ath_nf_limits *limit;
+
+	if (!chan || IS_CHAN_2GHZ(chan))
+		limit = &ah->nf_2g;
+	else
+		limit = &ah->nf_5g;
+
+	return limit->nominal;
+}
+
 /* This is done for the currently configured channel */
 bool ath9k_hw_reset_calvalid(struct ath_hw *ah)
 {
@@ -128,7 +124,7 @@
 	struct ieee80211_conf *conf = &common->hw->conf;
 	struct ath9k_cal_list *currCal = ah->cal_list_curr;
 
-	if (!ah->curchan)
+	if (!ah->caldata)
 		return true;
 
 	if (!AR_SREV_9100(ah) && !AR_SREV_9160_10_OR_LATER(ah))
@@ -151,37 +147,55 @@
 		  "Resetting Cal %d state for channel %u\n",
 		  currCal->calData->calType, conf->channel->center_freq);
 
-	ah->curchan->CalValid &= ~currCal->calData->calType;
+	ah->caldata->CalValid &= ~currCal->calData->calType;
 	currCal->calState = CAL_WAITING;
 
 	return false;
 }
 EXPORT_SYMBOL(ath9k_hw_reset_calvalid);
 
-void ath9k_hw_start_nfcal(struct ath_hw *ah)
+void ath9k_hw_start_nfcal(struct ath_hw *ah, bool update)
 {
+	if (ah->caldata)
+		ah->caldata->nfcal_pending = true;
+
 	REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
 		    AR_PHY_AGC_CONTROL_ENABLE_NF);
-	REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
+
+	if (update)
+		REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
 		    AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
+	else
+		REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
+		    AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
+
 	REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
 }
 
 void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
 {
-	struct ath9k_nfcal_hist *h;
+	struct ath9k_nfcal_hist *h = NULL;
 	unsigned i, j;
 	int32_t val;
 	u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask;
 	struct ath_common *common = ath9k_hw_common(ah);
+	s16 default_nf = ath9k_hw_get_default_nf(ah, chan);
 
-	h = ah->nfCalHist;
+	if (ah->caldata)
+		h = ah->caldata->nfCalHist;
 
 	for (i = 0; i < NUM_NF_READINGS; i++) {
 		if (chainmask & (1 << i)) {
+			s16 nfval;
+
+			if (h)
+				nfval = h[i].privNF;
+			else
+				nfval = default_nf;
+
 			val = REG_READ(ah, ah->nf_regs[i]);
 			val &= 0xFFFFFE00;
-			val |= (((u32) (h[i].privNF) << 1) & 0x1ff);
+			val |= (((u32) nfval << 1) & 0x1ff);
 			REG_WRITE(ah, ah->nf_regs[i], val);
 		}
 	}
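
The bit manipulation in the loop above packs a signed noise-floor value into
the low nine bits of the register; a worked example with an illustrative
reading:

	s16 nfval = -110;			/* 0xff92 as 16-bit two's complement */
	u32 field = (((u32) nfval) << 1) & 0x1ff; /* -> 0x124, truncated to 9 bits */
	val = (val & 0xFFFFFE00) | field;	/* clear bits 8:0, then set them */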
@@ -277,22 +291,25 @@
 	}
 }
 
-int16_t ath9k_hw_getnf(struct ath_hw *ah,
-		       struct ath9k_channel *chan)
+bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan)
 {
 	struct ath_common *common = ath9k_hw_common(ah);
 	int16_t nf, nfThresh;
 	int16_t nfarray[NUM_NF_READINGS] = { 0 };
 	struct ath9k_nfcal_hist *h;
 	struct ieee80211_channel *c = chan->chan;
+	struct ath9k_hw_cal_data *caldata = ah->caldata;
+
+	if (!caldata)
+		return false;
 
 	chan->channelFlags &= (~CHANNEL_CW_INT);
 	if (REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF) {
 		ath_print(common, ATH_DBG_CALIBRATE,
 			  "NF did not complete in calibration window\n");
 		nf = 0;
-		chan->rawNoiseFloor = nf;
-		return chan->rawNoiseFloor;
+		caldata->rawNoiseFloor = nf;
+		return false;
 	} else {
 		ath9k_hw_do_getnf(ah, nfarray);
 		ath9k_hw_nf_sanitize(ah, nfarray);
@@ -307,47 +324,40 @@
 		}
 	}
 
-	h = ah->nfCalHist;
-
+	h = caldata->nfCalHist;
+	caldata->nfcal_pending = false;
 	ath9k_hw_update_nfcal_hist_buffer(h, nfarray);
-	chan->rawNoiseFloor = h[0].privNF;
-
-	return chan->rawNoiseFloor;
+	caldata->rawNoiseFloor = h[0].privNF;
+	return true;
 }
 
-void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah)
+void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah,
+				  struct ath9k_channel *chan)
 {
-	struct ath_nf_limits *limit;
+	struct ath9k_nfcal_hist *h;
+	s16 default_nf;
 	int i, j;
 
-	if (!ah->curchan || IS_CHAN_2GHZ(ah->curchan))
-		limit = &ah->nf_2g;
-	else
-		limit = &ah->nf_5g;
+	if (!ah->caldata)
+		return;
 
+	h = ah->caldata->nfCalHist;
+	default_nf = ath9k_hw_get_default_nf(ah, chan);
 	for (i = 0; i < NUM_NF_READINGS; i++) {
-		ah->nfCalHist[i].currIndex = 0;
-		ah->nfCalHist[i].privNF = limit->nominal;
-		ah->nfCalHist[i].invalidNFcount =
-			AR_PHY_CCA_FILTERWINDOW_LENGTH;
+		h[i].currIndex = 0;
+		h[i].privNF = default_nf;
+		h[i].invalidNFcount = AR_PHY_CCA_FILTERWINDOW_LENGTH;
 		for (j = 0; j < ATH9K_NF_CAL_HIST_MAX; j++) {
-			ah->nfCalHist[i].nfCalBuffer[j] = limit->nominal;
+			h[i].nfCalBuffer[j] = default_nf;
 		}
 	}
 }
 
 s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan)
 {
-	s16 nf;
+	if (!ah->caldata || !ah->caldata->rawNoiseFloor)
+		return ath9k_hw_get_default_nf(ah, chan);
 
-	if (chan->rawNoiseFloor == 0)
-		nf = -96;
-	else
-		nf = chan->rawNoiseFloor;
-
-	if (!ath9k_hw_nf_in_range(ah, nf))
-		nf = ATH_DEFAULT_NOISE_FLOOR;
-
-	return nf;
+	return ah->caldata->rawNoiseFloor;
 }
 EXPORT_SYMBOL(ath9k_hw_getchan_noise);
diff --git a/drivers/net/wireless/ath/ath9k/calib.h b/drivers/net/wireless/ath/ath9k/calib.h
index cd60d09..0a304b3 100644
--- a/drivers/net/wireless/ath/ath9k/calib.h
+++ b/drivers/net/wireless/ath/ath9k/calib.h
@@ -108,11 +108,11 @@
 };
 
 bool ath9k_hw_reset_calvalid(struct ath_hw *ah);
-void ath9k_hw_start_nfcal(struct ath_hw *ah);
+void ath9k_hw_start_nfcal(struct ath_hw *ah, bool update);
 void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan);
-int16_t ath9k_hw_getnf(struct ath_hw *ah,
-		       struct ath9k_channel *chan);
-void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah);
+bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan);
+void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah,
+				  struct ath9k_channel *chan);
 s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan);
 void ath9k_hw_reset_calibration(struct ath_hw *ah,
 				struct ath9k_cal_list *currCal);
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 3756400..43b9e21 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -353,6 +353,8 @@
 	u16 seq_no;
 	u32 bmiss_cnt;
 
+	struct ath9k_hw_cal_data caldata[38]; /* per channel, indexed by hw_value */
+
 	spinlock_t beacon_lock;
 
 	bool tx_queues_stop;
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index cf9bcc6..ebed9d1 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -125,6 +125,7 @@
 	struct ieee80211_conf *conf = &common->hw->conf;
 	bool fastcc = true;
 	struct ieee80211_channel *channel = hw->conf.channel;
+	struct ath9k_hw_cal_data *caldata;
 	enum htc_phymode mode;
 	__be16 htc_mode;
 	u8 cmd_rsp;
@@ -149,7 +150,8 @@
 		  priv->ah->curchan->channel,
 		  channel->center_freq, conf_is_ht(conf), conf_is_ht40(conf));
 
-	ret = ath9k_hw_reset(ah, hchan, fastcc);
+	caldata = &priv->caldata[channel->hw_value];
+	ret = ath9k_hw_reset(ah, hchan, caldata, fastcc);
 	if (ret) {
 		ath_print(common, ATH_DBG_FATAL,
 			  "Unable to reset channel (%u Mhz) "
@@ -1028,7 +1030,7 @@
 		ah->curchan = ath9k_cmn_get_curchannel(hw, ah);
 
 	/* Reset the HW */
-	ret = ath9k_hw_reset(ah, ah->curchan, false);
+	ret = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
 	if (ret) {
 		ath_print(common, ATH_DBG_FATAL,
 			  "Unable to reset hardware; reset status %d "
@@ -1091,7 +1093,7 @@
 		ah->curchan = ath9k_cmn_get_curchannel(hw, ah);
 
 	/* Reset the HW */
-	ret = ath9k_hw_reset(ah, ah->curchan, false);
+	ret = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
 	if (ret) {
 		ath_print(common, ATH_DBG_FATAL,
 			  "Unable to reset hardware; reset status %d "
@@ -1179,7 +1181,7 @@
 	ath9k_hw_configpcipowersave(ah, 0, 0);
 
 	ath9k_hw_htc_resetinit(ah);
-	ret = ath9k_hw_reset(ah, init_channel, false);
+	ret = ath9k_hw_reset(ah, init_channel, ah->caldata, false);
 	if (ret) {
 		ath_print(common, ATH_DBG_FATAL,
 			  "Unable to reset hardware; reset status %d "
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 8d291cc..3384ca1 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -610,7 +610,6 @@
 	else
 		ah->tx_trig_level = (AR_FTRIG_512B >> AR_FTRIG_S);
 
-	ath9k_init_nfcal_hist_buffer(ah);
 	ah->bb_watchdog_timeout_ms = 25;
 
 	common->state = ATH_HW_INITIALIZED;
@@ -1183,9 +1182,6 @@
 
 	ath9k_hw_spur_mitigate_freq(ah, chan);
 
-	if (!chan->oneTimeCalsDone)
-		chan->oneTimeCalsDone = true;
-
 	return true;
 }
 
@@ -1218,7 +1214,7 @@
 EXPORT_SYMBOL(ath9k_hw_check_alive);
 
 int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
-		    bool bChannelChange)
+		   struct ath9k_hw_cal_data *caldata, bool bChannelChange)
 {
 	struct ath_common *common = ath9k_hw_common(ah);
 	u32 saveLedState;
@@ -1243,9 +1239,19 @@
 	if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
 		return -EIO;
 
-	if (curchan && !ah->chip_fullsleep)
+	if (curchan && !ah->chip_fullsleep && ah->caldata)
 		ath9k_hw_getnf(ah, curchan);
 
+	ah->caldata = caldata;
+	if (caldata &&
+	    (chan->channel != caldata->channel ||
+	     (chan->channelFlags & ~CHANNEL_CW_INT) !=
+	     (caldata->channelFlags & ~CHANNEL_CW_INT))) {
+		/* Operating channel changed, reset channel calibration data */
+		memset(caldata, 0, sizeof(*caldata));
+		ath9k_init_nfcal_hist_buffer(ah, chan);
+	}
+
 	if (bChannelChange &&
 	    (ah->chip_fullsleep != true) &&
 	    (ah->curchan != NULL) &&
@@ -1256,7 +1262,7 @@
 
 		if (ath9k_hw_channel_change(ah, chan)) {
 			ath9k_hw_loadnf(ah, ah->curchan);
-			ath9k_hw_start_nfcal(ah);
+			ath9k_hw_start_nfcal(ah, true);
 			return 0;
 		}
 	}
@@ -1461,11 +1467,8 @@
 	if (ah->btcoex_hw.enabled)
 		ath9k_hw_btcoex_enable(ah);
 
-	if (AR_SREV_9300_20_OR_LATER(ah)) {
-		ath9k_hw_loadnf(ah, curchan);
-		ath9k_hw_start_nfcal(ah);
+	if (AR_SREV_9300_20_OR_LATER(ah))
 		ar9003_hw_bb_watchdog_config(ah);
-	}
 
 	return 0;
 }
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 2d30efc..399f7c1 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -346,19 +346,25 @@
 	 CHANNEL_HT40PLUS |			\
 	 CHANNEL_HT40MINUS)
 
+struct ath9k_hw_cal_data {
+	u16 channel;
+	u32 channelFlags;
+	int32_t CalValid;
+	int8_t iCoff;
+	int8_t qCoff;
+	int16_t rawNoiseFloor;
+	bool paprd_done;
+	bool nfcal_pending;
+	u16 small_signal_gain[AR9300_MAX_CHAINS];
+	u32 pa_table[AR9300_MAX_CHAINS][PAPRD_TABLE_SZ];
+	struct ath9k_nfcal_hist nfCalHist[NUM_NF_READINGS];
+};
+
 struct ath9k_channel {
 	struct ieee80211_channel *chan;
 	u16 channel;
 	u32 channelFlags;
 	u32 chanmode;
-	int32_t CalValid;
-	bool oneTimeCalsDone;
-	int8_t iCoff;
-	int8_t qCoff;
-	int16_t rawNoiseFloor;
-	bool paprd_done;
-	u16 small_signal_gain[AR9300_MAX_CHAINS];
-	u32 pa_table[AR9300_MAX_CHAINS][PAPRD_TABLE_SZ];
 };
 
 #define IS_CHAN_G(_c) ((((_c)->channelFlags & (CHANNEL_G)) == CHANNEL_G) || \
@@ -669,7 +675,7 @@
 	enum nl80211_iftype opmode;
 	enum ath9k_power_mode power_mode;
 
-	struct ath9k_nfcal_hist nfCalHist[NUM_NF_READINGS];
+	struct ath9k_hw_cal_data *caldata;
 	struct ath9k_pacal_info pacal_info;
 	struct ar5416Stats stats;
 	struct ath9k_tx_queue_info txq[ATH9K_NUM_TX_QUEUES];
@@ -863,7 +869,7 @@
 void ath9k_hw_deinit(struct ath_hw *ah);
 int ath9k_hw_init(struct ath_hw *ah);
 int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
-		   bool bChannelChange);
+		   struct ath9k_hw_cal_data *caldata, bool bChannelChange);
 int ath9k_hw_fill_cap_info(struct ath_hw *ah);
 u32 ath9k_regd_get_ctl(struct ath_regulatory *reg, struct ath9k_channel *chan);
 
@@ -958,9 +964,10 @@
 void ar9003_hw_bb_watchdog_dbg_info(struct ath_hw *ah);
 void ar9003_paprd_enable(struct ath_hw *ah, bool val);
 void ar9003_paprd_populate_single_table(struct ath_hw *ah,
-					struct ath9k_channel *chan, int chain);
-int ar9003_paprd_create_curve(struct ath_hw *ah, struct ath9k_channel *chan,
-			      int chain);
+					struct ath9k_hw_cal_data *caldata,
+					int chain);
+int ar9003_paprd_create_curve(struct ath_hw *ah,
+			      struct ath9k_hw_cal_data *caldata, int chain);
 int ar9003_paprd_setup_gain_table(struct ath_hw *ah, int chain);
 int ar9003_paprd_init_table(struct ath_hw *ah);
 bool ar9003_paprd_is_done(struct ath_hw *ah);
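
The net effect of the signature changes above: callers now own the per-channel
calibration state and hand it to ath9k_hw_reset(), passing NULL for visits that
should not disturb it. A sketch of the calling convention, following the main.c
hunk below:

	struct ath9k_hw_cal_data *caldata = NULL;

	if (!(sc->sc_flags & SC_OP_OFFCHANNEL))
		caldata = &aphy->caldata;	/* long-lived, per-wiphy */

	/* NULL caldata: off-channel visit, keep the operating channel's
	 * NF history and PAPRD tables intact. */
	r = ath9k_hw_reset(ah, hchan, caldata, fastcc);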
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 0429dda..3caa323 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -154,6 +154,27 @@
 	spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
 }
 
+static void ath_start_ani(struct ath_common *common)
+{
+	struct ath_hw *ah = common->ah;
+	unsigned long timestamp = jiffies_to_msecs(jiffies);
+	struct ath_softc *sc = (struct ath_softc *) common->priv;
+
+	if (!(sc->sc_flags & SC_OP_ANI_RUN))
+		return;
+
+	if (sc->sc_flags & SC_OP_OFFCHANNEL)
+		return;
+
+	common->ani.longcal_timer = timestamp;
+	common->ani.shortcal_timer = timestamp;
+	common->ani.checkani_timer = timestamp;
+
+	mod_timer(&common->ani.timer,
+		  jiffies +
+			msecs_to_jiffies((u32)ah->config.ani_poll_interval));
+}
+
 /*
  * Set/change channels.  If the channel is really being changed, it's done
  * by reseting the chip.  To accomplish this we must first cleanup any pending
@@ -162,16 +183,23 @@
 int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
 		    struct ath9k_channel *hchan)
 {
+	struct ath_wiphy *aphy = hw->priv;
 	struct ath_hw *ah = sc->sc_ah;
 	struct ath_common *common = ath9k_hw_common(ah);
 	struct ieee80211_conf *conf = &common->hw->conf;
 	bool fastcc = true, stopped;
 	struct ieee80211_channel *channel = hw->conf.channel;
+	struct ath9k_hw_cal_data *caldata = NULL;
 	int r;
 
 	if (sc->sc_flags & SC_OP_INVALID)
 		return -EIO;
 
+	del_timer_sync(&common->ani.timer);
+	cancel_work_sync(&sc->paprd_work);
+	cancel_work_sync(&sc->hw_check_work);
+	cancel_delayed_work_sync(&sc->tx_complete_work);
+
 	ath9k_ps_wakeup(sc);
 
 	/*
@@ -191,9 +219,12 @@
 	 * to flush data frames already in queue because of
 	 * changing channel. */
 
-	if (!stopped || (sc->sc_flags & SC_OP_FULL_RESET))
+	if (!stopped || !(sc->sc_flags & SC_OP_OFFCHANNEL))
 		fastcc = false;
 
+	if (!(sc->sc_flags & SC_OP_OFFCHANNEL))
+		caldata = &aphy->caldata;
+
 	ath_print(common, ATH_DBG_CONFIG,
 		  "(%u MHz) -> (%u MHz), conf_is_ht40: %d\n",
 		  sc->sc_ah->curchan->channel,
@@ -201,7 +232,7 @@
 
 	spin_lock_bh(&sc->sc_resetlock);
 
-	r = ath9k_hw_reset(ah, hchan, fastcc);
+	r = ath9k_hw_reset(ah, hchan, caldata, fastcc);
 	if (r) {
 		ath_print(common, ATH_DBG_FATAL,
 			  "Unable to reset channel (%u MHz), "
@@ -212,8 +243,6 @@
 	}
 	spin_unlock_bh(&sc->sc_resetlock);
 
-	sc->sc_flags &= ~SC_OP_FULL_RESET;
-
 	if (ath_startrecv(sc) != 0) {
 		ath_print(common, ATH_DBG_FATAL,
 			  "Unable to restart recv logic\n");
@@ -225,6 +254,12 @@
 	ath_update_txpow(sc);
 	ath9k_hw_set_interrupts(ah, ah->imask);
 
+	if (!(sc->sc_flags & (SC_OP_OFFCHANNEL | SC_OP_SCANNING))) {
+		ath_start_ani(common);
+		ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
+		ath_beacon_config(sc, NULL);
+	}
+
  ps_restore:
 	ath9k_ps_restore(sc);
 	return r;
@@ -233,17 +268,19 @@
 static void ath_paprd_activate(struct ath_softc *sc)
 {
 	struct ath_hw *ah = sc->sc_ah;
+	struct ath9k_hw_cal_data *caldata = ah->caldata;
 	int chain;
 
-	if (!ah->curchan->paprd_done)
+	if (!caldata || !caldata->paprd_done)
 		return;
 
 	ath9k_ps_wakeup(sc);
+	ar9003_paprd_enable(ah, false);
 	for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
 		if (!(ah->caps.tx_chainmask & BIT(chain)))
 			continue;
 
-		ar9003_paprd_populate_single_table(ah, ah->curchan, chain);
+		ar9003_paprd_populate_single_table(ah, caldata, chain);
 	}
 
 	ar9003_paprd_enable(ah, true);
@@ -261,6 +298,7 @@
 	int band = hw->conf.channel->band;
 	struct ieee80211_supported_band *sband = &sc->sbands[band];
 	struct ath_tx_control txctl;
+	struct ath9k_hw_cal_data *caldata = ah->caldata;
 	int qnum, ftype;
 	int chain_ok = 0;
 	int chain;
@@ -268,6 +306,9 @@
 	int time_left;
 	int i;
 
+	if (!caldata)
+		return;
+
 	skb = alloc_skb(len, GFP_KERNEL);
 	if (!skb)
 		return;
@@ -322,7 +363,7 @@
 		if (!ar9003_paprd_is_done(ah))
 			break;
 
-		if (ar9003_paprd_create_curve(ah, ah->curchan, chain) != 0)
+		if (ar9003_paprd_create_curve(ah, caldata, chain) != 0)
 			break;
 
 		chain_ok = 1;
@@ -330,7 +371,7 @@
 	kfree_skb(skb);
 
 	if (chain_ok) {
-		ah->curchan->paprd_done = true;
+		caldata->paprd_done = true;
 		ath_paprd_activate(sc);
 	}
 
@@ -439,33 +480,14 @@
 		cal_interval = min(cal_interval, (u32)short_cal_interval);
 
 	mod_timer(&common->ani.timer, jiffies + msecs_to_jiffies(cal_interval));
-	if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_PAPRD) &&
-	    !(sc->sc_flags & SC_OP_SCANNING)) {
-		if (!sc->sc_ah->curchan->paprd_done)
+	if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_PAPRD) && ah->caldata) {
+		if (!ah->caldata->paprd_done)
 			ieee80211_queue_work(sc->hw, &sc->paprd_work);
 		else
 			ath_paprd_activate(sc);
 	}
 }
 
-static void ath_start_ani(struct ath_common *common)
-{
-	struct ath_hw *ah = common->ah;
-	unsigned long timestamp = jiffies_to_msecs(jiffies);
-	struct ath_softc *sc = (struct ath_softc *) common->priv;
-
-	if (!(sc->sc_flags & SC_OP_ANI_RUN))
-		return;
-
-	common->ani.longcal_timer = timestamp;
-	common->ani.shortcal_timer = timestamp;
-	common->ani.checkani_timer = timestamp;
-
-	mod_timer(&common->ani.timer,
-		  jiffies +
-			msecs_to_jiffies((u32)ah->config.ani_poll_interval));
-}
-
 /*
  * Update tx/rx chainmask. For legacy association,
  * hard code chainmask to 1x1, for 11n association, use
@@ -477,7 +499,7 @@
 	struct ath_hw *ah = sc->sc_ah;
 	struct ath_common *common = ath9k_hw_common(ah);
 
-	if ((sc->sc_flags & SC_OP_SCANNING) || is_ht ||
+	if ((sc->sc_flags & SC_OP_OFFCHANNEL) || is_ht ||
 	    (ah->btcoex_hw.scheme != ATH_BTCOEX_CFG_NONE)) {
 		common->tx_chainmask = ah->caps.tx_chainmask;
 		common->rx_chainmask = ah->caps.rx_chainmask;
@@ -817,7 +839,7 @@
 		ah->curchan = ath_get_curchannel(sc, sc->hw);
 
 	spin_lock_bh(&sc->sc_resetlock);
-	r = ath9k_hw_reset(ah, ah->curchan, false);
+	r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
 	if (r) {
 		ath_print(common, ATH_DBG_FATAL,
 			  "Unable to reset channel (%u MHz), "
@@ -877,7 +899,7 @@
 		ah->curchan = ath_get_curchannel(sc, hw);
 
 	spin_lock_bh(&sc->sc_resetlock);
-	r = ath9k_hw_reset(ah, ah->curchan, false);
+	r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
 	if (r) {
 		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
 			  "Unable to reset channel (%u MHz), "
@@ -910,7 +932,7 @@
 	ath_flushrecv(sc);
 
 	spin_lock_bh(&sc->sc_resetlock);
-	r = ath9k_hw_reset(ah, sc->sc_ah->curchan, false);
+	r = ath9k_hw_reset(ah, sc->sc_ah->curchan, ah->caldata, false);
 	if (r)
 		ath_print(common, ATH_DBG_FATAL,
 			  "Unable to reset hardware; reset status %d\n", r);
@@ -1085,7 +1107,7 @@
 	 * and then setup of the interrupt mask.
 	 */
 	spin_lock_bh(&sc->sc_resetlock);
-	r = ath9k_hw_reset(ah, init_channel, false);
+	r = ath9k_hw_reset(ah, init_channel, ah->caldata, false);
 	if (r) {
 		ath_print(common, ATH_DBG_FATAL,
 			  "Unable to reset hardware; reset status %d "
@@ -1579,6 +1601,10 @@
 
 		aphy->chan_idx = pos;
 		aphy->chan_is_ht = conf_is_ht(conf);
+		if (hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+			sc->sc_flags |= SC_OP_OFFCHANNEL;
+		else
+			sc->sc_flags &= ~SC_OP_OFFCHANNEL;
 
 		if (aphy->state == ATH_WIPHY_SCAN ||
 		    aphy->state == ATH_WIPHY_ACTIVE)
@@ -1990,7 +2016,6 @@
 {
 	struct ath_wiphy *aphy = hw->priv;
 	struct ath_softc *sc = aphy->sc;
-	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 
 	mutex_lock(&sc->mutex);
 	if (ath9k_wiphy_scanning(sc)) {
@@ -2008,10 +2033,6 @@
 	aphy->state = ATH_WIPHY_SCAN;
 	ath9k_wiphy_pause_all_forced(sc, aphy);
 	sc->sc_flags |= SC_OP_SCANNING;
-	del_timer_sync(&common->ani.timer);
-	cancel_work_sync(&sc->paprd_work);
-	cancel_work_sync(&sc->hw_check_work);
-	cancel_delayed_work_sync(&sc->tx_complete_work);
 	mutex_unlock(&sc->mutex);
 }
 
@@ -2023,15 +2044,10 @@
 {
 	struct ath_wiphy *aphy = hw->priv;
 	struct ath_softc *sc = aphy->sc;
-	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 
 	mutex_lock(&sc->mutex);
 	aphy->state = ATH_WIPHY_ACTIVE;
 	sc->sc_flags &= ~SC_OP_SCANNING;
-	sc->sc_flags |= SC_OP_FULL_RESET;
-	ath_start_ani(common);
-	ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
-	ath_beacon_config(sc, NULL);
 	mutex_unlock(&sc->mutex);
 }
 
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index da0cfe9..a3fc987 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -1140,6 +1140,11 @@
 		if (flush)
 			goto requeue;
 
+		retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
+						 rxs, &decrypt_error);
+		if (retval)
+			goto requeue;
+
 		rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp;
 		if (rs.rs_tstamp > tsf_lower &&
 		    unlikely(rs.rs_tstamp - tsf_lower > 0x10000000))
@@ -1149,11 +1154,6 @@
 		    unlikely(tsf_lower - rs.rs_tstamp > 0x10000000))
 			rxs->mactime += 0x100000000ULL;
 
-		retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
-						 rxs, &decrypt_error);
-		if (retval)
-			goto requeue;
-
 		/* Ensure we always have an skb to requeue once we are done
 		 * processing the current buffer's skb */
 		requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 501b728..4dda14e 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -120,26 +120,14 @@
 	list_add_tail(&ac->list, &txq->axq_acq);
 }
 
-static void ath_tx_pause_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
-{
-	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
-
-	spin_lock_bh(&txq->axq_lock);
-	tid->paused++;
-	spin_unlock_bh(&txq->axq_lock);
-}
-
 static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
 {
 	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
 
-	BUG_ON(tid->paused <= 0);
+	WARN_ON(!tid->paused);
+
 	spin_lock_bh(&txq->axq_lock);
-
-	tid->paused--;
-
-	if (tid->paused > 0)
-		goto unlock;
+	tid->paused = false;
 
 	if (list_empty(&tid->buf_q))
 		goto unlock;
@@ -157,15 +145,10 @@
 	struct list_head bf_head;
 	INIT_LIST_HEAD(&bf_head);
 
-	BUG_ON(tid->paused <= 0);
+	WARN_ON(!tid->paused);
+
 	spin_lock_bh(&txq->axq_lock);
-
-	tid->paused--;
-
-	if (tid->paused > 0) {
-		spin_unlock_bh(&txq->axq_lock);
-		return;
-	}
+	tid->paused = false;
 
 	while (!list_empty(&tid->buf_q)) {
 		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
@@ -811,7 +794,7 @@
 	an = (struct ath_node *)sta->drv_priv;
 	txtid = ATH_AN_2_TID(an, tid);
 	txtid->state |= AGGR_ADDBA_PROGRESS;
-	ath_tx_pause_tid(sc, txtid);
+	txtid->paused = true;
 	*ssn = txtid->seq_start;
 }
 
@@ -835,10 +818,9 @@
 		return;
 	}
 
-	ath_tx_pause_tid(sc, txtid);
-
 	/* drop all software retried frames and mark this TID */
 	spin_lock_bh(&txq->axq_lock);
+	txtid->paused = true;
 	while (!list_empty(&txtid->buf_q)) {
 		bf = list_first_entry(&txtid->buf_q, struct ath_buf, list);
 		if (!bf_isretried(bf)) {
@@ -1181,7 +1163,7 @@
 			  "Failed to stop TX DMA. Resetting hardware!\n");
 
 		spin_lock_bh(&sc->sc_resetlock);
-		r = ath9k_hw_reset(ah, sc->sc_ah->curchan, false);
+		r = ath9k_hw_reset(ah, sc->sc_ah->curchan, ah->caldata, false);
 		if (r)
 			ath_print(common, ATH_DBG_FATAL,
 				  "Unable to reset hardware; reset status %d\n",
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 5bbff4c..a146240 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -1924,6 +1924,10 @@
 		bg_band->channels =
 			kzalloc(geo->bg_channels *
 				sizeof(struct ieee80211_channel), GFP_KERNEL);
+		if (!bg_band->channels) {
+			ipw2100_down(priv);
+			return -ENOMEM;
+		}
 		/* translate geo->bg to bg_band.channels */
 		for (i = 0; i < geo->bg_channels; i++) {
 			bg_band->channels[i].band = IEEE80211_BAND_2GHZ;
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c
index f052c6d..d706b8a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c
@@ -980,7 +980,7 @@
 			 le32_to_cpu(bt->lo_priority_tx_req_cnt),
 			 accum_bt->lo_priority_tx_req_cnt);
 	pos += scnprintf(buf + pos, bufsz - pos,
-			 "lo_priority_rx_denied_cnt:\t%u\t\t\t%u\n",
+			 "lo_priority_tx_denied_cnt:\t%u\t\t\t%u\n",
 			 le32_to_cpu(bt->lo_priority_tx_denied_cnt),
 			 accum_bt->lo_priority_tx_denied_cnt);
 	pos += scnprintf(buf + pos, bufsz - pos,
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index 35c86d2..23e5c42 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -300,8 +300,9 @@
 				      struct ieee80211_sta *sta)
 {
 	int ret = -EAGAIN;
+	u32 load = rs_tl_get_load(lq_data, tid);
 
-	if (rs_tl_get_load(lq_data, tid) > IWL_AGG_LOAD_THRESHOLD) {
+	if (load > IWL_AGG_LOAD_THRESHOLD) {
 		IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n",
 				sta->addr, tid);
 		ret = ieee80211_start_tx_ba_session(sta, tid);
@@ -311,12 +312,14 @@
 			 * this might be cause by reloading firmware
 			 * stop the tx ba session here
 			 */
-			IWL_DEBUG_HT(priv, "Fail start Tx agg on tid: %d\n",
+			IWL_ERR(priv, "Failed to start Tx agg on tid: %d\n",
 				tid);
 			ieee80211_stop_tx_ba_session(sta, tid);
 		}
-	} else
-		IWL_ERR(priv, "Fail finding valid aggregation tid: %d\n", tid);
+	} else {
+		IWL_ERR(priv, "Aggregation not enabled for tid %d "
+			"because load = %u\n", tid, load);
+	}
 	return ret;
 }
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
index 55a1b31..ecb953f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
@@ -1331,7 +1331,14 @@
 	tid = ba_resp->tid;
 	agg = &priv->stations[sta_id].tid[tid].agg;
 	if (unlikely(agg->txq_id != scd_flow)) {
-		IWL_ERR(priv, "BA scd_flow %d does not match txq_id %d\n",
+		/*
+		 * FIXME: this is a uCode bug which needs to be addressed;
+		 * log the information and return for now.
+		 * Since it can happen very often, and in order not to
+		 * fill the syslog, the logging is not enabled by default.
+		 */
+		IWL_DEBUG_TX_REPLY(priv,
+			"BA scd_flow %d does not match txq_id %d\n",
 			scd_flow, agg->txq_id);
 		return;
 	}
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 8024d44..8ccb6d2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -2000,6 +2000,7 @@
 			      struct ieee80211_vif *vif)
 {
 	struct iwl_priv *priv = hw->priv;
+	bool scan_completed = false;
 
 	IWL_DEBUG_MAC80211(priv, "enter\n");
 
@@ -2013,7 +2014,7 @@
 	if (priv->vif == vif) {
 		priv->vif = NULL;
 		if (priv->scan_vif == vif) {
-			ieee80211_scan_completed(priv->hw, true);
+			scan_completed = true;
 			priv->scan_vif = NULL;
 			priv->scan_request = NULL;
 		}
@@ -2021,6 +2022,9 @@
 	}
 	mutex_unlock(&priv->mutex);
 
+	if (scan_completed)
+		ieee80211_scan_completed(priv->hw, true);
+
 	IWL_DEBUG_MAC80211(priv, "leave\n");
 
 }
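
The reshuffle above follows a common locking pattern: record the event while
holding priv->mutex, but invoke the mac80211 callback only after dropping it,
so that anything ieee80211_scan_completed() re-enters cannot deadlock on the
same mutex. The shape, reduced to its core:

	bool scan_completed = false;

	mutex_lock(&priv->mutex);
	if (priv->scan_vif == vif) {
		scan_completed = true;		/* just remember it */
		priv->scan_vif = NULL;
	}
	mutex_unlock(&priv->mutex);

	if (scan_completed)			/* callback outside the lock */
		ieee80211_scan_completed(priv->hw, true);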
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index 5c2bcef..0b961a3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -71,7 +71,7 @@
 #define IWL_DEBUG(__priv, level, fmt, args...)
 #define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
-				      void *p, u32 len)
+				      const void *p, u32 len)
 {}
 #endif				/* CONFIG_IWLWIFI_DEBUG */
 
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index 822f8dc..71a101f 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -43,6 +43,8 @@
 	{ PCI_DEVICE(0x1260, 0x3886) },
 	/* Intersil PRISM Xbow Wireless LAN adapter (Symbol AP-300) */
 	{ PCI_DEVICE(0x1260, 0xffff) },
+	/* Standard Microsystems Corp SMC2802W Wireless PCI */
+	{ PCI_DEVICE(0x10b8, 0x2802) },
 	{ },
 };
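
Adding the SMC2802W needs only this table entry, since probing is table-driven
and the ID table is exported for hotplug matching. A reduced sketch (table name
abbreviated from the driver):

	static struct pci_device_id p54p_table[] = {
		/* Standard Microsystems Corp SMC2802W Wireless PCI */
		{ PCI_DEVICE(0x10b8, 0x2802) },
		{ },
	};
	MODULE_DEVICE_TABLE(pci, p54p_table);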
 
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index abff893..9c38fc3 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -97,7 +97,6 @@
 static const struct iw_handler_def ray_handler_def;
 
 /***** Prototypes for raylink functions **************************************/
-static int asc_to_int(char a);
 static void authenticate(ray_dev_t *local);
 static int build_auth_frame(ray_dev_t *local, UCHAR *dest, int auth_type);
 static void authenticate_timeout(u_long);
@@ -1717,24 +1716,6 @@
 }
 
 /*===========================================================================*/
-static int asc_to_int(char a)
-{
-	if (a < '0')
-		return -1;
-	if (a <= '9')
-		return (a - '0');
-	if (a < 'A')
-		return -1;
-	if (a <= 'F')
-		return (10 + a - 'A');
-	if (a < 'a')
-		return -1;
-	if (a <= 'f')
-		return (10 + a - 'a');
-	return -1;
-}
-
-/*===========================================================================*/
 static int parse_addr(char *in_str, UCHAR *out)
 {
 	int len;
@@ -1754,14 +1735,14 @@
 	i = 5;
 
 	while (j > 0) {
-		if ((k = asc_to_int(in_str[j--])) != -1)
+		if ((k = hex_to_bin(in_str[j--])) != -1)
 			out[i] = k;
 		else
 			return 0;
 
 		if (j == 0)
 			break;
-		if ((k = asc_to_int(in_str[j--])) != -1)
+		if ((k = hex_to_bin(in_str[j--])) != -1)
 			out[i] += k << 4;
 		else
 			return 0;
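
hex_to_bin() (lib/hexdump.c) returns the nibble value 0-15, or -1 for a
non-hex character, which is exactly the contract the removed asc_to_int()
provided. Pairing two nibbles into a byte:

	int hi = hex_to_bin('a');		/* 10 */
	int lo = hex_to_bin('7');		/* 7  */
	unsigned char byte = (hi << 4) | lo;	/* 0xa7 */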
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index 19b262e..63c2cc4 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -240,16 +240,16 @@
 	struct rt2x00_dev *rt2x00dev;
 	int retval;
 
-	retval = pci_request_regions(pci_dev, pci_name(pci_dev));
-	if (retval) {
-		ERROR_PROBE("PCI request regions failed.\n");
-		return retval;
-	}
-
 	retval = pci_enable_device(pci_dev);
 	if (retval) {
 		ERROR_PROBE("Enable device failed.\n");
-		goto exit_release_regions;
+		return retval;
+	}
+
+	retval = pci_request_regions(pci_dev, pci_name(pci_dev));
+	if (retval) {
+		ERROR_PROBE("PCI request regions failed.\n");
+		goto exit_disable_device;
 	}
 
 	pci_set_master(pci_dev);
@@ -260,14 +260,14 @@
 	if (dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32))) {
 		ERROR_PROBE("PCI DMA not supported.\n");
 		retval = -EIO;
-		goto exit_disable_device;
+		goto exit_release_regions;
 	}
 
 	hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw);
 	if (!hw) {
 		ERROR_PROBE("Failed to allocate hardware.\n");
 		retval = -ENOMEM;
-		goto exit_disable_device;
+		goto exit_release_regions;
 	}
 
 	pci_set_drvdata(pci_dev, hw);
@@ -300,13 +300,12 @@
 exit_free_device:
 	ieee80211_free_hw(hw);
 
-exit_disable_device:
-	if (retval != -EBUSY)
-		pci_disable_device(pci_dev);
-
 exit_release_regions:
 	pci_release_regions(pci_dev);
 
+exit_disable_device:
+	pci_disable_device(pci_dev);
+
 	pci_set_drvdata(pci_dev, NULL);
 
 	return retval;
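
The reordering above makes the probe path acquire resources in a fixed order
and unwind them strictly in reverse on failure; the usual rationale is that BAR
resources may not be assigned until the device is enabled, so enabling must
precede pci_request_regions(). The resulting shape, sketched:

	static int probe_sketch(struct pci_dev *pci_dev)
	{
		int retval;

		retval = pci_enable_device(pci_dev);	/* acquire #1 */
		if (retval)
			return retval;

		retval = pci_request_regions(pci_dev, pci_name(pci_dev)); /* #2 */
		if (retval)
			goto exit_disable_device;

		if (dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32))) {
			retval = -EIO;
			goto exit_release_regions;	/* unwind in reverse */
		}

		return 0;

	exit_release_regions:
		pci_release_regions(pci_dev);		/* undo #2 */
	exit_disable_device:
		pci_disable_device(pci_dev);		/* undo #1 */
		return retval;
	}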
diff --git a/drivers/net/wireless/wl12xx/wl1271_spi.c b/drivers/net/wireless/wl12xx/wl1271_spi.c
index 96d25fb..4cb99c5 100644
--- a/drivers/net/wireless/wl12xx/wl1271_spi.c
+++ b/drivers/net/wireless/wl12xx/wl1271_spi.c
@@ -160,9 +160,8 @@
 	spi_message_add_tail(&t, &m);
 
 	spi_sync(wl_to_spi(wl), &m);
-	kfree(cmd);
-
 	wl1271_dump(DEBUG_SPI, "spi init -> ", cmd, WSPI_INIT_CMD_LEN);
+	kfree(cmd);
 }
 
 #define WL1271_BUSY_WORD_TIMEOUT 1000
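
The wl1271 hunk above fixes a use-after-free: the init command buffer was
dumped after being freed. The corrected ordering, as a rule of thumb —
consume the buffer, then free it:

	spi_sync(wl_to_spi(wl), &m);
	wl1271_dump(DEBUG_SPI, "spi init -> ", cmd, WSPI_INIT_CMD_LEN);
	kfree(cmd);	/* nothing may touch 'cmd' past this point */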
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index 147bb1a..a75ed30 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -295,7 +295,7 @@
 	int err;
 	err = ccwgroup_create_from_string(claw_root_dev,
 					  claw_group_driver.driver_id,
-					  &claw_ccw_driver, 3, buf);
+					  &claw_ccw_driver, 2, buf);
 	return err ? err : count;
 }
 
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index d798927..d125776 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -188,8 +188,7 @@
 		qeth_is_enabled6(c, f) : qeth_is_enabled(c, f))
 
 #define QETH_IDX_FUNC_LEVEL_OSD		 0x0101
-#define QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT 0x4108
-#define QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT 0x5108
+#define QETH_IDX_FUNC_LEVEL_IQD		 0x4108
 
 #define QETH_MODELLIST_ARRAY \
 	{{0x1731, 0x01, 0x1732, QETH_CARD_TYPE_OSD, QETH_MAX_QUEUES, 0}, \
@@ -741,6 +740,7 @@
 	struct qeth_qdio_info qdio;
 	struct qeth_perf_stats perf_stats;
 	int use_hard_stop;
+	int read_or_write_problem;
 	struct qeth_osn_info osn_info;
 	struct qeth_discipline discipline;
 	atomic_t force_alloc_skb;
@@ -748,6 +748,7 @@
 	struct qdio_ssqd_desc ssqd;
 	debug_info_t *debug;
 	struct mutex conf_mutex;
+	struct mutex discipline_mutex;
 };
 
 struct qeth_card_list_struct {
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index b701906..3a5a18a 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -262,6 +262,7 @@
 		QETH_DBF_MESSAGE(2, "%s error in starting next read ccw! "
 			"rc=%i\n", dev_name(&card->gdev->dev), rc);
 		atomic_set(&card->read.irq_pending, 0);
+		card->read_or_write_problem = 1;
 		qeth_schedule_recovery(card);
 		wake_up(&card->wait_q);
 	}
@@ -382,6 +383,7 @@
 		qeth_put_reply(reply);
 	}
 	spin_unlock_irqrestore(&card->lock, flags);
+	atomic_set(&card->write.irq_pending, 0);
 }
 EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list);
 
@@ -1076,6 +1078,7 @@
 	card->state = CARD_STATE_DOWN;
 	card->lan_online = 0;
 	card->use_hard_stop = 0;
+	card->read_or_write_problem = 0;
 	card->dev = NULL;
 	spin_lock_init(&card->vlanlock);
 	spin_lock_init(&card->mclock);
@@ -1084,6 +1087,7 @@
 	spin_lock_init(&card->ip_lock);
 	spin_lock_init(&card->thread_mask_lock);
 	mutex_init(&card->conf_mutex);
+	mutex_init(&card->discipline_mutex);
 	card->thread_start_mask = 0;
 	card->thread_allowed_mask = 0;
 	card->thread_running_mask = 0;
@@ -1383,12 +1387,7 @@
 {
 	switch (card->info.type) {
 	case QETH_CARD_TYPE_IQD:
-		if (card->ipato.enabled)
-			card->info.func_level =
-				QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT;
-		else
-			card->info.func_level =
-				QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT;
+		card->info.func_level =	QETH_IDX_FUNC_LEVEL_IQD;
 		break;
 	case QETH_CARD_TYPE_OSD:
 	case QETH_CARD_TYPE_OSN:
@@ -1662,6 +1661,10 @@
 
 	QETH_CARD_TEXT(card, 2, "sendctl");
 
+	if (card->read_or_write_problem) {
+		qeth_release_buffer(iob->channel, iob);
+		return -EIO;
+	}
 	reply = qeth_alloc_reply(card);
 	if (!reply) {
 		return -ENOMEM;
@@ -1733,6 +1736,9 @@
 	spin_unlock_irqrestore(&reply->card->lock, flags);
 	reply->rc = -ETIME;
 	atomic_inc(&reply->received);
+	atomic_set(&card->write.irq_pending, 0);
+	qeth_release_buffer(iob->channel, iob);
+	card->write.buf_no = (card->write.buf_no + 1) % QETH_CMD_BUFFER_NO;
 	wake_up(&reply->wait_q);
 	rc = reply->rc;
 	qeth_put_reply(reply);
@@ -1990,7 +1996,7 @@
 		QETH_DBF_TEXT(SETUP, 2, "olmlimit");
 		dev_err(&card->gdev->dev, "A connection could not be "
 			"established because of an OLM limit\n");
-		rc = -EMLINK;
+		iob->rc = -EMLINK;
 	}
 	QETH_DBF_TEXT_(SETUP, 2, "  rc%d", iob->rc);
 	return rc;
@@ -2489,6 +2495,10 @@
 	qeth_prepare_ipa_cmd(card, iob, prot_type);
 	rc = qeth_send_control_data(card, IPA_CMD_LENGTH,
 						iob, reply_cb, reply_param);
+	if (rc == -ETIME) {
+		qeth_clear_ipacmd_list(card);
+		qeth_schedule_recovery(card);
+	}
 	return rc;
 }
 EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);
@@ -3413,7 +3423,6 @@
 {
 	struct qeth_ipa_cmd *cmd;
 	struct qeth_set_access_ctrl *access_ctrl_req;
-	int rc;
 
 	QETH_CARD_TEXT(card, 4, "setaccb");
 
@@ -3440,7 +3449,6 @@
 			card->gdev->dev.kobj.name,
 			access_ctrl_req->subcmd_code,
 			cmd->data.setadapterparms.hdr.return_code);
-		rc = 0;
 		break;
 	}
 	case SET_ACCESS_CTRL_RC_NOT_SUPPORTED:
@@ -3454,7 +3462,6 @@
 
 		/* ensure isolation mode is "none" */
 		card->options.isolation = ISOLATION_MODE_NONE;
-		rc = -EOPNOTSUPP;
 		break;
 	}
 	case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER:
@@ -3469,7 +3476,6 @@
 
 		/* ensure isolation mode is "none" */
 		card->options.isolation = ISOLATION_MODE_NONE;
-		rc = -EOPNOTSUPP;
 		break;
 	}
 	case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF:
@@ -3483,7 +3489,6 @@
 
 		/* ensure isolation mode is "none" */
 		card->options.isolation = ISOLATION_MODE_NONE;
-		rc = -EPERM;
 		break;
 	}
 	default:
@@ -3497,12 +3502,11 @@
 
 		/* ensure isolation mode is "none" */
 		card->options.isolation = ISOLATION_MODE_NONE;
-		rc = 0;
 		break;
 	}
 	}
 	qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
-	return rc;
+	return 0;
 }
 
 static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
@@ -3744,15 +3748,10 @@
 	/* skip 4 bytes (data_len struct member) to get req_len */
 	if (copy_from_user(&req_len, udata + sizeof(int), sizeof(int)))
 		return -EFAULT;
-	ureq = kmalloc(req_len+sizeof(struct qeth_snmp_ureq_hdr), GFP_KERNEL);
-	if (!ureq) {
+	ureq = memdup_user(udata, req_len + sizeof(struct qeth_snmp_ureq_hdr));
+	if (IS_ERR(ureq)) {
 		QETH_CARD_TEXT(card, 2, "snmpnome");
-		return -ENOMEM;
-	}
-	if (copy_from_user(ureq, udata,
-			req_len + sizeof(struct qeth_snmp_ureq_hdr))) {
-		kfree(ureq);
-		return -EFAULT;
+		return PTR_ERR(ureq);
 	}
 	qinfo.udata_len = ureq->hdr.data_len;
 	qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
@@ -3971,6 +3970,7 @@
 		else
 			goto retry;
 	}
+	card->read_or_write_problem = 0;
 	rc = qeth_mpc_initialize(card);
 	if (rc) {
 		QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
@@ -4353,16 +4353,18 @@
 	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
 
 	QETH_DBF_TEXT(SETUP, 2, "removedv");
-	if (card->discipline.ccwgdriver) {
-		card->discipline.ccwgdriver->remove(gdev);
-		qeth_core_free_discipline(card);
-	}
 
 	if (card->info.type == QETH_CARD_TYPE_OSN) {
 		qeth_core_remove_osn_attributes(&gdev->dev);
 	} else {
 		qeth_core_remove_device_attributes(&gdev->dev);
 	}
+
+	if (card->discipline.ccwgdriver) {
+		card->discipline.ccwgdriver->remove(gdev);
+		qeth_core_free_discipline(card);
+	}
+
 	debug_unregister(card->debug);
 	write_lock_irqsave(&qeth_core_card_list.rwlock, flags);
 	list_del(&card->list);
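The SNMP ioctl conversion above uses memdup_user(), which collapses the kmalloc()/copy_from_user()/error-unwind triple into a single call that returns either a kernel copy of the user buffer or an ERR_PTR. A minimal sketch of the calling convention (memdup_user, IS_ERR and PTR_ERR are real kernel APIs; the surrounding helper is hypothetical):

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

/* Hypothetical ioctl helper copying a variable-length request. */
static int example_copy_req(void __user *udata, size_t len, void **out)
{
	void *req = memdup_user(udata, len);

	if (IS_ERR(req))
		return PTR_ERR(req);	/* -ENOMEM or -EFAULT */
	*out = req;			/* caller kfree()s it */
	return 0;
}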
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index 2eb022f..42fa783 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -411,7 +411,7 @@
 	if (!card)
 		return -EINVAL;
 
-	mutex_lock(&card->conf_mutex);
+	mutex_lock(&card->discipline_mutex);
 	if (card->state != CARD_STATE_DOWN) {
 		rc = -EPERM;
 		goto out;
@@ -433,6 +433,7 @@
 	if (card->options.layer2 == newdis)
 		goto out;
 	else {
+		card->info.mac_bits  = 0;
 		if (card->discipline.ccwgdriver) {
 			card->discipline.ccwgdriver->remove(card->gdev);
 			qeth_core_free_discipline(card);
@@ -445,7 +446,7 @@
 
 	rc = card->discipline.ccwgdriver->probe(card->gdev);
 out:
-	mutex_unlock(&card->conf_mutex);
+	mutex_unlock(&card->discipline_mutex);
 	return rc ? rc : count;
 }
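The new discipline_mutex serializes a discipline (layer) change against concurrent set-online/set-offline processing, which the finer-grained conf_mutex alone did not cover; as the later hunks show, online/offline now take discipline_mutex first and conf_mutex inside it, fixing a single lock order. A sketch of that nesting (assuming the qeth_card fields declared in qeth_core.h above):

#include <linux/mutex.h>

/* Lock order this series establishes: discipline_mutex outermost,
 * conf_mutex nested inside it. */
static int example_set_online(struct qeth_card *card)
{
	int rc = 0;

	mutex_lock(&card->discipline_mutex);
	mutex_lock(&card->conf_mutex);
	/* ... bring the card online ... */
	mutex_unlock(&card->conf_mutex);
	mutex_unlock(&card->discipline_mutex);
	return rc;
}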
 
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 32d07c2..830d6352 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -860,8 +860,6 @@
 		unregister_netdev(card->dev);
 		card->dev = NULL;
 	}
-
-	qeth_l2_del_all_mc(card);
 	return;
 }
 
@@ -935,6 +933,7 @@
 	enum qeth_card_states recover_flag;
 
 	BUG_ON(!card);
+	mutex_lock(&card->discipline_mutex);
 	mutex_lock(&card->conf_mutex);
 	QETH_DBF_TEXT(SETUP, 2, "setonlin");
 	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
@@ -1012,6 +1011,7 @@
 	kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
 out:
 	mutex_unlock(&card->conf_mutex);
+	mutex_unlock(&card->discipline_mutex);
 	return 0;
 
 out_remove:
@@ -1025,6 +1025,7 @@
 	else
 		card->state = CARD_STATE_DOWN;
 	mutex_unlock(&card->conf_mutex);
+	mutex_unlock(&card->discipline_mutex);
 	return rc;
 }
 
@@ -1040,6 +1041,7 @@
 	int rc = 0, rc2 = 0, rc3 = 0;
 	enum qeth_card_states recover_flag;
 
+	mutex_lock(&card->discipline_mutex);
 	mutex_lock(&card->conf_mutex);
 	QETH_DBF_TEXT(SETUP, 3, "setoffl");
 	QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *));
@@ -1060,6 +1062,7 @@
 	/* let user_space know that device is offline */
 	kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE);
 	mutex_unlock(&card->conf_mutex);
+	mutex_unlock(&card->discipline_mutex);
 	return 0;
 }
 
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
index 8447d23..e705b27 100644
--- a/drivers/s390/net/qeth_l3.h
+++ b/drivers/s390/net/qeth_l3.h
@@ -64,5 +64,6 @@
 			const u8 *);
 int qeth_l3_set_large_send(struct qeth_card *, enum qeth_large_send_types);
 int qeth_l3_set_rx_csum(struct qeth_card *, enum qeth_checksum_types);
+int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *, struct qeth_ipaddr *);
 
 #endif /* __QETH_L3_H__ */
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 61d348e..e22ae24 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -195,7 +195,7 @@
 	}
 }
 
-static int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
+int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
 						struct qeth_ipaddr *addr)
 {
 	struct qeth_ipato_entry *ipatoe;
@@ -3354,6 +3354,8 @@
 {
 	struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
 
+	qeth_l3_remove_device_attributes(&cgdev->dev);
+
 	qeth_set_allowed_threads(card, 0, 1);
 	wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
 
@@ -3367,7 +3369,6 @@
 		card->dev = NULL;
 	}
 
-	qeth_l3_remove_device_attributes(&cgdev->dev);
 	qeth_l3_clear_ip_list(card, 0, 0);
 	qeth_l3_clear_ipato_list(card);
 	return;
@@ -3380,6 +3381,7 @@
 	enum qeth_card_states recover_flag;
 
 	BUG_ON(!card);
+	mutex_lock(&card->discipline_mutex);
 	mutex_lock(&card->conf_mutex);
 	QETH_DBF_TEXT(SETUP, 2, "setonlin");
 	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
@@ -3461,6 +3463,7 @@
 	kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
 out:
 	mutex_unlock(&card->conf_mutex);
+	mutex_unlock(&card->discipline_mutex);
 	return 0;
 out_remove:
 	card->use_hard_stop = 1;
@@ -3473,6 +3476,7 @@
 	else
 		card->state = CARD_STATE_DOWN;
 	mutex_unlock(&card->conf_mutex);
+	mutex_unlock(&card->discipline_mutex);
 	return rc;
 }
 
@@ -3488,6 +3492,7 @@
 	int rc = 0, rc2 = 0, rc3 = 0;
 	enum qeth_card_states recover_flag;
 
+	mutex_lock(&card->discipline_mutex);
 	mutex_lock(&card->conf_mutex);
 	QETH_DBF_TEXT(SETUP, 3, "setoffl");
 	QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *));
@@ -3508,6 +3513,7 @@
 	/* let user_space know that device is offline */
 	kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE);
 	mutex_unlock(&card->conf_mutex);
+	mutex_unlock(&card->discipline_mutex);
 	return 0;
 }
 
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index fb5318b..67cfa68 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -479,6 +479,7 @@
 		struct device_attribute *attr, const char *buf, size_t count)
 {
 	struct qeth_card *card = dev_get_drvdata(dev);
+	struct qeth_ipaddr *tmpipa, *t;
 	char *tmp;
 	int rc = 0;
 
@@ -497,8 +498,21 @@
 		card->ipato.enabled = (card->ipato.enabled)? 0 : 1;
 	} else if (!strcmp(tmp, "1")) {
 		card->ipato.enabled = 1;
+		list_for_each_entry_safe(tmpipa, t, card->ip_tbd_list, entry) {
+			if ((tmpipa->type == QETH_IP_TYPE_NORMAL) &&
+				qeth_l3_is_addr_covered_by_ipato(card, tmpipa))
+				tmpipa->set_flags |=
+					QETH_IPA_SETIP_TAKEOVER_FLAG;
+		}
+
 	} else if (!strcmp(tmp, "0")) {
 		card->ipato.enabled = 0;
+		list_for_each_entry_safe(tmpipa, t, card->ip_tbd_list, entry) {
+			if (tmpipa->set_flags &
+				QETH_IPA_SETIP_TAKEOVER_FLAG)
+				tmpipa->set_flags &=
+					~QETH_IPA_SETIP_TAKEOVER_FLAG;
+		}
 	} else
 		rc = -EINVAL;
 out:
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 7a104e2..f13e56b 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -74,6 +74,22 @@
 	}
 	return seg;
 }
+/* Copy up to len bytes of iovec entries from one iovec to another. */
+static void copy_iovec_hdr(const struct iovec *from, struct iovec *to,
+			   size_t len, int iovcount)
+{
+	int seg = 0;
+	size_t size;
+	while (len && seg < iovcount) {
+		size = min(from->iov_len, len);
+		to->iov_base = from->iov_base;
+		to->iov_len = size;
+		len -= size;
+		++from;
+		++to;
+		++seg;
+	}
+}
 
 /* Caller must have TX VQ lock */
 static void tx_poll_stop(struct vhost_net *net)
@@ -129,7 +145,7 @@
 
 	if (wmem < sock->sk->sk_sndbuf / 2)
 		tx_poll_stop(net);
-	hdr_size = vq->hdr_size;
+	hdr_size = vq->vhost_hlen;
 
 	for (;;) {
 		head = vhost_get_vq_desc(&net->dev, vq, vq->iov,
@@ -172,7 +188,7 @@
 		/* TODO: Check specific error and bomb out unless ENOBUFS? */
 		err = sock->ops->sendmsg(NULL, sock, &msg, len);
 		if (unlikely(err < 0)) {
-			vhost_discard_vq_desc(vq);
+			vhost_discard_vq_desc(vq, 1);
 			tx_poll_start(net, sock);
 			break;
 		}
@@ -191,9 +207,82 @@
 	unuse_mm(net->dev.mm);
 }
 
+static int peek_head_len(struct sock *sk)
+{
+	struct sk_buff *head;
+	int len = 0;
+
+	lock_sock(sk);
+	head = skb_peek(&sk->sk_receive_queue);
+	if (head)
+		len = head->len;
+	release_sock(sk);
+	return len;
+}
+
+/* This is a multi-buffer version of vhost_get_vq_desc; it works only if
+ *	the vq has read descriptors.
+ * @vq		- the relevant virtqueue
+ * @datalen	- data length we'll be reading
+ * @iovcount	- returned count of io vectors we fill
+ * @log		- vhost log
+ * @log_num	- returned count of log entries used
+ *	returns number of buffer heads allocated, negative on error
+ */
+static int get_rx_bufs(struct vhost_virtqueue *vq,
+		       struct vring_used_elem *heads,
+		       int datalen,
+		       unsigned *iovcount,
+		       struct vhost_log *log,
+		       unsigned *log_num)
+{
+	unsigned int out, in;
+	int seg = 0;
+	int headcount = 0;
+	unsigned d;
+	int r, nlogs = 0;
+
+	while (datalen > 0) {
+		if (unlikely(headcount >= VHOST_NET_MAX_SG)) {
+			r = -ENOBUFS;
+			goto err;
+		}
+		d = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg,
+				      ARRAY_SIZE(vq->iov) - seg, &out,
+				      &in, log, log_num);
+		if (d == vq->num) {
+			r = 0;
+			goto err;
+		}
+		if (unlikely(out || in <= 0)) {
+			vq_err(vq, "unexpected descriptor format for RX: "
+				"out %d, in %d\n", out, in);
+			r = -EINVAL;
+			goto err;
+		}
+		if (unlikely(log)) {
+			nlogs += *log_num;
+			log += *log_num;
+		}
+		heads[headcount].id = d;
+		heads[headcount].len = iov_length(vq->iov + seg, in);
+		datalen -= heads[headcount].len;
+		++headcount;
+		seg += in;
+	}
+	heads[headcount - 1].len += datalen;
+	*iovcount = seg;
+	if (unlikely(log))
+		*log_num = nlogs;
+	return headcount;
+err:
+	vhost_discard_vq_desc(vq, headcount);
+	return r;
+}
+
 /* Expects to be always run from workqueue - which acts as
  * read-side critical section for our kind of RCU. */
-static void handle_rx(struct vhost_net *net)
+static void handle_rx_big(struct vhost_net *net)
 {
 	struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_RX];
 	unsigned out, in, log, s;
@@ -223,7 +312,7 @@
 	use_mm(net->dev.mm);
 	mutex_lock(&vq->mutex);
 	vhost_disable_notify(vq);
-	hdr_size = vq->hdr_size;
+	hdr_size = vq->vhost_hlen;
 
 	vq_log = unlikely(vhost_has_feature(&net->dev, VHOST_F_LOG_ALL)) ?
 		vq->log : NULL;
@@ -270,14 +359,14 @@
 					 len, MSG_DONTWAIT | MSG_TRUNC);
 		/* TODO: Check specific error and bomb out unless EAGAIN? */
 		if (err < 0) {
-			vhost_discard_vq_desc(vq);
+			vhost_discard_vq_desc(vq, 1);
 			break;
 		}
 		/* TODO: Should check and handle checksum. */
 		if (err > len) {
 			pr_debug("Discarded truncated rx packet: "
 				 " len %d > %zd\n", err, len);
-			vhost_discard_vq_desc(vq);
+			vhost_discard_vq_desc(vq, 1);
 			continue;
 		}
 		len = err;
@@ -302,54 +391,175 @@
 	unuse_mm(net->dev.mm);
 }
 
-static void handle_tx_kick(struct work_struct *work)
+/* Expects to be always run from the vhost worker thread - which acts as
+ * a read-side critical section for our kind of RCU. */
+static void handle_rx_mergeable(struct vhost_net *net)
 {
-	struct vhost_virtqueue *vq;
-	struct vhost_net *net;
-	vq = container_of(work, struct vhost_virtqueue, poll.work);
-	net = container_of(vq->dev, struct vhost_net, dev);
+	struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_RX];
+	unsigned uninitialized_var(in), log;
+	struct vhost_log *vq_log;
+	struct msghdr msg = {
+		.msg_name = NULL,
+		.msg_namelen = 0,
+		.msg_control = NULL, /* FIXME: get and handle RX aux data. */
+		.msg_controllen = 0,
+		.msg_iov = vq->iov,
+		.msg_flags = MSG_DONTWAIT,
+	};
+
+	struct virtio_net_hdr_mrg_rxbuf hdr = {
+		.hdr.flags = 0,
+		.hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE
+	};
+
+	size_t total_len = 0;
+	int err, headcount;
+	size_t vhost_hlen, sock_hlen;
+	size_t vhost_len, sock_len;
+	struct socket *sock = rcu_dereference(vq->private_data);
+	if (!sock || skb_queue_empty(&sock->sk->sk_receive_queue))
+		return;
+
+	use_mm(net->dev.mm);
+	mutex_lock(&vq->mutex);
+	vhost_disable_notify(vq);
+	vhost_hlen = vq->vhost_hlen;
+	sock_hlen = vq->sock_hlen;
+
+	vq_log = unlikely(vhost_has_feature(&net->dev, VHOST_F_LOG_ALL)) ?
+		vq->log : NULL;
+
+	while ((sock_len = peek_head_len(sock->sk))) {
+		sock_len += sock_hlen;
+		vhost_len = sock_len + vhost_hlen;
+		headcount = get_rx_bufs(vq, vq->heads, vhost_len,
+					&in, vq_log, &log);
+		/* On error, stop handling until the next kick. */
+		if (unlikely(headcount < 0))
+			break;
+		/* OK, now we need to know about added descriptors. */
+		if (!headcount) {
+			if (unlikely(vhost_enable_notify(vq))) {
+				/* They have slipped one in as we were
+				 * doing that: check again. */
+				vhost_disable_notify(vq);
+				continue;
+			}
+			/* Nothing new?  Wait for eventfd to tell us
+			 * they refilled. */
+			break;
+		}
+		/* We don't need to be notified again. */
+		if (unlikely(vhost_hlen))
+			/* Skip header. TODO: support TSO. */
+			move_iovec_hdr(vq->iov, vq->hdr, vhost_hlen, in);
+		else
+			/* Copy the header for use in VIRTIO_NET_F_MRG_RXBUF:
+			 * needed because recvmsg can modify msg_iov. */
+			copy_iovec_hdr(vq->iov, vq->hdr, sock_hlen, in);
+		msg.msg_iovlen = in;
+		err = sock->ops->recvmsg(NULL, sock, &msg,
+					 sock_len, MSG_DONTWAIT | MSG_TRUNC);
+		/* Userspace might have consumed the packet meanwhile:
+		 * it's not supposed to do this usually, but might be hard
+		 * to prevent. Discard data we got (if any) and keep going. */
+		if (unlikely(err != sock_len)) {
+			pr_debug("Discarded rx packet: "
+				 " len %d, expected %zd\n", err, sock_len);
+			vhost_discard_vq_desc(vq, headcount);
+			continue;
+		}
+		if (unlikely(vhost_hlen) &&
+		    memcpy_toiovecend(vq->hdr, (unsigned char *)&hdr, 0,
+				      vhost_hlen)) {
+			vq_err(vq, "Unable to write vnet_hdr at addr %p\n",
+			       vq->iov->iov_base);
+			break;
+		}
+		/* TODO: Should check and handle checksum. */
+		if (vhost_has_feature(&net->dev, VIRTIO_NET_F_MRG_RXBUF) &&
+		    memcpy_toiovecend(vq->hdr, (unsigned char *)&headcount,
+				      offsetof(typeof(hdr), num_buffers),
+				      sizeof hdr.num_buffers)) {
+			vq_err(vq, "Failed num_buffers write");
+			vhost_discard_vq_desc(vq, headcount);
+			break;
+		}
+		vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
+					    headcount);
+		if (unlikely(vq_log))
+			vhost_log_write(vq, vq_log, log, vhost_len);
+		total_len += vhost_len;
+		if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
+			vhost_poll_queue(&vq->poll);
+			break;
+		}
+	}
+
+	mutex_unlock(&vq->mutex);
+	unuse_mm(net->dev.mm);
+}
+
+static void handle_rx(struct vhost_net *net)
+{
+	if (vhost_has_feature(&net->dev, VIRTIO_NET_F_MRG_RXBUF))
+		handle_rx_mergeable(net);
+	else
+		handle_rx_big(net);
+}
+
+static void handle_tx_kick(struct vhost_work *work)
+{
+	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
+						  poll.work);
+	struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);
+
 	handle_tx(net);
 }
 
-static void handle_rx_kick(struct work_struct *work)
+static void handle_rx_kick(struct vhost_work *work)
 {
-	struct vhost_virtqueue *vq;
-	struct vhost_net *net;
-	vq = container_of(work, struct vhost_virtqueue, poll.work);
-	net = container_of(vq->dev, struct vhost_net, dev);
+	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
+						  poll.work);
+	struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);
+
 	handle_rx(net);
 }
 
-static void handle_tx_net(struct work_struct *work)
+static void handle_tx_net(struct vhost_work *work)
 {
-	struct vhost_net *net;
-	net = container_of(work, struct vhost_net, poll[VHOST_NET_VQ_TX].work);
+	struct vhost_net *net = container_of(work, struct vhost_net,
+					     poll[VHOST_NET_VQ_TX].work);
 	handle_tx(net);
 }
 
-static void handle_rx_net(struct work_struct *work)
+static void handle_rx_net(struct vhost_work *work)
 {
-	struct vhost_net *net;
-	net = container_of(work, struct vhost_net, poll[VHOST_NET_VQ_RX].work);
+	struct vhost_net *net = container_of(work, struct vhost_net,
+					     poll[VHOST_NET_VQ_RX].work);
 	handle_rx(net);
 }
 
 static int vhost_net_open(struct inode *inode, struct file *f)
 {
 	struct vhost_net *n = kmalloc(sizeof *n, GFP_KERNEL);
+	struct vhost_dev *dev;
 	int r;
+
 	if (!n)
 		return -ENOMEM;
+
+	dev = &n->dev;
 	n->vqs[VHOST_NET_VQ_TX].handle_kick = handle_tx_kick;
 	n->vqs[VHOST_NET_VQ_RX].handle_kick = handle_rx_kick;
-	r = vhost_dev_init(&n->dev, n->vqs, VHOST_NET_VQ_MAX);
+	r = vhost_dev_init(dev, n->vqs, VHOST_NET_VQ_MAX);
 	if (r < 0) {
 		kfree(n);
 		return r;
 	}
 
-	vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, POLLOUT);
-	vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, POLLIN);
+	vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, POLLOUT, dev);
+	vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, POLLIN, dev);
 	n->tx_poll_state = VHOST_NET_POLL_DISABLED;
 
 	f->private_data = n;
@@ -533,7 +743,6 @@
                 vhost_net_enable_vq(n, vq);
 	}
 
-done:
 	mutex_unlock(&vq->mutex);
 
 	if (oldsock) {
@@ -574,9 +783,21 @@
 
 static int vhost_net_set_features(struct vhost_net *n, u64 features)
 {
-	size_t hdr_size = features & (1 << VHOST_NET_F_VIRTIO_NET_HDR) ?
-		sizeof(struct virtio_net_hdr) : 0;
+	size_t vhost_hlen, sock_hlen, hdr_len;
 	int i;
+
+	hdr_len = (features & (1 << VIRTIO_NET_F_MRG_RXBUF)) ?
+			sizeof(struct virtio_net_hdr_mrg_rxbuf) :
+			sizeof(struct virtio_net_hdr);
+	if (features & (1 << VHOST_NET_F_VIRTIO_NET_HDR)) {
+		/* vhost provides vnet_hdr */
+		vhost_hlen = hdr_len;
+		sock_hlen = 0;
+	} else {
+		/* socket provides vnet_hdr */
+		vhost_hlen = 0;
+		sock_hlen = hdr_len;
+	}
 	mutex_lock(&n->dev.mutex);
 	if ((features & (1 << VHOST_F_LOG_ALL)) &&
 	    !vhost_log_access_ok(&n->dev)) {
@@ -587,7 +808,8 @@
 	smp_wmb();
 	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
 		mutex_lock(&n->vqs[i].mutex);
-		n->vqs[i].hdr_size = hdr_size;
+		n->vqs[i].vhost_hlen = vhost_hlen;
+		n->vqs[i].sock_hlen = sock_hlen;
 		mutex_unlock(&n->vqs[i].mutex);
 	}
 	vhost_net_flush(n);
@@ -657,25 +879,13 @@
 
 static int vhost_net_init(void)
 {
-	int r = vhost_init();
-	if (r)
-		goto err_init;
-	r = misc_register(&vhost_net_misc);
-	if (r)
-		goto err_reg;
-	return 0;
-err_reg:
-	vhost_cleanup();
-err_init:
-	return r;
-
+	return misc_register(&vhost_net_misc);
 }
 module_init(vhost_net_init);
 
 static void vhost_net_exit(void)
 {
 	misc_deregister(&vhost_net_misc);
-	vhost_cleanup();
 }
 module_exit(vhost_net_exit);
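With VIRTIO_NET_F_MRG_RXBUF a received packet may span several guest buffers, so handle_rx_mergeable() writes the headcount into the num_buffers field of the extended header after the recvmsg(), via memcpy_toiovecend() at offsetof(..., num_buffers). A userspace model of that header layout (field widths follow the virtio spec; the struct names here are illustrative, not the UAPI definitions):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Model of the header handle_rx_mergeable() patches: the classic
 * virtio-net header followed by a count of merged RX buffers. */
struct vnet_hdr {
	uint8_t  flags, gso_type;
	uint16_t hdr_len, gso_size, csum_start, csum_offset;
};
struct vnet_hdr_mrg {
	struct vnet_hdr hdr;
	uint16_t num_buffers;	/* how many descriptors this packet used */
};

int main(void)
{
	/* memcpy_toiovecend(..., offsetof(typeof(hdr), num_buffers), ...)
	 * targets this offset inside the header iovec. */
	printf("num_buffers at offset %zu\n",
	       offsetof(struct vnet_hdr_mrg, num_buffers));	/* 10 */
	return 0;
}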
 
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 248ed2d..e05557d 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -17,12 +17,13 @@
 #include <linux/mm.h>
 #include <linux/miscdevice.h>
 #include <linux/mutex.h>
-#include <linux/workqueue.h>
 #include <linux/rcupdate.h>
 #include <linux/poll.h>
 #include <linux/file.h>
 #include <linux/highmem.h>
 #include <linux/slab.h>
+#include <linux/kthread.h>
+#include <linux/cgroup.h>
 
 #include <linux/net.h>
 #include <linux/if_packet.h>
@@ -37,8 +38,6 @@
 	VHOST_MEMORY_F_LOG = 0x1,
 };
 
-static struct workqueue_struct *vhost_workqueue;
-
 static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
 			    poll_table *pt)
 {
@@ -52,23 +51,31 @@
 static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
 			     void *key)
 {
-	struct vhost_poll *poll;
-	poll = container_of(wait, struct vhost_poll, wait);
+	struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);
+
 	if (!((unsigned long)key & poll->mask))
 		return 0;
 
-	queue_work(vhost_workqueue, &poll->work);
+	vhost_poll_queue(poll);
 	return 0;
 }
 
 /* Init poll structure */
-void vhost_poll_init(struct vhost_poll *poll, work_func_t func,
-		     unsigned long mask)
+void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
+		     unsigned long mask, struct vhost_dev *dev)
 {
-	INIT_WORK(&poll->work, func);
+	struct vhost_work *work = &poll->work;
+
 	init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
 	init_poll_funcptr(&poll->table, vhost_poll_func);
 	poll->mask = mask;
+	poll->dev = dev;
+
+	INIT_LIST_HEAD(&work->node);
+	work->fn = fn;
+	init_waitqueue_head(&work->done);
+	work->flushing = 0;
+	work->queue_seq = work->done_seq = 0;
 }
 
 /* Start polling a file. We add ourselves to file's wait queue. The caller must
@@ -92,12 +99,40 @@
  * locks that are also used by the callback. */
 void vhost_poll_flush(struct vhost_poll *poll)
 {
-	flush_work(&poll->work);
+	struct vhost_work *work = &poll->work;
+	unsigned seq;
+	int left;
+	int flushing;
+
+	spin_lock_irq(&poll->dev->work_lock);
+	seq = work->queue_seq;
+	work->flushing++;
+	spin_unlock_irq(&poll->dev->work_lock);
+	wait_event(work->done, ({
+		   spin_lock_irq(&poll->dev->work_lock);
+		   left = seq - work->done_seq <= 0;
+		   spin_unlock_irq(&poll->dev->work_lock);
+		   left;
+	}));
+	spin_lock_irq(&poll->dev->work_lock);
+	flushing = --work->flushing;
+	spin_unlock_irq(&poll->dev->work_lock);
+	BUG_ON(flushing < 0);
 }
 
 void vhost_poll_queue(struct vhost_poll *poll)
 {
-	queue_work(vhost_workqueue, &poll->work);
+	struct vhost_dev *dev = poll->dev;
+	struct vhost_work *work = &poll->work;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->work_lock, flags);
+	if (list_empty(&work->node)) {
+		list_add_tail(&work->node, &dev->work_list);
+		work->queue_seq++;
+		wake_up_process(dev->worker);
+	}
+	spin_unlock_irqrestore(&dev->work_lock, flags);
 }
 
 static void vhost_vq_reset(struct vhost_dev *dev,
@@ -114,7 +149,8 @@
 	vq->used_flags = 0;
 	vq->log_used = false;
 	vq->log_addr = -1ull;
-	vq->hdr_size = 0;
+	vq->vhost_hlen = 0;
+	vq->sock_hlen = 0;
 	vq->private_data = NULL;
 	vq->log_base = NULL;
 	vq->error_ctx = NULL;
@@ -125,10 +161,51 @@
 	vq->log_ctx = NULL;
 }
 
+static int vhost_worker(void *data)
+{
+	struct vhost_dev *dev = data;
+	struct vhost_work *work = NULL;
+	unsigned uninitialized_var(seq);
+
+	for (;;) {
+		/* mb paired w/ kthread_stop */
+		set_current_state(TASK_INTERRUPTIBLE);
+
+		spin_lock_irq(&dev->work_lock);
+		if (work) {
+			work->done_seq = seq;
+			if (work->flushing)
+				wake_up_all(&work->done);
+		}
+
+		if (kthread_should_stop()) {
+			spin_unlock_irq(&dev->work_lock);
+			__set_current_state(TASK_RUNNING);
+			return 0;
+		}
+		if (!list_empty(&dev->work_list)) {
+			work = list_first_entry(&dev->work_list,
+						struct vhost_work, node);
+			list_del_init(&work->node);
+			seq = work->queue_seq;
+		} else
+			work = NULL;
+		spin_unlock_irq(&dev->work_lock);
+
+		if (work) {
+			__set_current_state(TASK_RUNNING);
+			work->fn(work);
+		} else
+			schedule();
+
+	}
+}
+
 long vhost_dev_init(struct vhost_dev *dev,
 		    struct vhost_virtqueue *vqs, int nvqs)
 {
 	int i;
+
 	dev->vqs = vqs;
 	dev->nvqs = nvqs;
 	mutex_init(&dev->mutex);
@@ -136,6 +213,9 @@
 	dev->log_file = NULL;
 	dev->memory = NULL;
 	dev->mm = NULL;
+	spin_lock_init(&dev->work_lock);
+	INIT_LIST_HEAD(&dev->work_list);
+	dev->worker = NULL;
 
 	for (i = 0; i < dev->nvqs; ++i) {
 		dev->vqs[i].dev = dev;
@@ -143,9 +223,9 @@
 		vhost_vq_reset(dev, dev->vqs + i);
 		if (dev->vqs[i].handle_kick)
 			vhost_poll_init(&dev->vqs[i].poll,
-					dev->vqs[i].handle_kick,
-					POLLIN);
+					dev->vqs[i].handle_kick, POLLIN, dev);
 	}
+
 	return 0;
 }
 
@@ -159,12 +239,36 @@
 /* Caller should have device mutex */
 static long vhost_dev_set_owner(struct vhost_dev *dev)
 {
+	struct task_struct *worker;
+	int err;
 	/* Is there an owner already? */
-	if (dev->mm)
-		return -EBUSY;
+	if (dev->mm) {
+		err = -EBUSY;
+		goto err_mm;
+	}
 	/* No owner, become one */
 	dev->mm = get_task_mm(current);
+	worker = kthread_create(vhost_worker, dev, "vhost-%d", current->pid);
+	if (IS_ERR(worker)) {
+		err = PTR_ERR(worker);
+		goto err_worker;
+	}
+
+	dev->worker = worker;
+	err = cgroup_attach_task_current_cg(worker);
+	if (err)
+		goto err_cgroup;
+	wake_up_process(worker);	/* avoid contributing to loadavg */
+
 	return 0;
+err_cgroup:
+	kthread_stop(worker);
+err_worker:
+	if (dev->mm)
+		mmput(dev->mm);
+	dev->mm = NULL;
+err_mm:
+	return err;
 }
 
 /* Caller should have device mutex */
@@ -217,6 +321,9 @@
 	if (dev->mm)
 		mmput(dev->mm);
 	dev->mm = NULL;
+
+	WARN_ON(!list_empty(&dev->work_list));
+	kthread_stop(dev->worker);
 }
 
 static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
@@ -995,9 +1102,9 @@
 }
 
 /* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
-void vhost_discard_vq_desc(struct vhost_virtqueue *vq)
+void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n)
 {
-	vq->last_avail_idx--;
+	vq->last_avail_idx -= n;
 }
 
 /* After we've used one of their buffers, we tell them about it.  We'll then
@@ -1042,6 +1149,67 @@
 	return 0;
 }
 
+static int __vhost_add_used_n(struct vhost_virtqueue *vq,
+			    struct vring_used_elem *heads,
+			    unsigned count)
+{
+	struct vring_used_elem __user *used;
+	int start;
+
+	start = vq->last_used_idx % vq->num;
+	used = vq->used->ring + start;
+	if (copy_to_user(used, heads, count * sizeof *used)) {
+		vq_err(vq, "Failed to write used");
+		return -EFAULT;
+	}
+	if (unlikely(vq->log_used)) {
+		/* Make sure data is seen before log. */
+		smp_wmb();
+		/* Log used ring entry write. */
+		log_write(vq->log_base,
+			  vq->log_addr +
+			   ((void __user *)used - (void __user *)vq->used),
+			  count * sizeof *used);
+	}
+	vq->last_used_idx += count;
+	return 0;
+}
+
+/* After we've used one of their buffers, we tell them about it.  We'll then
+ * want to notify the guest, using eventfd. */
+int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
+		     unsigned count)
+{
+	int start, n, r;
+
+	start = vq->last_used_idx % vq->num;
+	n = vq->num - start;
+	if (n < count) {
+		r = __vhost_add_used_n(vq, heads, n);
+		if (r < 0)
+			return r;
+		heads += n;
+		count -= n;
+	}
+	r = __vhost_add_used_n(vq, heads, count);
+
+	/* Make sure buffer is written before we update index. */
+	smp_wmb();
+	if (put_user(vq->last_used_idx, &vq->used->idx)) {
+		vq_err(vq, "Failed to increment used idx");
+		return -EFAULT;
+	}
+	if (unlikely(vq->log_used)) {
+		/* Log used index update. */
+		log_write(vq->log_base,
+			  vq->log_addr + offsetof(struct vring_used, idx),
+			  sizeof vq->used->idx);
+		if (vq->log_ctx)
+			eventfd_signal(vq->log_ctx, 1);
+	}
+	return r;
+}
+
 /* This actually signals the guest, using eventfd. */
 void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 {
@@ -1076,6 +1244,15 @@
 	vhost_signal(dev, vq);
 }
 
+/* multi-buffer version of vhost_add_used_and_signal */
+void vhost_add_used_and_signal_n(struct vhost_dev *dev,
+				 struct vhost_virtqueue *vq,
+				 struct vring_used_elem *heads, unsigned count)
+{
+	vhost_add_used_n(vq, heads, count);
+	vhost_signal(dev, vq);
+}
+
 /* OK, now we need to know about added descriptors. */
 bool vhost_enable_notify(struct vhost_virtqueue *vq)
 {
@@ -1100,7 +1277,7 @@
 		return false;
 	}
 
-	return avail_idx != vq->last_avail_idx;
+	return avail_idx != vq->avail_idx;
 }
 
 /* We don't need to be notified again. */
@@ -1115,16 +1292,3 @@
 		vq_err(vq, "Failed to enable notification at %p: %d\n",
 		       &vq->used->flags, r);
 }
-
-int vhost_init(void)
-{
-	vhost_workqueue = create_singlethread_workqueue("vhost");
-	if (!vhost_workqueue)
-		return -ENOMEM;
-	return 0;
-}
-
-void vhost_cleanup(void)
-{
-	destroy_workqueue(vhost_workqueue);
-}
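The shared vhost workqueue is gone: each device now owns a kthread ("vhost-<pid>") placed in the owner's cgroups, and vhost_poll_flush() is built on two sequence counters — the flusher snapshots queue_seq and sleeps until the worker's done_seq catches up. A minimal userspace model of that flush protocol (pthreads stand in for the kthread and spinlock; illustrative only):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t done = PTHREAD_COND_INITIALIZER;
static unsigned queue_seq, done_seq;

/* Worker: "runs" each queued item and publishes done_seq. */
static void *worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (done_seq != queue_seq) {
		done_seq++;			/* one more item completed */
		pthread_cond_broadcast(&done);
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;
	unsigned seq;

	pthread_mutex_lock(&lock);
	queue_seq += 3;				/* queue three work items */
	seq = queue_seq;			/* flush: snapshot the sequence */
	pthread_mutex_unlock(&lock);

	pthread_create(&t, NULL, worker, NULL);

	pthread_mutex_lock(&lock);
	while ((int)(seq - done_seq) > 0)	/* wait until done catches up */
		pthread_cond_wait(&done, &lock);
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	puts("flushed");
	return 0;
}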
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 11ee13d..afd7729 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -5,13 +5,13 @@
 #include <linux/vhost.h>
 #include <linux/mm.h>
 #include <linux/mutex.h>
-#include <linux/workqueue.h>
 #include <linux/poll.h>
 #include <linux/file.h>
 #include <linux/skbuff.h>
 #include <linux/uio.h>
 #include <linux/virtio_config.h>
 #include <linux/virtio_ring.h>
+#include <asm/atomic.h>
 
 struct vhost_device;
 
@@ -20,19 +20,31 @@
 	VHOST_NET_MAX_SG = MAX_SKB_FRAGS + 2,
 };
 
+struct vhost_work;
+typedef void (*vhost_work_fn_t)(struct vhost_work *work);
+
+struct vhost_work {
+	struct list_head	  node;
+	vhost_work_fn_t		  fn;
+	wait_queue_head_t	  done;
+	int			  flushing;
+	unsigned		  queue_seq;
+	unsigned		  done_seq;
+};
+
 /* Poll a file (eventfd or socket) */
 /* Note: there's nothing vhost specific about this structure. */
 struct vhost_poll {
 	poll_table                table;
 	wait_queue_head_t        *wqh;
 	wait_queue_t              wait;
-	/* struct which will handle all actual work. */
-	struct work_struct        work;
+	struct vhost_work	  work;
 	unsigned long		  mask;
+	struct vhost_dev	 *dev;
 };
 
-void vhost_poll_init(struct vhost_poll *poll, work_func_t func,
-		     unsigned long mask);
+void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
+		     unsigned long mask, struct vhost_dev *dev);
 void vhost_poll_start(struct vhost_poll *poll, struct file *file);
 void vhost_poll_stop(struct vhost_poll *poll);
 void vhost_poll_flush(struct vhost_poll *poll);
@@ -63,7 +75,7 @@
 	struct vhost_poll poll;
 
 	/* The routine to call when the Guest pings us, or timeout. */
-	work_func_t handle_kick;
+	vhost_work_fn_t handle_kick;
 
 	/* Last available index we saw. */
 	u16 last_avail_idx;
@@ -84,13 +96,15 @@
 	struct iovec indirect[VHOST_NET_MAX_SG];
 	struct iovec iov[VHOST_NET_MAX_SG];
 	struct iovec hdr[VHOST_NET_MAX_SG];
-	size_t hdr_size;
+	size_t vhost_hlen;
+	size_t sock_hlen;
+	struct vring_used_elem heads[VHOST_NET_MAX_SG];
 	/* We use a kind of RCU to access private pointer.
-	 * All readers access it from workqueue, which makes it possible to
-	 * flush the workqueue instead of synchronize_rcu. Therefore readers do
+	 * All readers access it from worker, which makes it possible to
+	 * flush the vhost_work instead of synchronize_rcu. Therefore readers do
 	 * not need to call rcu_read_lock/rcu_read_unlock: the beginning of
-	 * work item execution acts instead of rcu_read_lock() and the end of
-	 * work item execution acts instead of rcu_read_lock().
+	 * vhost_work execution acts instead of rcu_read_lock() and the end of
+	 * vhost_work execution acts instead of rcu_read_unlock().
 	 * Writers use virtqueue mutex. */
 	void *private_data;
 	/* Log write descriptors */
@@ -110,6 +124,9 @@
 	int nvqs;
 	struct file *log_file;
 	struct eventfd_ctx *log_ctx;
+	spinlock_t work_lock;
+	struct list_head work_list;
+	struct task_struct *worker;
 };
 
 long vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue *vqs, int nvqs);
@@ -124,21 +141,22 @@
 		      struct iovec iov[], unsigned int iov_count,
 		      unsigned int *out_num, unsigned int *in_num,
 		      struct vhost_log *log, unsigned int *log_num);
-void vhost_discard_vq_desc(struct vhost_virtqueue *);
+void vhost_discard_vq_desc(struct vhost_virtqueue *, int n);
 
 int vhost_add_used(struct vhost_virtqueue *, unsigned int head, int len);
-void vhost_signal(struct vhost_dev *, struct vhost_virtqueue *);
+int vhost_add_used_n(struct vhost_virtqueue *, struct vring_used_elem *heads,
+		     unsigned count);
 void vhost_add_used_and_signal(struct vhost_dev *, struct vhost_virtqueue *,
-			       unsigned int head, int len);
+			       unsigned int id, int len);
+void vhost_add_used_and_signal_n(struct vhost_dev *, struct vhost_virtqueue *,
+			       struct vring_used_elem *heads, unsigned count);
+void vhost_signal(struct vhost_dev *, struct vhost_virtqueue *);
 void vhost_disable_notify(struct vhost_virtqueue *);
 bool vhost_enable_notify(struct vhost_virtqueue *);
 
 int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
 		    unsigned int log_num, u64 len);
 
-int vhost_init(void);
-void vhost_cleanup(void);
-
 #define vq_err(vq, fmt, ...) do {                                  \
 		pr_debug(pr_fmt(fmt), ##__VA_ARGS__);       \
 		if ((vq)->error_ctx)                               \
@@ -149,7 +167,8 @@
 	VHOST_FEATURES = (1 << VIRTIO_F_NOTIFY_ON_EMPTY) |
 			 (1 << VIRTIO_RING_F_INDIRECT_DESC) |
 			 (1 << VHOST_F_LOG_ALL) |
-			 (1 << VHOST_NET_F_VIRTIO_NET_HDR),
+			 (1 << VHOST_NET_F_VIRTIO_NET_HDR) |
+			 (1 << VIRTIO_NET_F_MRG_RXBUF),
 };
 
 static inline int vhost_has_feature(struct vhost_dev *dev, int bit)
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index 2fc8e14..9aa9bca 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -276,6 +276,7 @@
 		  $(srctree)/include/asm-$(SRCARCH)/kvm_para.h),)
 unifdef-y += kvm_para.h
 endif
+unifdef-y += l2tp.h
 unifdef-y += llc.h
 unifdef-y += loop.h
 unifdef-y += lp.h
diff --git a/include/linux/can/platform/flexcan.h b/include/linux/can/platform/flexcan.h
new file mode 100644
index 0000000..72b713a
--- /dev/null
+++ b/include/linux/can/platform/flexcan.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2010 Marc Kleine-Budde <kernel@pengutronix.de>
+ *
+ * This file is released under the GPLv2
+ *
+ */
+
+#ifndef __CAN_PLATFORM_FLEXCAN_H
+#define __CAN_PLATFORM_FLEXCAN_H
+
+/**
+ * struct flexcan_platform_data - flex CAN controller platform data
+ * @transceiver_switch:         - called to power on/off the transceiver
+ *
+ */
+struct flexcan_platform_data {
+	void (*transceiver_switch)(int enable);
+};
+
+#endif /* __CAN_PLATFORM_FLEXCAN_H */
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 0c62160..e0aa067 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -570,6 +570,7 @@
 void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it);
 int cgroup_scan_tasks(struct cgroup_scanner *scan);
 int cgroup_attach_task(struct cgroup *, struct task_struct *);
+int cgroup_attach_task_current_cg(struct task_struct *);
 
 /*
  * CSS ID is ID for cgroup_subsys_state structs under subsys. This only works
@@ -626,6 +627,12 @@
 	return -EINVAL;
 }
 
+/* No cgroups - nothing to do */
+static inline int cgroup_attach_task_current_cg(struct task_struct *t)
+{
+	return 0;
+}
+
 #endif /* !CONFIG_CGROUPS */
 
 #endif /* _LINUX_CGROUP_H */
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 3d7a668..848480b 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -127,6 +127,20 @@
 }
 
 /**
+ * dev_hw_addr_random - Create random MAC and set device flag
+ * @dev: pointer to net_device structure
+ * @hwaddr: Pointer to a six-byte array to be filled with the generated address
+ *
+ * Generate random MAC to be used by a device and set addr_assign_type
+ * so the state can be read by sysfs and be used by udev.
+ */
+static inline void dev_hw_addr_random(struct net_device *dev, u8 *hwaddr)
+{
+	dev->addr_assign_type |= NET_ADDR_RANDOM;
+	random_ether_addr(hwaddr);
+}
+
+/**
  * compare_ether_addr - Compare two Ethernet addresses
  * @addr1: Pointer to a six-byte array containing the Ethernet address
  * @addr2: Pointer to other six-byte array containing the Ethernet address
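random_ether_addr() does more than fill six random bytes: it clears the multicast bit and sets the locally-administered bit so the result is a valid unicast MAC, and dev_hw_addr_random() additionally records NET_ADDR_RANDOM in addr_assign_type for sysfs/udev to read. A userspace model of the generation rule (illustrative; not the kernel helper):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* Model of random_ether_addr(): random bytes, then force
 * unicast (clear bit 0) + locally administered (set bit 1). */
static void random_ether_addr_demo(uint8_t addr[6])
{
	for (int i = 0; i < 6; i++)
		addr[i] = rand() & 0xff;
	addr[0] &= 0xfe;	/* clear multicast bit */
	addr[0] |= 0x02;	/* set locally-administered bit */
}

int main(void)
{
	uint8_t mac[6];

	srand((unsigned)time(NULL));
	random_ether_addr_demo(mac);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}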
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h
index e24ce6e..35280b3 100644
--- a/include/linux/if_macvlan.h
+++ b/include/linux/if_macvlan.h
@@ -72,6 +72,8 @@
 	}
 }
 
+extern void macvlan_common_setup(struct net_device *dev);
+
 extern int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
 				  struct nlattr *tb[], struct nlattr *data[],
 				  int (*receive)(struct sk_buff *skb),
diff --git a/include/linux/ks8842.h b/include/linux/ks8842.h
index da0341b..14ba445 100644
--- a/include/linux/ks8842.h
+++ b/include/linux/ks8842.h
@@ -25,10 +25,14 @@
  * struct ks8842_platform_data - Platform data of the KS8842 network driver
  * @macaddr:	The MAC address of the device; set to all zeroes to use the
  *		one in the chip.
+ * @rx_dma_channel:	The DMA channel to use for RX, -1 for none.
+ * @tx_dma_channel:	The DMA channel to use for TX, -1 for none.
  *
  */
 struct ks8842_platform_data {
 	u8 macaddr[ETH_ALEN];
+	int rx_dma_channel;
+	int tx_dma_channel;
 };
 
 #endif
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index b626289..1bca617 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -66,6 +66,11 @@
 #define HAVE_FREE_NETDEV		/* free_netdev() */
 #define HAVE_NETDEV_PRIV		/* netdev_priv() */
 
+/* hardware address assignment types */
+#define NET_ADDR_PERM		0	/* address is permanent (default) */
+#define NET_ADDR_RANDOM		1	/* address is generated randomly */
+#define NET_ADDR_STOLEN		2	/* address is stolen from other device */
+
 /* Backlog congestion levels */
 #define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
 #define NET_RX_DROP		1	/* packet dropped */
@@ -919,6 +924,7 @@
 
 	/* Interface address info. */
 	unsigned char		perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
+	unsigned char		addr_assign_type; /* hw address assignment type */
 	unsigned char		addr_len;	/* hardware address length	*/
 	unsigned short          dev_id;		/* for shared network cards */
 
diff --git a/include/linux/netfilter/Kbuild b/include/linux/netfilter/Kbuild
index bb103f4..edeeabd 100644
--- a/include/linux/netfilter/Kbuild
+++ b/include/linux/netfilter/Kbuild
@@ -3,6 +3,7 @@
 header-y += nfnetlink_conntrack.h
 header-y += nfnetlink_log.h
 header-y += nfnetlink_queue.h
+header-y += xt_CHECKSUM.h
 header-y += xt_CLASSIFY.h
 header-y += xt_CONNMARK.h
 header-y += xt_CONNSECMARK.h
@@ -19,17 +20,19 @@
 header-y += xt_TCPOPTSTRIP.h
 header-y += xt_TEE.h
 header-y += xt_TPROXY.h
+header-y += xt_cluster.h
 header-y += xt_comment.h
 header-y += xt_connbytes.h
 header-y += xt_connlimit.h
 header-y += xt_connmark.h
 header-y += xt_conntrack.h
-header-y += xt_cluster.h
+header-y += xt_cpu.h
 header-y += xt_dccp.h
 header-y += xt_dscp.h
 header-y += xt_esp.h
 header-y += xt_hashlimit.h
 header-y += xt_iprange.h
+header-y += xt_ipvs.h
 header-y += xt_helper.h
 header-y += xt_length.h
 header-y += xt_limit.h
diff --git a/include/linux/netfilter/nfnetlink_log.h b/include/linux/netfilter/nfnetlink_log.h
index 1d0b84a..ea9b8d3 100644
--- a/include/linux/netfilter/nfnetlink_log.h
+++ b/include/linux/netfilter/nfnetlink_log.h
@@ -89,7 +89,7 @@
 #define NFULNL_COPY_NONE	0x00
 #define NFULNL_COPY_META	0x01
 #define NFULNL_COPY_PACKET	0x02
-#define NFULNL_COPY_DISABLED	0x03
+/* 0xff is reserved, don't use it for new copy modes. */
 
 #define NFULNL_CFG_F_SEQ	0x0001
 #define NFULNL_CFG_F_SEQ_GLOBAL	0x0002
diff --git a/include/linux/netfilter/xt_CHECKSUM.h b/include/linux/netfilter/xt_CHECKSUM.h
new file mode 100644
index 0000000..9a2e466
--- /dev/null
+++ b/include/linux/netfilter/xt_CHECKSUM.h
@@ -0,0 +1,20 @@
+/* Header file for iptables ipt_CHECKSUM target
+ *
+ * (C) 2002 by Harald Welte <laforge@gnumonks.org>
+ * (C) 2010 Red Hat Inc
+ * Author: Michael S. Tsirkin <mst@redhat.com>
+ *
+ * This software is distributed under GNU GPL v2, 1991
+*/
+#ifndef _XT_CHECKSUM_TARGET_H
+#define _XT_CHECKSUM_TARGET_H
+
+#include <linux/types.h>
+
+#define XT_CHECKSUM_OP_FILL	0x01	/* fill in checksum in IP header */
+
+struct xt_CHECKSUM_info {
+	__u8 operation;	/* bitset of operations */
+};
+
+#endif /* _XT_CHECKSUM_TARGET_H */
diff --git a/include/linux/netfilter/xt_cpu.h b/include/linux/netfilter/xt_cpu.h
new file mode 100644
index 0000000..93c7f11
--- /dev/null
+++ b/include/linux/netfilter/xt_cpu.h
@@ -0,0 +1,11 @@
+#ifndef _XT_CPU_H
+#define _XT_CPU_H
+
+#include <linux/types.h>
+
+struct xt_cpu_info {
+	__u32	cpu;
+	__u32	invert;
+};
+
+#endif /*_XT_CPU_H*/
diff --git a/include/linux/netfilter/xt_ipvs.h b/include/linux/netfilter/xt_ipvs.h
new file mode 100644
index 0000000..1167aeb
--- /dev/null
+++ b/include/linux/netfilter/xt_ipvs.h
@@ -0,0 +1,27 @@
+#ifndef _XT_IPVS_H
+#define _XT_IPVS_H
+
+enum {
+	XT_IPVS_IPVS_PROPERTY =	1 << 0, /* all other options imply this one */
+	XT_IPVS_PROTO =		1 << 1,
+	XT_IPVS_VADDR =		1 << 2,
+	XT_IPVS_VPORT =		1 << 3,
+	XT_IPVS_DIR =		1 << 4,
+	XT_IPVS_METHOD =	1 << 5,
+	XT_IPVS_VPORTCTL =	1 << 6,
+	XT_IPVS_MASK =		(1 << 7) - 1,
+	XT_IPVS_ONCE_MASK =	XT_IPVS_MASK & ~XT_IPVS_IPVS_PROPERTY
+};
+
+struct xt_ipvs_mtinfo {
+	union nf_inet_addr	vaddr, vmask;
+	__be16			vport;
+	__u8			l4proto;
+	__u8			fwd_method;
+	__be16			vportctl;
+
+	__u8			invert;
+	__u8			bitmask;
+};
+
+#endif /* _XT_IPVS_H */
diff --git a/include/linux/netfilter/xt_quota.h b/include/linux/netfilter/xt_quota.h
index 8dc89df..b0d28c6 100644
--- a/include/linux/netfilter/xt_quota.h
+++ b/include/linux/netfilter/xt_quota.h
@@ -11,9 +11,9 @@
 struct xt_quota_info {
 	u_int32_t		flags;
 	u_int32_t		pad;
+	aligned_u64		quota;
 
 	/* Used internally by the kernel */
-	aligned_u64		quota;
 	struct xt_quota_priv	*master;
 };
 
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index ae66851..384c2a2 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1493,6 +1493,9 @@
 #define PCI_DEVICE_ID_SBE_WANXL100	0x0301
 #define PCI_DEVICE_ID_SBE_WANXL200	0x0302
 #define PCI_DEVICE_ID_SBE_WANXL400	0x0104
+#define PCI_SUBDEVICE_ID_SBE_T3E3	0x0009
+#define PCI_SUBDEVICE_ID_SBE_2T3E3_P0	0x0901
+#define PCI_SUBDEVICE_ID_SBE_2T3E3_P1	0x0902
 
 #define PCI_VENDOR_ID_TOSHIBA		0x1179
 #define PCI_DEVICE_ID_TOSHIBA_PICCOLO_1	0x0101
@@ -2053,7 +2056,6 @@
 #define PCI_DEVICE_ID_NX2_57711E	0x1650
 #define PCI_DEVICE_ID_TIGON3_5705	0x1653
 #define PCI_DEVICE_ID_TIGON3_5705_2	0x1654
-#define PCI_DEVICE_ID_TIGON3_5720	0x1658
 #define PCI_DEVICE_ID_TIGON3_5721	0x1659
 #define PCI_DEVICE_ID_TIGON3_5722	0x165a
 #define PCI_DEVICE_ID_TIGON3_5723	0x165b
@@ -2067,13 +2069,11 @@
 #define PCI_DEVICE_ID_TIGON3_5754M	0x1672
 #define PCI_DEVICE_ID_TIGON3_5755M	0x1673
 #define PCI_DEVICE_ID_TIGON3_5756	0x1674
-#define PCI_DEVICE_ID_TIGON3_5750	0x1676
 #define PCI_DEVICE_ID_TIGON3_5751	0x1677
 #define PCI_DEVICE_ID_TIGON3_5715	0x1678
 #define PCI_DEVICE_ID_TIGON3_5715S	0x1679
 #define PCI_DEVICE_ID_TIGON3_5754	0x167a
 #define PCI_DEVICE_ID_TIGON3_5755	0x167b
-#define PCI_DEVICE_ID_TIGON3_5750M	0x167c
 #define PCI_DEVICE_ID_TIGON3_5751M	0x167d
 #define PCI_DEVICE_ID_TIGON3_5751F	0x167e
 #define PCI_DEVICE_ID_TIGON3_5787F	0x167f
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index fbc8cb0..58d4449 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -282,6 +282,7 @@
 	RTA_SESSION, /* no longer used */
 	RTA_MP_ALGO, /* no longer used */
 	RTA_TABLE,
+	RTA_MARK,
 	__RTA_MAX
 };
 
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index f5aa87e..d20d9e7 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -169,6 +169,7 @@
  * @software:		generate software time stamp
  * @in_progress:	device driver is going to provide
  *			hardware time stamp
+ * @prevent_sk_orphan:	keep the sk reference available at driver level
  * @flags:		all shared_tx flags
  *
  * These flags are attached to packets as part of the
@@ -178,7 +179,8 @@
 	struct {
 		__u8	hardware:1,
 			software:1,
-			in_progress:1;
+			in_progress:1,
+			prevent_sk_orphan:1;
 	};
 	__u8 flags;
 };
@@ -202,10 +204,11 @@
 	 */
 	atomic_t	dataref;
 
-	skb_frag_t	frags[MAX_SKB_FRAGS];
 	/* Intermediate layers must ensure that destructor_arg
 	 * remains valid until skb destructor */
 	void *		destructor_arg;
+	/* must be last field, see pskb_expand_head() */
+	skb_frag_t	frags[MAX_SKB_FRAGS];
 };
 
 /* We divide dataref into two halves.  The higher 16 bits hold references
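Placing frags[] last lets code that copies skb_shared_info — pskb_expand_head() in particular — copy only up to the last fragment actually in use rather than the full MAX_SKB_FRAGS array, while destructor_arg, which must always survive, now sits in the always-copied prefix. A userspace sketch of the size arithmetic (a simplified model, not the real skb_shared_info):

#include <stddef.h>
#include <stdio.h>

struct frag { void *page; int off, size; };	/* simplified model */
struct shinfo_model {
	int dataref;
	void *destructor_arg;	/* always copied: must stay valid */
	struct frag frags[18];	/* last field: copied only up to nr_frags */
};

int main(void)
{
	int nr_frags = 2;
	size_t partial = offsetof(struct shinfo_model, frags)
			 + nr_frags * sizeof(struct frag);

	printf("copy %zu of %zu bytes\n", partial,
	       sizeof(struct shinfo_model));
	return 0;
}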
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index fe82b1e..a4747a0 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -632,10 +632,22 @@
 (int af, int protocol, const union nf_inet_addr *s_addr, __be16 s_port,
  const union nf_inet_addr *d_addr, __be16 d_port);
 
+struct ip_vs_conn * ip_vs_conn_in_get_proto(int af, const struct sk_buff *skb,
+					    struct ip_vs_protocol *pp,
+					    const struct ip_vs_iphdr *iph,
+					    unsigned int proto_off,
+					    int inverse);
+
 extern struct ip_vs_conn *ip_vs_conn_out_get
 (int af, int protocol, const union nf_inet_addr *s_addr, __be16 s_port,
  const union nf_inet_addr *d_addr, __be16 d_port);
 
+struct ip_vs_conn * ip_vs_conn_out_get_proto(int af, const struct sk_buff *skb,
+					     struct ip_vs_protocol *pp,
+					     const struct ip_vs_iphdr *iph,
+					     unsigned int proto_off,
+					     int inverse);
+
 /* put back the conn without restarting its timer */
 static inline void __ip_vs_conn_put(struct ip_vs_conn *cp)
 {
@@ -736,8 +748,6 @@
 
 extern int ip_vs_app_pkt_out(struct ip_vs_conn *, struct sk_buff *skb);
 extern int ip_vs_app_pkt_in(struct ip_vs_conn *, struct sk_buff *skb);
-extern int ip_vs_skb_replace(struct sk_buff *skb, gfp_t pri,
-			     char *o_buf, int o_len, char *n_buf, int n_len);
 extern int ip_vs_app_init(void);
 extern void ip_vs_app_cleanup(void);
 
diff --git a/include/net/irda/irda.h b/include/net/irda/irda.h
index 7e58206..3bed61d 100644
--- a/include/net/irda/irda.h
+++ b/include/net/irda/irda.h
@@ -53,10 +53,6 @@
 #ifndef IRDA_ALIGN
 #  define IRDA_ALIGN __attribute__((aligned))
 #endif
-#ifndef IRDA_PACK
-#  define IRDA_PACK __attribute__((packed))
-#endif
-
 
 #ifdef CONFIG_IRDA_DEBUG
 
diff --git a/include/net/irda/irlap_frame.h b/include/net/irda/irlap_frame.h
index 641f88e..6b1dc4f 100644
--- a/include/net/irda/irlap_frame.h
+++ b/include/net/irda/irlap_frame.h
@@ -85,7 +85,7 @@
 struct disc_frame {
 	__u8 caddr;          /* Connection address */
 	__u8 control;
-} IRDA_PACK;
+} __packed;
 
 struct xid_frame {
 	__u8  caddr; /* Connection address */
@@ -96,41 +96,41 @@
 	__u8  flags; /* Discovery flags */
 	__u8  slotnr;
 	__u8  version;
-} IRDA_PACK;
+} __packed;
 
 struct test_frame {
 	__u8 caddr;          /* Connection address */
 	__u8 control;
 	__le32 saddr;         /* Source device address */
 	__le32 daddr;         /* Destination device address */
-} IRDA_PACK;
+} __packed;
 
 struct ua_frame {
 	__u8 caddr;
 	__u8 control;
 	__le32 saddr; /* Source device address */
 	__le32 daddr; /* Dest device address */
-} IRDA_PACK;
+} __packed;
 
 struct dm_frame {
 	__u8 caddr;          /* Connection address */
 	__u8 control;
-} IRDA_PACK;
+} __packed;
 
 struct rd_frame {
 	__u8 caddr;          /* Connection address */
 	__u8 control;
-} IRDA_PACK;
+} __packed;
 
 struct rr_frame {
 	__u8 caddr;          /* Connection address */
 	__u8 control;
-} IRDA_PACK;
+} __packed;
 
 struct i_frame {
 	__u8 caddr;
 	__u8 control;
-} IRDA_PACK;
+} __packed;
 
 struct snrm_frame {
 	__u8  caddr;
@@ -138,7 +138,7 @@
 	__le32 saddr;
 	__le32 daddr;
 	__u8  ncaddr;
-} IRDA_PACK;
+} __packed;
 
 void irlap_queue_xmit(struct irlap_cb *self, struct sk_buff *skb);
 void irlap_send_discovery_xid_frame(struct irlap_cb *, int S, __u8 s, 
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index f85fc8a..b0787a1 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -419,7 +419,7 @@
 	s8 idx;
 	u8 count;
 	u8 flags;
-} __attribute__((packed));
+} __packed;
 
 /**
  * struct ieee80211_tx_info - skb transmit information
diff --git a/include/net/netfilter/nf_conntrack_extend.h b/include/net/netfilter/nf_conntrack_extend.h
index 32d15bd..0772d29 100644
--- a/include/net/netfilter/nf_conntrack_extend.h
+++ b/include/net/netfilter/nf_conntrack_extend.h
@@ -28,9 +28,14 @@
 	char data[0];
 };
 
-static inline int nf_ct_ext_exist(const struct nf_conn *ct, u8 id)
+static inline bool __nf_ct_ext_exist(const struct nf_ct_ext *ext, u8 id)
 {
-	return (ct->ext && ct->ext->offset[id]);
+	return !!ext->offset[id];
+}
+
+static inline bool nf_ct_ext_exist(const struct nf_conn *ct, u8 id)
+{
+	return (ct->ext && __nf_ct_ext_exist(ct->ext, id));
 }
 
 static inline void *__nf_ct_ext_find(const struct nf_conn *ct, u8 id)
diff --git a/include/net/netfilter/nf_nat_protocol.h b/include/net/netfilter/nf_nat_protocol.h
index c398017..df17bac 100644
--- a/include/net/netfilter/nf_nat_protocol.h
+++ b/include/net/netfilter/nf_nat_protocol.h
@@ -27,9 +27,9 @@
 
 	/* Alter the per-proto part of the tuple (depending on
 	   maniptype), to give a unique tuple in the given range if
-	   possible; return false if not.  Per-protocol part of tuple
-	   is initialized to the incoming packet. */
-	bool (*unique_tuple)(struct nf_conntrack_tuple *tuple,
+	   possible.  Per-protocol part of tuple is initialized to the
+	   incoming packet. */
+	void (*unique_tuple)(struct nf_conntrack_tuple *tuple,
 			     const struct nf_nat_range *range,
 			     enum nf_nat_manip_type maniptype,
 			     const struct nf_conn *ct);
@@ -63,7 +63,7 @@
 				  const union nf_conntrack_man_proto *min,
 				  const union nf_conntrack_man_proto *max);
 
-extern bool nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple,
+extern void nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple,
 				      const struct nf_nat_range *range,
 				      enum nf_nat_manip_type maniptype,
 				      const struct nf_conn *ct,
diff --git a/include/net/netfilter/nfnetlink_log.h b/include/net/netfilter/nfnetlink_log.h
index b0569ff..e2dec42 100644
--- a/include/net/netfilter/nfnetlink_log.h
+++ b/include/net/netfilter/nfnetlink_log.h
@@ -10,5 +10,7 @@
 		  const struct nf_loginfo *li_user,
 		  const char *prefix);
 
+#define NFULNL_COPY_DISABLED    0xff
+
 #endif /* _KER_NFNETLINK_LOG_H */
 
diff --git a/include/net/tc_act/tc_mirred.h b/include/net/tc_act/tc_mirred.h
index ceac661..cfe2943 100644
--- a/include/net/tc_act/tc_mirred.h
+++ b/include/net/tc_act/tc_mirred.h
@@ -9,6 +9,7 @@
 	int			tcfm_ifindex;
 	int			tcfm_ok_push;
 	struct net_device	*tcfm_dev;
+	struct list_head	tcfm_list;
 };
 #define to_mirred(pc) \
 	container_of(pc, struct tcf_mirred, common)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 422cb19..37642ad 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1788,6 +1788,29 @@
 	return retval;
 }
 
+/**
+ * cgroup_attach_task_current_cg - attach task 'tsk' to current task's cgroup
+ * @tsk: the task to be attached
+ */
+int cgroup_attach_task_current_cg(struct task_struct *tsk)
+{
+	struct cgroupfs_root *root;
+	struct cgroup *cur_cg;
+	int retval = 0;
+
+	cgroup_lock();
+	for_each_active_root(root) {
+		cur_cg = task_cgroup_from_root(current, root);
+		retval = cgroup_attach_task(cur_cg, tsk);
+		if (retval)
+			break;
+	}
+	cgroup_unlock();
+
+	return retval;
+}
+EXPORT_SYMBOL_GPL(cgroup_attach_task_current_cg);
+
 /*
  * Attach task with pid 'pid' to cgroup 'cgrp'. Call with cgroup_mutex
  * held. May take task_lock of task
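cgroup_attach_task_current_cg() walks every active hierarchy and attaches tsk to the cgroup that current occupies in each — exactly what the vhost owner path needs so the worker kthread is billed to the VM's cgroups; with CONFIG_CGROUPS=n it degrades to a no-op returning 0. A sketch of the intended call pattern, mirroring vhost_dev_set_owner() earlier in this diff (the spawn helper itself is hypothetical):

#include <linux/cgroup.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

/* Put a freshly created kthread into the caller's cgroups
 * before letting it run. */
static struct task_struct *spawn_charged_worker(int (*fn)(void *), void *data)
{
	struct task_struct *t = kthread_create(fn, data, "worker-%d",
					       current->pid);

	if (IS_ERR(t))
		return t;
	if (cgroup_attach_task_current_cg(t)) {
		kthread_stop(t);
		return ERR_PTR(-EINVAL);
	}
	wake_up_process(t);
	return t;
}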
diff --git a/net/Kconfig b/net/Kconfig
index b325094..e24fa08 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -32,7 +32,7 @@
 config COMPAT_NETLINK_MESSAGES
 	def_bool y
 	depends on COMPAT
-	depends on WIRELESS_EXT || WANT_COMPAT_NETLINK_MESSAGES
+	depends on WEXT_CORE || WANT_COMPAT_NETLINK_MESSAGES
 	help
 	  This option makes it possible to send different netlink messages
 	  to tasks depending on whether the task is a compat task or not. To
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 075c435..cf09fe59 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -22,7 +22,7 @@
 #include <asm/uaccess.h>
 #include "br_private.h"
 
-/* net device transmit always called with no BH (preempt_disabled) */
+/* net device transmit always called with BH disabled */
 netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct net_bridge *br = netdev_priv(dev);
@@ -48,13 +48,16 @@
 	skb_reset_mac_header(skb);
 	skb_pull(skb, ETH_HLEN);
 
+	rcu_read_lock();
 	if (is_multicast_ether_addr(dest)) {
 		if (unlikely(netpoll_tx_running(dev))) {
 			br_flood_deliver(br, skb);
 			goto out;
 		}
-		if (br_multicast_rcv(br, NULL, skb))
+		if (br_multicast_rcv(br, NULL, skb)) {
+			kfree_skb(skb);
 			goto out;
+		}
 
 		mdst = br_mdb_get(br, skb);
 		if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb))
@@ -67,6 +70,7 @@
 		br_flood_deliver(br, skb);
 
 out:
+	rcu_read_unlock();
 	return NETDEV_TX_OK;
 }
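The added kfree_skb() plugs a leak: once br_dev_xmit() is entered the bridge owns the skb, so any path returning NETDEV_TX_OK without handing the skb onward must free it; the new rcu_read_lock()/rcu_read_unlock() pair makes the RCU assumption explicit instead of relying on BHs being disabled. A sketch of that ownership rule (example_should_drop() is a hypothetical predicate):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static bool example_should_drop(const struct sk_buff *skb)
{
	return skb->len == 0;	/* hypothetical predicate */
}

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (example_should_drop(skb)) {
		kfree_skb(skb);		/* consumed: still NETDEV_TX_OK */
		return NETDEV_TX_OK;
	}
	/* ... hand the skb to hardware or forward it ... */
	return NETDEV_TX_OK;
}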
 
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index a744296..90512cc 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -214,7 +214,7 @@
 	spin_unlock_bh(&br->hash_lock);
 }
 
-/* No locking or refcounting, assumes caller has no preempt (rcu_read_lock) */
+/* No locking or refcounting, assumes caller has rcu_read_lock */
 struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br,
 					  const unsigned char *addr)
 {
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 5fc1c5b..826cd52 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -39,7 +39,7 @@
 		       netif_receive_skb);
 }
 
-/* note: already called with rcu_read_lock (preempt_disabled) */
+/* note: already called with rcu_read_lock */
 int br_handle_frame_finish(struct sk_buff *skb)
 {
 	const unsigned char *dest = eth_hdr(skb)->h_dest;
@@ -110,7 +110,7 @@
 	goto out;
 }
 
-/* note: already called with rcu_read_lock (preempt_disabled) */
+/* note: already called with rcu_read_lock */
 static int br_handle_local_finish(struct sk_buff *skb)
 {
 	struct net_bridge_port *p = br_port_get_rcu(skb->dev);
@@ -133,8 +133,7 @@
 
 /*
  * Return NULL if skb is handled
- * note: already called with rcu_read_lock (preempt_disabled) from
- * netif_receive_skb
+ * note: already called with rcu_read_lock
  */
 struct sk_buff *br_handle_frame(struct sk_buff *skb)
 {
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 85afcda..eb5b256 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1728,13 +1728,9 @@
 int br_multicast_toggle(struct net_bridge *br, unsigned long val)
 {
 	struct net_bridge_port *port;
-	int err = -ENOENT;
+	int err = 0;
 
 	spin_lock(&br->multicast_lock);
-	if (!netif_running(br->dev))
-		goto unlock;
-
-	err = 0;
 	if (br->multicast_disabled == !val)
 		goto unlock;
 
@@ -1742,6 +1738,9 @@
 	if (br->multicast_disabled)
 		goto unlock;
 
+	if (!netif_running(br->dev))
+		goto unlock;
+
 	if (br->mdb) {
 		if (br->mdb->old) {
 			err = -EEXIST;
diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
index 70aecb4..35cf270 100644
--- a/net/bridge/br_stp_bpdu.c
+++ b/net/bridge/br_stp_bpdu.c
@@ -131,7 +131,7 @@
 /*
  * Called from llc.
  *
- * NO locks, but rcu_read_lock (preempt_disabled)
+ * NO locks, but rcu_read_lock
  */
 void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
 		struct net_device *dev)
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c
index 4b04d25..eb16020 100644
--- a/net/caif/cfrfml.c
+++ b/net/caif/cfrfml.c
@@ -193,7 +193,7 @@
 
 static int cfrfml_transmit_segment(struct cfrfml *rfml, struct cfpkt *pkt)
 {
-	caif_assert(!cfpkt_getlen(pkt) < rfml->fragment_size);
+	caif_assert(cfpkt_getlen(pkt) >= rfml->fragment_size);
 
 	/* Add info for MUX-layer to route the packet out. */
 	cfpkt_info(pkt)->channel_id = rfml->serv.layer.id;
diff --git a/net/can/raw.c b/net/can/raw.c
index ccfe633..a10e333 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -650,6 +650,10 @@
 	err = sock_tx_timestamp(msg, sk, skb_tx(skb));
 	if (err < 0)
 		goto free_skb;
+
+	/* to be able to check the received tx sock reference in raw_rcv() */
+	skb_tx(skb)->prevent_sk_orphan = 1;
+
 	skb->dev = dev;
 	skb->sk  = sk;
 
diff --git a/net/core/dev.c b/net/core/dev.c
index 6e1b437..e1c1cdc 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -101,8 +101,6 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/stat.h>
-#include <linux/if_bridge.h>
-#include <linux/if_macvlan.h>
 #include <net/dst.h>
 #include <net/pkt_sched.h>
 #include <net/checksum.h>
@@ -1484,6 +1482,7 @@
 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
 {
 	skb_orphan(skb);
+	nf_reset(skb);
 
 	if (!(dev->flags & IFF_UP) ||
 	    (skb->len > (dev->mtu + dev->hard_header_len))) {
@@ -1592,9 +1591,7 @@
 
 void dev_kfree_skb_irq(struct sk_buff *skb)
 {
-	if (!skb->destructor)
-		dev_kfree_skb(skb);
-	else if (atomic_dec_and_test(&skb->users)) {
+	if (atomic_dec_and_test(&skb->users)) {
 		struct softnet_data *sd;
 		unsigned long flags;
 
@@ -2645,10 +2642,10 @@
 	int result = TC_ACT_OK;
 	struct Qdisc *q;
 
-	if (MAX_RED_LOOP < ttl++) {
-		printk(KERN_WARNING
-		       "Redir loop detected Dropping packet (%d->%d)\n",
-		       skb->skb_iif, dev->ifindex);
+	if (unlikely(MAX_RED_LOOP < ttl++)) {
+		if (net_ratelimit())
+			pr_warning("Redir loop detected, dropping packet (%d->%d)\n",
+				   skb->skb_iif, dev->ifindex);
 		return TC_ACT_SHOT;
 	}
 
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index 646ef3b..36e603c 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -347,9 +347,9 @@
 
 static int __init init_net_drop_monitor(void)
 {
-	int cpu;
-	int rc, i, ret;
 	struct per_cpu_dm_data *data;
+	int cpu, rc;
+
 	printk(KERN_INFO "Initializing network drop monitor service\n");
 
 	if (sizeof(void *) > 8) {
@@ -357,21 +357,12 @@
 		return -ENOSPC;
 	}
 
-	if (genl_register_family(&net_drop_monitor_family) < 0) {
+	rc = genl_register_family_with_ops(&net_drop_monitor_family,
+					   dropmon_ops,
+					   ARRAY_SIZE(dropmon_ops));
+	if (rc) {
 		printk(KERN_ERR "Could not create drop monitor netlink family\n");
-		return -EFAULT;
-	}
-
-	rc = -EFAULT;
-
-	for (i = 0; i < ARRAY_SIZE(dropmon_ops); i++) {
-		ret = genl_register_ops(&net_drop_monitor_family,
-					&dropmon_ops[i]);
-		if (ret) {
-			printk(KERN_CRIT "Failed to register operation %d\n",
-				dropmon_ops[i].cmd);
-			goto out_unreg;
-		}
+		return rc;
 	}
 
 	rc = register_netdevice_notifier(&dropmon_net_notifier);
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index d2b5965..af4dfba 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -95,6 +95,7 @@
 }
 
 NETDEVICE_SHOW(dev_id, fmt_hex);
+NETDEVICE_SHOW(addr_assign_type, fmt_dec);
 NETDEVICE_SHOW(addr_len, fmt_dec);
 NETDEVICE_SHOW(iflink, fmt_dec);
 NETDEVICE_SHOW(ifindex, fmt_dec);
@@ -295,6 +296,7 @@
 }
 
 static struct device_attribute net_class_attributes[] = {
+	__ATTR(addr_assign_type, S_IRUGO, show_addr_assign_type, NULL),
 	__ATTR(addr_len, S_IRUGO, show_addr_len, NULL),
 	__ATTR(dev_id, S_IRUGO, show_dev_id, NULL),
 	__ATTR(ifalias, S_IRUGO | S_IWUSR, show_ifalias, store_ifalias),
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index c2b7a8b..537e01a 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -49,6 +49,7 @@
 		(MAX_UDP_CHUNK + sizeof(struct udphdr) + \
 				sizeof(struct iphdr) + sizeof(struct ethhdr))
 
+static void zap_completion_queue(void);
 static void arp_reply(struct sk_buff *skb);
 
 static unsigned int carrier_timeout = 4;
@@ -196,6 +197,7 @@
 
 	service_arp_queue(dev->npinfo);
 
+	zap_completion_queue();
 }
 EXPORT_SYMBOL(netpoll_poll_dev);
 
@@ -221,11 +223,40 @@
 	spin_unlock_irqrestore(&skb_pool.lock, flags);
 }
 
+static void zap_completion_queue(void)
+{
+	unsigned long flags;
+	struct softnet_data *sd = &get_cpu_var(softnet_data);
+
+	if (sd->completion_queue) {
+		struct sk_buff *clist;
+
+		local_irq_save(flags);
+		clist = sd->completion_queue;
+		sd->completion_queue = NULL;
+		local_irq_restore(flags);
+
+		while (clist != NULL) {
+			struct sk_buff *skb = clist;
+			clist = clist->next;
+			if (skb->destructor) {
+				atomic_inc(&skb->users);
+				dev_kfree_skb_any(skb); /* put this one back */
+			} else {
+				__kfree_skb(skb);
+			}
+		}
+	}
+
+	put_cpu_var(softnet_data);
+}
+
 static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
 {
 	int count = 0;
 	struct sk_buff *skb;
 
+	zap_completion_queue();
 	refill_skbs();
 repeat:
 
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 24a19de..10a1ea7 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -1434,18 +1434,12 @@
 		i += len;
 
 		for (*m = 0; *v && m < pkt_dev->dst_mac + 6; v++) {
-			if (*v >= '0' && *v <= '9') {
-				*m *= 16;
-				*m += *v - '0';
-			}
-			if (*v >= 'A' && *v <= 'F') {
-				*m *= 16;
-				*m += *v - 'A' + 10;
-			}
-			if (*v >= 'a' && *v <= 'f') {
-				*m *= 16;
-				*m += *v - 'a' + 10;
-			}
+			int value;
+
+			value = hex_to_bin(*v);
+			if (value >= 0)
+				*m = *m * 16 + value;
+
 			if (*v == ':') {
 				m++;
 				*m = 0;
@@ -1476,18 +1470,12 @@
 		i += len;
 
 		for (*m = 0; *v && m < pkt_dev->src_mac + 6; v++) {
-			if (*v >= '0' && *v <= '9') {
-				*m *= 16;
-				*m += *v - '0';
-			}
-			if (*v >= 'A' && *v <= 'F') {
-				*m *= 16;
-				*m += *v - 'A' + 10;
-			}
-			if (*v >= 'a' && *v <= 'f') {
-				*m *= 16;
-				*m += *v - 'a' + 10;
-			}
+			int value;
+
+			value = hex_to_bin(*v);
+			if (value >= 0)
+				*m = *m * 16 + value;
+
 			if (*v == ':') {
 				m++;
 				*m = 0;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 76d33ca..3a2513f 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -817,7 +817,7 @@
 	memcpy(data + nhead, skb->head, skb->tail - skb->head);
 #endif
 	memcpy(data + size, skb_end_pointer(skb),
-	       sizeof(struct skb_shared_info));
+	       offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
 		get_page(skb_shinfo(skb)->frags[i].page);
@@ -843,7 +843,9 @@
 	skb->network_header   += off;
 	if (skb_mac_header_was_set(skb))
 		skb->mac_header += off;
-	skb->csum_start       += nhead;
+	/* Only adjust this if it actually is csum_start rather than csum */
+	if (skb->ip_summed == CHECKSUM_PARTIAL)
+		skb->csum_start += nhead;
 	skb->cloned   = 0;
 	skb->hdr_len  = 0;
 	skb->nohdr    = 0;
@@ -930,7 +932,8 @@
 	copy_skb_header(n, skb);
 
 	off                  = newheadroom - oldheadroom;
-	n->csum_start       += off;
+	if (n->ip_summed == CHECKSUM_PARTIAL)
+		n->csum_start += off;
 #ifdef NET_SKBUFF_DATA_USES_OFFSET
 	n->transport_header += off;
 	n->network_header   += off;
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 6652bd9..04b6989 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -446,7 +446,7 @@
 	int ptr;
 	struct net_device *dev;
 	struct sk_buff *skb2;
-	unsigned int mtu, hlen, left, len, ll_rs, pad;
+	unsigned int mtu, hlen, left, len, ll_rs;
 	int offset;
 	__be16 not_last_frag;
 	struct rtable *rt = skb_rtable(skb);
@@ -585,9 +585,7 @@
 	/* for bridged IP traffic encapsulated inside e.g. a vlan header,
 	 * we need to make room for the encapsulating header
 	 */
-	pad = nf_bridge_pad(skb);
-	ll_rs = LL_RESERVED_SPACE_EXTRA(rt->dst.dev, pad);
-	mtu -= pad;
+	ll_rs = LL_RESERVED_SPACE_EXTRA(rt->dst.dev, nf_bridge_pad(skb));
 
 	/*
 	 *	Fragment the datagram.
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 16c0ba0..6bccba3 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -283,16 +283,13 @@
 	arp = arp_hdr(skb);
 	do {
 		const struct arpt_entry_target *t;
-		int hdr_len;
 
 		if (!arp_packet_match(arp, skb->dev, indev, outdev, &e->arp)) {
 			e = arpt_next_entry(e);
 			continue;
 		}
 
-		hdr_len = sizeof(*arp) + (2 * sizeof(struct in_addr)) +
-			(2 * skb->dev->addr_len);
-		ADD_COUNTER(e->counters, hdr_len, 1);
+		ADD_COUNTER(e->counters, arp_hdr_len(skb->dev), 1);
 
 		t = arpt_get_target_c(e);
 
@@ -713,7 +710,7 @@
 	struct arpt_entry *iter;
 	unsigned int cpu;
 	unsigned int i;
-	unsigned int curcpu;
+	unsigned int curcpu = get_cpu();
 
 	/* Instead of clearing (by a previous call to memset())
 	 * the counters and using adds, we set the counters
@@ -723,14 +720,16 @@
 	 * if new softirq were to run and call ipt_do_table
 	 */
 	local_bh_disable();
-	curcpu = smp_processor_id();
-
 	i = 0;
 	xt_entry_foreach(iter, t->entries[curcpu], t->size) {
 		SET_COUNTER(counters[i], iter->counters.bcnt,
 			    iter->counters.pcnt);
 		++i;
 	}
+	local_bh_enable();
+	/* Processing counters from other cpus, we can leave bottom halves
+	 * enabled (preemption is still disabled).
+	 */
 
 	for_each_possible_cpu(cpu) {
 		if (cpu == curcpu)
@@ -744,7 +743,7 @@
 		}
 		xt_info_wrunlock(cpu);
 	}
-	local_bh_enable();
+	put_cpu();
 }
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index b38c118..c439721 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -364,7 +364,7 @@
 				goto no_match;
 		}
 
-		ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);
+		ADD_COUNTER(e->counters, skb->len, 1);
 
 		t = ipt_get_target(e);
 		IP_NF_ASSERT(t->u.kernel.target);
@@ -884,7 +884,7 @@
 	struct ipt_entry *iter;
 	unsigned int cpu;
 	unsigned int i;
-	unsigned int curcpu;
+	unsigned int curcpu = get_cpu();
 
 	/* Instead of clearing (by a previous call to memset())
 	 * the counters and using adds, we set the counters
@@ -894,14 +894,16 @@
 	 * if new softirq were to run and call ipt_do_table
 	 */
 	local_bh_disable();
-	curcpu = smp_processor_id();
-
 	i = 0;
 	xt_entry_foreach(iter, t->entries[curcpu], t->size) {
 		SET_COUNTER(counters[i], iter->counters.bcnt,
 			    iter->counters.pcnt);
 		++i;
 	}
+	local_bh_enable();
+	/* Processing counters from other cpus, we can leave bottom halves
+	 * enabled (preemption is still disabled).
+	 */
 
 	for_each_possible_cpu(cpu) {
 		if (cpu == curcpu)
@@ -915,7 +917,7 @@
 		}
 		xt_info_wrunlock(cpu);
 	}
-	local_bh_enable();
+	put_cpu();
 }
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 64d0875..3a43cf3 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -469,7 +469,7 @@
 	__be32 src_ip;
 	u_int8_t dst_hw[ETH_ALEN];
 	__be32 dst_ip;
-} __attribute__ ((packed));
+} __packed;
 
 #ifdef DEBUG
 static void arp_print(struct arp_payload *payload)
diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c
index bbbd273..b254daf 100644
--- a/net/ipv4/netfilter/ipt_REJECT.c
+++ b/net/ipv4/netfilter/ipt_REJECT.c
@@ -95,10 +95,11 @@
 	}
 
 	tcph->rst	= 1;
-	tcph->check	= tcp_v4_check(sizeof(struct tcphdr),
-				       niph->saddr, niph->daddr,
-				       csum_partial(tcph,
-						    sizeof(struct tcphdr), 0));
+	tcph->check = ~tcp_v4_check(sizeof(struct tcphdr), niph->saddr,
+				    niph->daddr, 0);
+	nskb->ip_summed = CHECKSUM_PARTIAL;
+	nskb->csum_start = (unsigned char *)tcph - nskb->head;
+	nskb->csum_offset = offsetof(struct tcphdr, check);
 
 	addr_type = RTN_UNSPEC;
 	if (hook != NF_INET_FORWARD
@@ -115,7 +116,6 @@
 		goto free_nskb;
 
 	niph->ttl	= dst_metric(skb_dst(nskb), RTAX_HOPLIMIT);
-	nskb->ip_summed = CHECKSUM_NONE;
 
 	/* "Never happens" */
 	if (nskb->len > dst_mtu(skb_dst(nskb)))
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index c7719b2..8c8632d 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -261,14 +261,9 @@
 	rcu_read_lock();
 	proto = __nf_nat_proto_find(orig_tuple->dst.protonum);
 
-	/* Change protocol info to have some randomization */
-	if (range->flags & IP_NAT_RANGE_PROTO_RANDOM) {
-		proto->unique_tuple(tuple, range, maniptype, ct);
-		goto out;
-	}
-
 	/* Only bother mapping if it's not already in range and unique */
-	if ((!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED) ||
+	if (!(range->flags & IP_NAT_RANGE_PROTO_RANDOM) &&
+	    (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED) ||
 	     proto->in_range(tuple, maniptype, &range->min, &range->max)) &&
 	    !nf_nat_used_tuple(tuple, ct))
 		goto out;
@@ -440,7 +435,7 @@
 	if (!skb_make_writable(skb, hdrlen + sizeof(*inside)))
 		return 0;
 
-	inside = (void *)skb->data + ip_hdrlen(skb);
+	inside = (void *)skb->data + hdrlen;
 
 	/* We're actually going to mangle it beyond trivial checksum
 	   adjustment, so make sure the current checksum is correct. */
@@ -470,12 +465,10 @@
 	/* rcu_read_lock()ed by nf_hook_slow */
 	l4proto = __nf_ct_l4proto_find(PF_INET, inside->ip.protocol);
 
-	if (!nf_ct_get_tuple(skb,
-			     ip_hdrlen(skb) + sizeof(struct icmphdr),
-			     (ip_hdrlen(skb) +
+	if (!nf_ct_get_tuple(skb, hdrlen + sizeof(struct icmphdr),
+			     (hdrlen +
 			      sizeof(struct icmphdr) + inside->ip.ihl * 4),
-			     (u_int16_t)AF_INET,
-			     inside->ip.protocol,
+			     (u_int16_t)AF_INET, inside->ip.protocol,
 			     &inner, l3proto, l4proto))
 		return 0;
 
@@ -484,15 +477,13 @@
 	   pass all hooks (locally-generated ICMP).  Consider incoming
 	   packet: PREROUTING (DST manip), routing produces ICMP, goes
 	   through POSTROUTING (which must correct the DST manip). */
-	if (!manip_pkt(inside->ip.protocol, skb,
-		       ip_hdrlen(skb) + sizeof(inside->icmp),
-		       &ct->tuplehash[!dir].tuple,
-		       !manip))
+	if (!manip_pkt(inside->ip.protocol, skb, hdrlen + sizeof(inside->icmp),
+		       &ct->tuplehash[!dir].tuple, !manip))
 		return 0;
 
 	if (skb->ip_summed != CHECKSUM_PARTIAL) {
 		/* Reloading "inside" here since manip_pkt inner. */
-		inside = (void *)skb->data + ip_hdrlen(skb);
+		inside = (void *)skb->data + hdrlen;
 		inside->icmp.checksum = 0;
 		inside->icmp.checksum =
 			csum_fold(skb_checksum(skb, hdrlen,
diff --git a/net/ipv4/netfilter/nf_nat_proto_common.c b/net/ipv4/netfilter/nf_nat_proto_common.c
index 6c4f11f..3e61faf 100644
--- a/net/ipv4/netfilter/nf_nat_proto_common.c
+++ b/net/ipv4/netfilter/nf_nat_proto_common.c
@@ -34,7 +34,7 @@
 }
 EXPORT_SYMBOL_GPL(nf_nat_proto_in_range);
 
-bool nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple,
+void nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple,
 			       const struct nf_nat_range *range,
 			       enum nf_nat_manip_type maniptype,
 			       const struct nf_conn *ct,
@@ -53,7 +53,7 @@
 	if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)) {
 		/* If it's dst rewrite, can't change port */
 		if (maniptype == IP_NAT_MANIP_DST)
-			return false;
+			return;
 
 		if (ntohs(*portptr) < 1024) {
 			/* Loose convention: >> 512 is credential passing */
@@ -81,15 +81,15 @@
 	else
 		off = *rover;
 
-	for (i = 0; i < range_size; i++, off++) {
+	for (i = 0; ; ++off) {
 		*portptr = htons(min + off % range_size);
-		if (nf_nat_used_tuple(tuple, ct))
+		if (++i != range_size && nf_nat_used_tuple(tuple, ct))
 			continue;
 		if (!(range->flags & IP_NAT_RANGE_PROTO_RANDOM))
 			*rover = off;
-		return true;
+		return;
 	}
-	return false;
+	return;
 }
 EXPORT_SYMBOL_GPL(nf_nat_proto_unique_tuple);
 
diff --git a/net/ipv4/netfilter/nf_nat_proto_dccp.c b/net/ipv4/netfilter/nf_nat_proto_dccp.c
index 22485ce..570faf2 100644
--- a/net/ipv4/netfilter/nf_nat_proto_dccp.c
+++ b/net/ipv4/netfilter/nf_nat_proto_dccp.c
@@ -22,14 +22,14 @@
 
 static u_int16_t dccp_port_rover;
 
-static bool
+static void
 dccp_unique_tuple(struct nf_conntrack_tuple *tuple,
 		  const struct nf_nat_range *range,
 		  enum nf_nat_manip_type maniptype,
 		  const struct nf_conn *ct)
 {
-	return nf_nat_proto_unique_tuple(tuple, range, maniptype, ct,
-					 &dccp_port_rover);
+	nf_nat_proto_unique_tuple(tuple, range, maniptype, ct,
+				  &dccp_port_rover);
 }
 
 static bool
diff --git a/net/ipv4/netfilter/nf_nat_proto_gre.c b/net/ipv4/netfilter/nf_nat_proto_gre.c
index d7e8920..bc8d83a 100644
--- a/net/ipv4/netfilter/nf_nat_proto_gre.c
+++ b/net/ipv4/netfilter/nf_nat_proto_gre.c
@@ -37,7 +37,7 @@
 MODULE_DESCRIPTION("Netfilter NAT protocol helper module for GRE");
 
 /* generate unique tuple ... */
-static bool
+static void
 gre_unique_tuple(struct nf_conntrack_tuple *tuple,
 		 const struct nf_nat_range *range,
 		 enum nf_nat_manip_type maniptype,
@@ -50,7 +50,7 @@
 	/* If there is no master conntrack we are not PPTP,
 	   do not change tuples */
 	if (!ct->master)
-		return false;
+		return;
 
 	if (maniptype == IP_NAT_MANIP_SRC)
 		keyptr = &tuple->src.u.gre.key;
@@ -68,14 +68,14 @@
 
 	pr_debug("min = %u, range_size = %u\n", min, range_size);
 
-	for (i = 0; i < range_size; i++, key++) {
+	for (i = 0; ; ++key) {
 		*keyptr = htons(min + key % range_size);
-		if (!nf_nat_used_tuple(tuple, ct))
-			return true;
+		if (++i == range_size || !nf_nat_used_tuple(tuple, ct))
+			return;
 	}
 
 	pr_debug("%p: no NAT mapping\n", ct);
-	return false;
+	return;
 }
 
 /* manipulate a GRE packet according to maniptype */
diff --git a/net/ipv4/netfilter/nf_nat_proto_icmp.c b/net/ipv4/netfilter/nf_nat_proto_icmp.c
index 19a8b0b..5744c3e 100644
--- a/net/ipv4/netfilter/nf_nat_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_nat_proto_icmp.c
@@ -27,7 +27,7 @@
 	       ntohs(tuple->src.u.icmp.id) <= ntohs(max->icmp.id);
 }
 
-static bool
+static void
 icmp_unique_tuple(struct nf_conntrack_tuple *tuple,
 		  const struct nf_nat_range *range,
 		  enum nf_nat_manip_type maniptype,
@@ -42,13 +42,13 @@
 	if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED))
 		range_size = 0xFFFF;
 
-	for (i = 0; i < range_size; i++, id++) {
+	for (i = 0; ; ++id) {
 		tuple->src.u.icmp.id = htons(ntohs(range->min.icmp.id) +
 					     (id % range_size));
-		if (!nf_nat_used_tuple(tuple, ct))
-			return true;
+		if (++i == range_size || !nf_nat_used_tuple(tuple, ct))
+			return;
 	}
-	return false;
+	return;
 }
 
 static bool
diff --git a/net/ipv4/netfilter/nf_nat_proto_sctp.c b/net/ipv4/netfilter/nf_nat_proto_sctp.c
index 3fc598e..756331d 100644
--- a/net/ipv4/netfilter/nf_nat_proto_sctp.c
+++ b/net/ipv4/netfilter/nf_nat_proto_sctp.c
@@ -16,14 +16,14 @@
 
 static u_int16_t nf_sctp_port_rover;
 
-static bool
+static void
 sctp_unique_tuple(struct nf_conntrack_tuple *tuple,
 		  const struct nf_nat_range *range,
 		  enum nf_nat_manip_type maniptype,
 		  const struct nf_conn *ct)
 {
-	return nf_nat_proto_unique_tuple(tuple, range, maniptype, ct,
-					 &nf_sctp_port_rover);
+	nf_nat_proto_unique_tuple(tuple, range, maniptype, ct,
+				  &nf_sctp_port_rover);
 }
 
 static bool
diff --git a/net/ipv4/netfilter/nf_nat_proto_tcp.c b/net/ipv4/netfilter/nf_nat_proto_tcp.c
index 399e2cf..aa460a5 100644
--- a/net/ipv4/netfilter/nf_nat_proto_tcp.c
+++ b/net/ipv4/netfilter/nf_nat_proto_tcp.c
@@ -20,14 +20,13 @@
 
 static u_int16_t tcp_port_rover;
 
-static bool
+static void
 tcp_unique_tuple(struct nf_conntrack_tuple *tuple,
 		 const struct nf_nat_range *range,
 		 enum nf_nat_manip_type maniptype,
 		 const struct nf_conn *ct)
 {
-	return nf_nat_proto_unique_tuple(tuple, range, maniptype, ct,
-					 &tcp_port_rover);
+	nf_nat_proto_unique_tuple(tuple, range, maniptype, ct, &tcp_port_rover);
 }
 
 static bool
diff --git a/net/ipv4/netfilter/nf_nat_proto_udp.c b/net/ipv4/netfilter/nf_nat_proto_udp.c
index 9e61c79..dfe65c7 100644
--- a/net/ipv4/netfilter/nf_nat_proto_udp.c
+++ b/net/ipv4/netfilter/nf_nat_proto_udp.c
@@ -19,14 +19,13 @@
 
 static u_int16_t udp_port_rover;
 
-static bool
+static void
 udp_unique_tuple(struct nf_conntrack_tuple *tuple,
 		 const struct nf_nat_range *range,
 		 enum nf_nat_manip_type maniptype,
 		 const struct nf_conn *ct)
 {
-	return nf_nat_proto_unique_tuple(tuple, range, maniptype, ct,
-					 &udp_port_rover);
+	nf_nat_proto_unique_tuple(tuple, range, maniptype, ct, &udp_port_rover);
 }
 
 static bool
diff --git a/net/ipv4/netfilter/nf_nat_proto_udplite.c b/net/ipv4/netfilter/nf_nat_proto_udplite.c
index 440a229..3cc8c8a 100644
--- a/net/ipv4/netfilter/nf_nat_proto_udplite.c
+++ b/net/ipv4/netfilter/nf_nat_proto_udplite.c
@@ -18,14 +18,14 @@
 
 static u_int16_t udplite_port_rover;
 
-static bool
+static void
 udplite_unique_tuple(struct nf_conntrack_tuple *tuple,
 		     const struct nf_nat_range *range,
 		     enum nf_nat_manip_type maniptype,
 		     const struct nf_conn *ct)
 {
-	return nf_nat_proto_unique_tuple(tuple, range, maniptype, ct,
-					 &udplite_port_rover);
+	nf_nat_proto_unique_tuple(tuple, range, maniptype, ct,
+				  &udplite_port_rover);
 }
 
 static bool
diff --git a/net/ipv4/netfilter/nf_nat_proto_unknown.c b/net/ipv4/netfilter/nf_nat_proto_unknown.c
index 14381c6..a50f2bc 100644
--- a/net/ipv4/netfilter/nf_nat_proto_unknown.c
+++ b/net/ipv4/netfilter/nf_nat_proto_unknown.c
@@ -26,14 +26,14 @@
 	return true;
 }
 
-static bool unknown_unique_tuple(struct nf_conntrack_tuple *tuple,
+static void unknown_unique_tuple(struct nf_conntrack_tuple *tuple,
 				 const struct nf_nat_range *range,
 				 enum nf_nat_manip_type maniptype,
 				 const struct nf_conn *ct)
 {
 	/* Sorry: we can't help you; if it's not unique, we can't frob
 	   anything. */
-	return false;
+	return;
 }
 
 static bool
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 562ce92..3f56b6e 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2878,6 +2878,9 @@
 	if (rtnetlink_put_metrics(skb, rt->dst.metrics) < 0)
 		goto nla_put_failure;
 
+	if (rt->fl.mark)
+		NLA_PUT_BE32(skb, RTA_MARK, rt->fl.mark);
+
 	error = rt->dst.error;
 	expires = rt->dst.expires ? rt->dst.expires - jiffies : 0;
 	if (rt->peer) {
@@ -2933,6 +2936,7 @@
 	__be32 src = 0;
 	u32 iif;
 	int err;
+	int mark;
 	struct sk_buff *skb;
 
 	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
@@ -2960,6 +2964,7 @@
 	src = tb[RTA_SRC] ? nla_get_be32(tb[RTA_SRC]) : 0;
 	dst = tb[RTA_DST] ? nla_get_be32(tb[RTA_DST]) : 0;
 	iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
+	mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
 
 	if (iif) {
 		struct net_device *dev;
@@ -2972,6 +2977,7 @@
 
 		skb->protocol	= htons(ETH_P_IP);
 		skb->dev	= dev;
+		skb->mark	= mark;
 		local_bh_disable();
 		err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
 		local_bh_enable();
@@ -2989,6 +2995,7 @@
 				},
 			},
 			.oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0,
+			.mark = mark,
 		};
 		err = ip_route_output_key(net, &rt, &fl);
 	}
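RTM_GETROUTE now honours (and echoes back) RTA_MARK, so userspace can ask for a mark-aware route lookup. Recent iproute2 exposes this as, assuming an ip(8) built against these headers, ip route get 10.0.0.1 mark 7.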
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 86b9f67..176e11a 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2187,6 +2187,8 @@
 				      GFP_KERNEL);
 			if (cvp == NULL)
 				return -ENOMEM;
+
+			kref_init(&cvp->kref);
 		}
 		lock_sock(sk);
 		tp->rx_opt.cookie_in_always =
@@ -2201,12 +2203,11 @@
 				 */
 				kref_put(&tp->cookie_values->kref,
 					 tcp_cookie_values_release);
-				kref_init(&cvp->kref);
-				tp->cookie_values = cvp;
 			} else {
 				cvp = tp->cookie_values;
 			}
 		}
+
 		if (cvp != NULL) {
 			cvp->cookie_desired = ctd.tcpct_cookie_desired;
 
@@ -2220,6 +2221,8 @@
 				cvp->s_data_desired = ctd.tcpct_s_data_desired;
 				cvp->s_data_constant = 0; /* false */
 			}
+
+			tp->cookie_values = cvp;
 		}
 		release_sock(sk);
 		return err;
@@ -2601,6 +2604,12 @@
 			return -EFAULT;
 		return 0;
 	}
+	case TCP_THIN_LINEAR_TIMEOUTS:
+		val = tp->thin_lto;
+		break;
+	case TCP_THIN_DUPACK:
+		val = tp->thin_dupack;
+		break;
 	default:
 		return -ENOPROTOOPT;
 	}
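The two new cases make the thin-stream knobs readable via getsockopt(), mirroring the existing setsockopt() side. A userspace sketch, assuming the TCP_THIN_* constants are visible (they come from linux/tcp.h on kernels of this vintage):

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

#ifndef TCP_THIN_LINEAR_TIMEOUTS
#define TCP_THIN_LINEAR_TIMEOUTS 16	/* value from linux/tcp.h */
#endif

/* Enable thin-stream linear timeouts on fd and read the value back. */
static int enable_thin_lto(int fd)
{
	int one = 1, val = 0;
	socklen_t len = sizeof(val);

	if (setsockopt(fd, IPPROTO_TCP, TCP_THIN_LINEAR_TIMEOUTS,
		       &one, sizeof(one)) < 0)
		return -1;
	if (getsockopt(fd, IPPROTO_TCP, TCP_THIN_LINEAR_TIMEOUTS,
		       &val, &len) < 0)
		return -1;
	printf("thin_lto=%d\n", val);
	return 0;
}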
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index e81155d..ab70a3f 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1763,7 +1763,10 @@
 
 	idev = ipv6_find_idev(dev);
 	if (!idev)
-		return NULL;
+		return ERR_PTR(-ENOBUFS);
+
+	if (idev->cnf.disable_ipv6)
+		return ERR_PTR(-EACCES);
 
 	/* Add default multicast route */
 	addrconf_add_mroute(dev);
@@ -2132,8 +2135,9 @@
 	if (!dev)
 		return -ENODEV;
 
-	if ((idev = addrconf_add_dev(dev)) == NULL)
-		return -ENOBUFS;
+	idev = addrconf_add_dev(dev);
+	if (IS_ERR(idev))
+		return PTR_ERR(idev);
 
 	scope = ipv6_addr_scope(pfx);
 
@@ -2380,7 +2384,7 @@
 	}
 
 	idev = addrconf_add_dev(dev);
-	if (idev == NULL)
+	if (IS_ERR(idev))
 		return;
 
 	memset(&addr, 0, sizeof(struct in6_addr));
@@ -2471,7 +2475,7 @@
 	ASSERT_RTNL();
 
 	idev = addrconf_add_dev(dev);
-	if (!idev) {
+	if (IS_ERR(idev)) {
 		printk(KERN_DEBUG "init ip6-ip6: add_dev failed\n");
 		return;
 	}
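addrconf_add_dev() now reports why it failed through the ERR_PTR convention instead of a bare NULL, and every caller switches from NULL tests to IS_ERR()/PTR_ERR(). A self-contained sketch of the idiom, with an illustrative struct thing standing in for inet6_dev:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/types.h>

struct thing { int id; };

static struct thing *make_thing(bool allowed)
{
	struct thing *t;

	if (!allowed)
		return ERR_PTR(-EACCES);	/* errno encoded in the pointer */

	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return ERR_PTR(-ENOMEM);
	return t;
}

static int use_thing(void)
{
	struct thing *t = make_thing(true);

	if (IS_ERR(t))
		return PTR_ERR(t);		/* recover the errno */
	/* ... use t ... */
	kfree(t);
	return 0;
}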
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index dc41d6d..5359ef4 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -387,9 +387,7 @@
 				goto no_match;
 		}
 
-		ADD_COUNTER(e->counters,
-			    ntohs(ipv6_hdr(skb)->payload_len) +
-			    sizeof(struct ipv6hdr), 1);
+		ADD_COUNTER(e->counters, skb->len, 1);
 
 		t = ip6t_get_target_c(e);
 		IP_NF_ASSERT(t->u.kernel.target);
@@ -899,7 +897,7 @@
 	struct ip6t_entry *iter;
 	unsigned int cpu;
 	unsigned int i;
-	unsigned int curcpu;
+	unsigned int curcpu = get_cpu();
 
 	/* Instead of clearing (by a previous call to memset())
 	 * the counters and using adds, we set the counters
@@ -909,14 +907,16 @@
 	 * if new softirq were to run and call ipt_do_table
 	 */
 	local_bh_disable();
-	curcpu = smp_processor_id();
-
 	i = 0;
 	xt_entry_foreach(iter, t->entries[curcpu], t->size) {
 		SET_COUNTER(counters[i], iter->counters.bcnt,
 			    iter->counters.pcnt);
 		++i;
 	}
+	local_bh_enable();
+	/* Processing counters from other cpus, we can leave bottom halves
+	 * enabled (preemption is still disabled).
+	 */
 
 	for_each_possible_cpu(cpu) {
 		if (cpu == curcpu)
@@ -930,7 +930,7 @@
 		}
 		xt_info_wrunlock(cpu);
 	}
-	local_bh_enable();
+	put_cpu();
 }
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 9254008..098a050 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -269,6 +269,11 @@
 	 * in the chain of fragments so far.  We must know where to put
 	 * this fragment, right?
 	 */
+	prev = fq->q.fragments_tail;
+	if (!prev || NFCT_FRAG6_CB(prev)->offset < offset) {
+		next = NULL;
+		goto found;
+	}
 	prev = NULL;
 	for (next = fq->q.fragments; next != NULL; next = next->next) {
 		if (NFCT_FRAG6_CB(next)->offset >= offset)
@@ -276,6 +281,7 @@
 		prev = next;
 	}
 
+found:
 	/* We found where to put this one.  Check for overlap with
 	 * preceding fragment, and, if needed, align things so that
 	 * any overlaps are eliminated.
@@ -341,6 +347,8 @@
 
 	/* Insert this fragment in the chain of fragments. */
 	skb->next = next;
+	if (!next)
+		fq->q.fragments_tail = skb;
 	if (prev)
 		prev->next = skb;
 	else
@@ -464,6 +472,7 @@
 					  head->csum);
 
 	fq->q.fragments = NULL;
+	fq->q.fragments_tail = NULL;
 
 	/* all original skbs are linked into the NFCT_FRAG6_CB(head).orig */
 	fp = skb_shinfo(head)->frag_list;
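The reassembly hunks add a tail-pointer fast path: fragments normally arrive in order, so checking the remembered tail first avoids an O(n) list walk per fragment. A sketch of the idiom with illustrative types (the kernel version operates on sk_buff chains keyed by NFCT_FRAG6_CB offsets):

#include <stddef.h>

/* Illustrative singly linked fragment list kept sorted by offset. */
struct frag {
	int offset;
	struct frag *next;
};

static void insert_frag(struct frag **head, struct frag **tail,
			struct frag *f)
{
	struct frag *prev = *tail, *next;

	if (!prev || prev->offset < f->offset) {
		next = NULL;			/* fast path: append at tail */
	} else {
		prev = NULL;			/* slow path: ordered walk */
		for (next = *head; next; next = next->next) {
			if (next->offset >= f->offset)
				break;
			prev = next;
		}
	}

	f->next = next;
	if (!next)
		*tail = f;
	if (prev)
		prev->next = f;
	else
		*head = f;
}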
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index dab6b8e..29ac8e1 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -627,7 +627,7 @@
 	skb->dev = sta->sdata->dev;
 	skb->protocol = eth_type_trans(skb, sta->sdata->dev);
 	memset(skb->cb, 0, sizeof(skb->cb));
-	netif_rx(skb);
+	netif_rx_ni(skb);
 }
 
 static void sta_apply_parameters(struct ieee80211_local *local,
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 7cc4f91..798a91b 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -685,10 +685,12 @@
 
 	return 0;
 
+#ifdef CONFIG_INET
  fail_ifa:
 	pm_qos_remove_notifier(PM_QOS_NETWORK_LATENCY,
 			       &local->network_latency_notifier);
 	rtnl_lock();
+#endif
  fail_pm_qos:
 	ieee80211_led_exit(local);
 	ieee80211_remove_interfaces(local);
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 41f20fb..872d7b6 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -400,19 +400,7 @@
 	else
 		__set_bit(SCAN_SW_SCANNING, &local->scanning);
 
-	/*
-	 * Kicking off the scan need not be protected,
-	 * only the scan variable stuff, since now
-	 * local->scan_req is assigned and other callers
-	 * will abort their scan attempts.
-	 *
-	 * This avoids too many locking dependencies
-	 * so that the scan completed calls have more
-	 * locking freedom.
-	 */
-
 	ieee80211_recalc_idle(local);
-	mutex_unlock(&local->scan_mtx);
 
 	if (local->ops->hw_scan) {
 		WARN_ON(!ieee80211_prep_hw_scan(local));
@@ -420,8 +408,6 @@
 	} else
 		rc = ieee80211_start_sw_scan(local);
 
-	mutex_lock(&local->scan_mtx);
-
 	if (rc) {
 		kfree(local->hw_scan_req);
 		local->hw_scan_req = NULL;
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index aa2f106..4328825 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -326,6 +326,22 @@
 
 comment "Xtables targets"
 
+config NETFILTER_XT_TARGET_CHECKSUM
+	tristate "CHECKSUM target support"
+	depends on IP_NF_MANGLE || IP6_NF_MANGLE
+	depends on NETFILTER_ADVANCED
+	---help---
+	  This option adds a `CHECKSUM' target, which can be used in the
+	  iptables mangle table.
+
+	  You can use this target to compute and fill in the checksum in
+	  a packet that lacks a checksum.  This is particularly useful
+	  if you need to work around old applications such as DHCP clients
+	  that do not work well with checksum offloads but don't want to
+	  disable checksum offload on your device.
+
+	  To compile it as a module, choose M here.  If unsure, say N.
+
 config NETFILTER_XT_TARGET_CLASSIFY
 	tristate '"CLASSIFY" target support'
 	depends on NETFILTER_ADVANCED
@@ -647,6 +663,15 @@
 
 	  To compile it as a module, choose M here.  If unsure, say N.
 
+config NETFILTER_XT_MATCH_CPU
+	tristate '"cpu" match support'
+	depends on NETFILTER_ADVANCED
+	help
+	  CPU matching allows you to match packets based on the CPU
+	  currently handling the packet.
+
+	  To compile it as a module, choose M here.  If unsure, say N.
+
 config NETFILTER_XT_MATCH_DCCP
 	tristate '"dccp" protocol match support'
 	depends on NETFILTER_ADVANCED
@@ -726,6 +751,16 @@
 
 	If unsure, say M.
 
+config NETFILTER_XT_MATCH_IPVS
+	tristate '"ipvs" match support'
+	depends on IP_VS
+	depends on NETFILTER_ADVANCED
+	depends on NF_CONNTRACK
+	help
+	  This option allows you to match against IPVS properties of a packet.
+
+	  If unsure, say N.
+
 config NETFILTER_XT_MATCH_LENGTH
 	tristate '"length" match support'
 	depends on NETFILTER_ADVANCED
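Both new extensions added above are driven from userspace iptables. As a hedged usage note (exact option spellings depend on the matching iptables build): the canonical CHECKSUM rule fills in UDP checksums for offload-confused DHCP clients, e.g. iptables -t mangle -A POSTROUTING -p udp --dport 68 -j CHECKSUM --checksum-fill, while the cpu match selects on the CPU handling the packet, e.g. -m cpu --cpu 0, which pairs naturally with RSS queue-to-CPU pinning.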
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index e28420a..441050f 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -45,6 +45,7 @@
 obj-$(CONFIG_NETFILTER_XT_CONNMARK) += xt_connmark.o
 
 # targets
+obj-$(CONFIG_NETFILTER_XT_TARGET_CHECKSUM) += xt_CHECKSUM.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_CLASSIFY) += xt_CLASSIFY.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_CT) += xt_CT.o
@@ -69,6 +70,7 @@
 obj-$(CONFIG_NETFILTER_XT_MATCH_CONNBYTES) += xt_connbytes.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_CONNLIMIT) += xt_connlimit.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRACK) += xt_conntrack.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_CPU) += xt_cpu.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
@@ -76,6 +78,7 @@
 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_IPRANGE) += xt_iprange.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_IPVS) += xt_ipvs.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_LENGTH) += xt_length.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_LIMIT) += xt_limit.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_MAC) += xt_mac.o
diff --git a/net/netfilter/ipvs/Kconfig b/net/netfilter/ipvs/Kconfig
index 712ccad..46a77d5 100644
--- a/net/netfilter/ipvs/Kconfig
+++ b/net/netfilter/ipvs/Kconfig
@@ -3,7 +3,7 @@
 #
 menuconfig IP_VS
 	tristate "IP virtual server support"
-	depends on NET && INET && NETFILTER
+	depends on NET && INET && NETFILTER && NF_CONNTRACK
 	---help---
 	  IP Virtual Server support will let you build a high-performance
 	  virtual server based on a cluster of two or more real servers. This
@@ -26,7 +26,7 @@
 
 config	IP_VS_IPV6
 	bool "IPv6 support for IPVS"
-	depends on EXPERIMENTAL && (IPV6 = y || IP_VS = IPV6)
+	depends on IPV6 = y || IP_VS = IPV6
 	---help---
 	  Add IPv6 support to IPVS. This is incomplete and might be dangerous.
 
@@ -87,19 +87,16 @@
 	  protocol. Say Y if unsure.
 
 config	IP_VS_PROTO_AH_ESP
-	bool
-	depends on UNDEFINED
+	def_bool IP_VS_PROTO_ESP || IP_VS_PROTO_AH
 
 config	IP_VS_PROTO_ESP
 	bool "ESP load balancing support"
-	select IP_VS_PROTO_AH_ESP
 	---help---
 	  This option enables support for load balancing ESP (Encapsulation
 	  Security Payload) transport protocol. Say Y if unsure.
 
 config	IP_VS_PROTO_AH
 	bool "AH load balancing support"
-	select IP_VS_PROTO_AH_ESP
 	---help---
 	  This option enables support for load balancing AH (Authentication
 	  Header) transport protocol. Say Y if unsure.
@@ -238,7 +235,7 @@
 
 config	IP_VS_FTP
   	tristate "FTP protocol helper"
-        depends on IP_VS_PROTO_TCP
+        depends on IP_VS_PROTO_TCP && NF_NAT
 	---help---
 	  FTP is a protocol that transfers IP address and/or port number in
 	  the payload. In the virtual server via Network Address Translation,
diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c
index 1cb0e83..e76f87f 100644
--- a/net/netfilter/ipvs/ip_vs_app.c
+++ b/net/netfilter/ipvs/ip_vs_app.c
@@ -569,49 +569,6 @@
 };
 #endif
 
-
-/*
- *	Replace a segment of data with a new segment
- */
-int ip_vs_skb_replace(struct sk_buff *skb, gfp_t pri,
-		      char *o_buf, int o_len, char *n_buf, int n_len)
-{
-	int diff;
-	int o_offset;
-	int o_left;
-
-	EnterFunction(9);
-
-	diff = n_len - o_len;
-	o_offset = o_buf - (char *)skb->data;
-	/* The length of left data after o_buf+o_len in the skb data */
-	o_left = skb->len - (o_offset + o_len);
-
-	if (diff <= 0) {
-		memmove(o_buf + n_len, o_buf + o_len, o_left);
-		memcpy(o_buf, n_buf, n_len);
-		skb_trim(skb, skb->len + diff);
-	} else if (diff <= skb_tailroom(skb)) {
-		skb_put(skb, diff);
-		memmove(o_buf + n_len, o_buf + o_len, o_left);
-		memcpy(o_buf, n_buf, n_len);
-	} else {
-		if (pskb_expand_head(skb, skb_headroom(skb), diff, pri))
-			return -ENOMEM;
-		skb_put(skb, diff);
-		memmove(skb->data + o_offset + n_len,
-			skb->data + o_offset + o_len, o_left);
-		skb_copy_to_linear_data_offset(skb, o_offset, n_buf, n_len);
-	}
-
-	/* must update the iph total length here */
-	ip_hdr(skb)->tot_len = htons(skb->len);
-
-	LeaveFunction(9);
-	return 0;
-}
-
-
 int __init ip_vs_app_init(void)
 {
 	/* we will replace it with proc_net_ipvs_create() soon */
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index 654544e..b71c69a 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -271,6 +271,29 @@
 	return cp;
 }
 
+struct ip_vs_conn *
+ip_vs_conn_in_get_proto(int af, const struct sk_buff *skb,
+			struct ip_vs_protocol *pp,
+			const struct ip_vs_iphdr *iph,
+			unsigned int proto_off, int inverse)
+{
+	__be16 _ports[2], *pptr;
+
+	pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports);
+	if (pptr == NULL)
+		return NULL;
+
+	if (likely(!inverse))
+		return ip_vs_conn_in_get(af, iph->protocol,
+					 &iph->saddr, pptr[0],
+					 &iph->daddr, pptr[1]);
+	else
+		return ip_vs_conn_in_get(af, iph->protocol,
+					 &iph->daddr, pptr[1],
+					 &iph->saddr, pptr[0]);
+}
+EXPORT_SYMBOL_GPL(ip_vs_conn_in_get_proto);
+
 /* Get reference to connection template */
 struct ip_vs_conn *ip_vs_ct_in_get
 (int af, int protocol, const union nf_inet_addr *s_addr, __be16 s_port,
@@ -356,6 +379,28 @@
 	return ret;
 }
 
+struct ip_vs_conn *
+ip_vs_conn_out_get_proto(int af, const struct sk_buff *skb,
+			 struct ip_vs_protocol *pp,
+			 const struct ip_vs_iphdr *iph,
+			 unsigned int proto_off, int inverse)
+{
+	__be16 _ports[2], *pptr;
+
+	pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports);
+	if (pptr == NULL)
+		return NULL;
+
+	if (likely(!inverse))
+		return ip_vs_conn_out_get(af, iph->protocol,
+					  &iph->saddr, pptr[0],
+					  &iph->daddr, pptr[1]);
+	else
+		return ip_vs_conn_out_get(af, iph->protocol,
+					  &iph->daddr, pptr[1],
+					  &iph->saddr, pptr[0]);
+}
+EXPORT_SYMBOL_GPL(ip_vs_conn_out_get_proto);
 
 /*
  *      Put back the conn and restart its timer with its timeout
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 50907d8..4f8ddba 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -54,7 +54,6 @@
 
 EXPORT_SYMBOL(register_ip_vs_scheduler);
 EXPORT_SYMBOL(unregister_ip_vs_scheduler);
-EXPORT_SYMBOL(ip_vs_skb_replace);
 EXPORT_SYMBOL(ip_vs_proto_name);
 EXPORT_SYMBOL(ip_vs_conn_new);
 EXPORT_SYMBOL(ip_vs_conn_in_get);
@@ -536,26 +535,6 @@
 	return NF_DROP;
 }
 
-
-/*
- *      It is hooked before NF_IP_PRI_NAT_SRC at the NF_INET_POST_ROUTING
- *      chain, and is used for VS/NAT.
- *      It detects packets for VS/NAT connections and sends the packets
- *      immediately. This can avoid that iptable_nat mangles the packets
- *      for VS/NAT.
- */
-static unsigned int ip_vs_post_routing(unsigned int hooknum,
-				       struct sk_buff *skb,
-				       const struct net_device *in,
-				       const struct net_device *out,
-				       int (*okfn)(struct sk_buff *))
-{
-	if (!skb->ipvs_property)
-		return NF_ACCEPT;
-	/* The packet was sent from IPVS, exit this chain */
-	return NF_STOP;
-}
-
 __sum16 ip_vs_checksum_complete(struct sk_buff *skb, int offset)
 {
 	return csum_fold(skb_checksum(skb, offset, skb->len - offset, 0));
@@ -1499,14 +1478,6 @@
 		.hooknum        = NF_INET_FORWARD,
 		.priority       = 99,
 	},
-	/* Before the netfilter connection tracking, exit from POST_ROUTING */
-	{
-		.hook		= ip_vs_post_routing,
-		.owner		= THIS_MODULE,
-		.pf		= PF_INET,
-		.hooknum        = NF_INET_POST_ROUTING,
-		.priority       = NF_IP_PRI_NAT_SRC-1,
-	},
 #ifdef CONFIG_IP_VS_IPV6
 	/* After packet filtering, forward packet through VS/DR, VS/TUN,
 	 * or VS/NAT(change destination), so that filtering rules can be
@@ -1535,14 +1506,6 @@
 		.hooknum        = NF_INET_FORWARD,
 		.priority       = 99,
 	},
-	/* Before the netfilter connection tracking, exit from POST_ROUTING */
-	{
-		.hook		= ip_vs_post_routing,
-		.owner		= THIS_MODULE,
-		.pf		= PF_INET6,
-		.hooknum        = NF_INET_POST_ROUTING,
-		.priority       = NF_IP6_PRI_NAT_SRC-1,
-	},
 #endif
 };
 
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
index 2ae747a..f228a17 100644
--- a/net/netfilter/ipvs/ip_vs_ftp.c
+++ b/net/netfilter/ipvs/ip_vs_ftp.c
@@ -20,6 +20,17 @@
  *
  * Author:	Wouter Gadeyne
  *
+ *
+ * Code for ip_vs_expect_related and ip_vs_expect_callback is taken from
+ * http://www.ssi.bg/~ja/nfct/:
+ *
+ * ip_vs_nfct.c:	Netfilter connection tracking support for IPVS
+ *
+ * Portions Copyright (C) 2001-2002
+ * Antefacto Ltd, 181 Parnell St, Dublin 1, Ireland.
+ *
+ * Portions Copyright (C) 2003-2008
+ * Julian Anastasov
  */
 
 #define KMSG_COMPONENT "IPVS"
@@ -32,6 +43,9 @@
 #include <linux/in.h>
 #include <linux/ip.h>
 #include <linux/netfilter.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+#include <net/netfilter/nf_nat_helper.h>
 #include <linux/gfp.h>
 #include <net/protocol.h>
 #include <net/tcp.h>
@@ -43,6 +57,16 @@
 #define SERVER_STRING "227 Entering Passive Mode ("
 #define CLIENT_STRING "PORT "
 
+#define FMT_TUPLE	"%pI4:%u->%pI4:%u/%u"
+#define ARG_TUPLE(T)	&(T)->src.u3.ip, ntohs((T)->src.u.all), \
+			&(T)->dst.u3.ip, ntohs((T)->dst.u.all), \
+			(T)->dst.protonum
+
+#define FMT_CONN	"%pI4:%u->%pI4:%u->%pI4:%u/%u:%u"
+#define ARG_CONN(C)	&((C)->caddr.ip), ntohs((C)->cport), \
+			&((C)->vaddr.ip), ntohs((C)->vport), \
+			&((C)->daddr.ip), ntohs((C)->dport), \
+			(C)->protocol, (C)->state
 
 /*
  * List of ports (up to IP_VS_APP_MAX_PORTS) to be handled by helper
@@ -123,6 +147,119 @@
 	return 1;
 }
 
+/*
+ * Called from init_conntrack() as expectfn handler.
+ */
+static void
+ip_vs_expect_callback(struct nf_conn *ct,
+		      struct nf_conntrack_expect *exp)
+{
+	struct nf_conntrack_tuple *orig, new_reply;
+	struct ip_vs_conn *cp;
+
+	if (exp->tuple.src.l3num != PF_INET)
+		return;
+
+	/*
+	 * We assume that no NF locks are held before this callback.
+	 * ip_vs_conn_out_get and ip_vs_conn_in_get should match their
+	 * expectations even if they use wildcard values; here we provide the
+	 * actual values from the newly created original conntrack direction.
+	 * The conntrack is confirmed when the packet reaches the IPVS hooks.
+	 */
+
+	/* RS->CLIENT */
+	orig = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
+	cp = ip_vs_conn_out_get(exp->tuple.src.l3num, orig->dst.protonum,
+				&orig->src.u3, orig->src.u.tcp.port,
+				&orig->dst.u3, orig->dst.u.tcp.port);
+	if (cp) {
+		/* Change reply CLIENT->RS to CLIENT->VS */
+		new_reply = ct->tuplehash[IP_CT_DIR_REPLY].tuple;
+		IP_VS_DBG(7, "%s(): ct=%p, status=0x%lX, tuples=" FMT_TUPLE ", "
+			  FMT_TUPLE ", found inout cp=" FMT_CONN "\n",
+			  __func__, ct, ct->status,
+			  ARG_TUPLE(orig), ARG_TUPLE(&new_reply),
+			  ARG_CONN(cp));
+		new_reply.dst.u3 = cp->vaddr;
+		new_reply.dst.u.tcp.port = cp->vport;
+		IP_VS_DBG(7, "%s(): ct=%p, new tuples=" FMT_TUPLE ", " FMT_TUPLE
+			  ", inout cp=" FMT_CONN "\n",
+			  __func__, ct,
+			  ARG_TUPLE(orig), ARG_TUPLE(&new_reply),
+			  ARG_CONN(cp));
+		goto alter;
+	}
+
+	/* CLIENT->VS */
+	cp = ip_vs_conn_in_get(exp->tuple.src.l3num, orig->dst.protonum,
+			       &orig->src.u3, orig->src.u.tcp.port,
+			       &orig->dst.u3, orig->dst.u.tcp.port);
+	if (cp) {
+		/* Change reply VS->CLIENT to RS->CLIENT */
+		new_reply = ct->tuplehash[IP_CT_DIR_REPLY].tuple;
+		IP_VS_DBG(7, "%s(): ct=%p, status=0x%lX, tuples=" FMT_TUPLE ", "
+			  FMT_TUPLE ", found outin cp=" FMT_CONN "\n",
+			  __func__, ct, ct->status,
+			  ARG_TUPLE(orig), ARG_TUPLE(&new_reply),
+			  ARG_CONN(cp));
+		new_reply.src.u3 = cp->daddr;
+		new_reply.src.u.tcp.port = cp->dport;
+		IP_VS_DBG(7, "%s(): ct=%p, new tuples=" FMT_TUPLE ", "
+			  FMT_TUPLE ", outin cp=" FMT_CONN "\n",
+			  __func__, ct,
+			  ARG_TUPLE(orig), ARG_TUPLE(&new_reply),
+			  ARG_CONN(cp));
+		goto alter;
+	}
+
+	IP_VS_DBG(7, "%s(): ct=%p, status=0x%lX, tuple=" FMT_TUPLE
+		  " - unknown expect\n",
+		  __func__, ct, ct->status, ARG_TUPLE(orig));
+	return;
+
+alter:
+	/* Never alter conntrack for non-NAT conns */
+	if (IP_VS_FWD_METHOD(cp) == IP_VS_CONN_F_MASQ)
+		nf_conntrack_alter_reply(ct, &new_reply);
+	ip_vs_conn_put(cp);
+	return;
+}
+
+/*
+ * Create NF conntrack expectation with wildcard (optional) source port.
+ * Then the default callback function will alter the reply and will confirm
+ * the conntrack entry when the first packet arrives.
+ */
+static void
+ip_vs_expect_related(struct sk_buff *skb, struct nf_conn *ct,
+		     struct ip_vs_conn *cp, u_int8_t proto,
+		     const __be16 *port, int from_rs)
+{
+	struct nf_conntrack_expect *exp;
+
+	BUG_ON(!ct || ct == &nf_conntrack_untracked);
+
+	exp = nf_ct_expect_alloc(ct);
+	if (!exp)
+		return;
+
+	if (from_rs)
+		nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT,
+				  nf_ct_l3num(ct), &cp->daddr, &cp->caddr,
+				  proto, port, &cp->cport);
+	else
+		nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT,
+				  nf_ct_l3num(ct), &cp->caddr, &cp->vaddr,
+				  proto, port, &cp->vport);
+
+	exp->expectfn = ip_vs_expect_callback;
+
+	IP_VS_DBG(7, "%s(): ct=%p, expect tuple=" FMT_TUPLE "\n",
+		  __func__, ct, ARG_TUPLE(&exp->tuple));
+	nf_ct_expect_related(exp);
+	nf_ct_expect_put(exp);
+}
 
 /*
  * Look at outgoing ftp packets to catch the response to a PASV command
@@ -149,7 +286,9 @@
 	struct ip_vs_conn *n_cp;
 	char buf[24];		/* xxx.xxx.xxx.xxx,ppp,ppp\000 */
 	unsigned buf_len;
-	int ret;
+	int ret = 0;
+	enum ip_conntrack_info ctinfo;
+	struct nf_conn *ct;
 
 #ifdef CONFIG_IP_VS_IPV6
 	/* This application helper doesn't work with IPv6 yet,
@@ -219,20 +358,27 @@
 
 		buf_len = strlen(buf);
 
-		/*
-		 * Calculate required delta-offset to keep TCP happy
-		 */
-		*diff = buf_len - (end-start);
-
-		if (*diff == 0) {
-			/* simply replace it with new passive address */
-			memcpy(start, buf, buf_len);
-			ret = 1;
-		} else {
-			ret = !ip_vs_skb_replace(skb, GFP_ATOMIC, start,
-					  end-start, buf, buf_len);
+		ct = nf_ct_get(skb, &ctinfo);
+		if (ct && !nf_ct_is_untracked(ct)) {
+			/* If mangling fails, this function returns 0,
+			 * which causes the packet to be dropped.
+			 * Mangling can only fail under memory pressure;
+			 * hopefully it will succeed on the retransmitted
+			 * packet.
+			 */
+			ret = nf_nat_mangle_tcp_packet(skb, ct, ctinfo,
+						       start-data, end-start,
+						       buf, buf_len);
+			if (ret)
+				ip_vs_expect_related(skb, ct, n_cp,
+						     IPPROTO_TCP, NULL, 0);
 		}
 
+		/*
+		 * Not setting 'diff' is intentional; otherwise the sequence
+		 * would be adjusted twice.
+		 */
+
 		cp->app_data = NULL;
 		ip_vs_tcp_conn_listen(n_cp);
 		ip_vs_conn_put(n_cp);
@@ -263,6 +409,7 @@
 	union nf_inet_addr to;
 	__be16 port;
 	struct ip_vs_conn *n_cp;
+	struct nf_conn *ct;
 
 #ifdef CONFIG_IP_VS_IPV6
 	/* This application helper doesn't work with IPv6 yet,
@@ -349,6 +496,11 @@
 		ip_vs_control_add(n_cp, cp);
 	}
 
+	ct = (struct nf_conn *)skb->nfct;
+	if (ct && ct != &nf_conntrack_untracked)
+		ip_vs_expect_related(skb, ct, n_cp,
+				     IPPROTO_TCP, &n_cp->dport, 1);
+
 	/*
 	 *	Move tunnel to listen state
 	 */
diff --git a/net/netfilter/ipvs/ip_vs_proto.c b/net/netfilter/ipvs/ip_vs_proto.c
index 2d3d5e4..027f654 100644
--- a/net/netfilter/ipvs/ip_vs_proto.c
+++ b/net/netfilter/ipvs/ip_vs_proto.c
@@ -98,6 +98,7 @@
 
 	return NULL;
 }
+EXPORT_SYMBOL(ip_vs_proto_get);
 
 
 /*
diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c
index c9a3f7a..4c0855c 100644
--- a/net/netfilter/ipvs/ip_vs_proto_sctp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c
@@ -8,55 +8,6 @@
 #include <net/sctp/checksum.h>
 #include <net/ip_vs.h>
 
-
-static struct ip_vs_conn *
-sctp_conn_in_get(int af,
-		 const struct sk_buff *skb,
-		 struct ip_vs_protocol *pp,
-		 const struct ip_vs_iphdr *iph,
-		 unsigned int proto_off,
-		 int inverse)
-{
-	__be16 _ports[2], *pptr;
-
-	pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports);
-	if (pptr == NULL)
-		return NULL;
-
-	if (likely(!inverse)) 
-		return ip_vs_conn_in_get(af, iph->protocol,
-					 &iph->saddr, pptr[0],
-					 &iph->daddr, pptr[1]);
-	else 
-		return ip_vs_conn_in_get(af, iph->protocol,
-					 &iph->daddr, pptr[1],
-					 &iph->saddr, pptr[0]);
-}
-
-static struct ip_vs_conn *
-sctp_conn_out_get(int af,
-		  const struct sk_buff *skb,
-		  struct ip_vs_protocol *pp,
-		  const struct ip_vs_iphdr *iph,
-		  unsigned int proto_off,
-		  int inverse)
-{
-	__be16 _ports[2], *pptr;
-
-	pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports);
-	if (pptr == NULL)
-		return NULL;
-
-	if (likely(!inverse)) 
-		return ip_vs_conn_out_get(af, iph->protocol,
-					  &iph->saddr, pptr[0],
-					  &iph->daddr, pptr[1]);
-	else 
-		return ip_vs_conn_out_get(af, iph->protocol,
-					  &iph->daddr, pptr[1],
-					  &iph->saddr, pptr[0]);
-}
-
 static int
 sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
 		   int *verdict, struct ip_vs_conn **cpp)
@@ -173,7 +124,7 @@
 			return 0;
 
 		/* Call application helper if needed */
-		if (!ip_vs_app_pkt_out(cp, skb))
+		if (!ip_vs_app_pkt_in(cp, skb))
 			return 0;
 	}
 
@@ -1169,8 +1120,8 @@
 	.register_app = sctp_register_app,
 	.unregister_app = sctp_unregister_app,
 	.conn_schedule = sctp_conn_schedule,
-	.conn_in_get = sctp_conn_in_get,
-	.conn_out_get = sctp_conn_out_get,
+	.conn_in_get = ip_vs_conn_in_get_proto,
+	.conn_out_get = ip_vs_conn_out_get_proto,
 	.snat_handler = sctp_snat_handler,
 	.dnat_handler = sctp_dnat_handler,
 	.csum_check = sctp_csum_check,
diff --git a/net/netfilter/ipvs/ip_vs_proto_tcp.c b/net/netfilter/ipvs/ip_vs_proto_tcp.c
index 91d28e0..282d24d 100644
--- a/net/netfilter/ipvs/ip_vs_proto_tcp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_tcp.c
@@ -27,52 +27,6 @@
 
 #include <net/ip_vs.h>
 
-
-static struct ip_vs_conn *
-tcp_conn_in_get(int af, const struct sk_buff *skb, struct ip_vs_protocol *pp,
-		const struct ip_vs_iphdr *iph, unsigned int proto_off,
-		int inverse)
-{
-	__be16 _ports[2], *pptr;
-
-	pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports);
-	if (pptr == NULL)
-		return NULL;
-
-	if (likely(!inverse)) {
-		return ip_vs_conn_in_get(af, iph->protocol,
-					 &iph->saddr, pptr[0],
-					 &iph->daddr, pptr[1]);
-	} else {
-		return ip_vs_conn_in_get(af, iph->protocol,
-					 &iph->daddr, pptr[1],
-					 &iph->saddr, pptr[0]);
-	}
-}
-
-static struct ip_vs_conn *
-tcp_conn_out_get(int af, const struct sk_buff *skb, struct ip_vs_protocol *pp,
-		 const struct ip_vs_iphdr *iph, unsigned int proto_off,
-		 int inverse)
-{
-	__be16 _ports[2], *pptr;
-
-	pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports);
-	if (pptr == NULL)
-		return NULL;
-
-	if (likely(!inverse)) {
-		return ip_vs_conn_out_get(af, iph->protocol,
-					  &iph->saddr, pptr[0],
-					  &iph->daddr, pptr[1]);
-	} else {
-		return ip_vs_conn_out_get(af, iph->protocol,
-					  &iph->daddr, pptr[1],
-					  &iph->saddr, pptr[0]);
-	}
-}
-
-
 static int
 tcp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
 		  int *verdict, struct ip_vs_conn **cpp)
@@ -721,8 +675,8 @@
 	.register_app =		tcp_register_app,
 	.unregister_app =	tcp_unregister_app,
 	.conn_schedule =	tcp_conn_schedule,
-	.conn_in_get =		tcp_conn_in_get,
-	.conn_out_get =		tcp_conn_out_get,
+	.conn_in_get =		ip_vs_conn_in_get_proto,
+	.conn_out_get =		ip_vs_conn_out_get_proto,
 	.snat_handler =		tcp_snat_handler,
 	.dnat_handler =		tcp_dnat_handler,
 	.csum_check =		tcp_csum_check,
diff --git a/net/netfilter/ipvs/ip_vs_proto_udp.c b/net/netfilter/ipvs/ip_vs_proto_udp.c
index e7a6885..8553231 100644
--- a/net/netfilter/ipvs/ip_vs_proto_udp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_udp.c
@@ -27,58 +27,6 @@
 #include <net/ip.h>
 #include <net/ip6_checksum.h>
 
-static struct ip_vs_conn *
-udp_conn_in_get(int af, const struct sk_buff *skb, struct ip_vs_protocol *pp,
-		const struct ip_vs_iphdr *iph, unsigned int proto_off,
-		int inverse)
-{
-	struct ip_vs_conn *cp;
-	__be16 _ports[2], *pptr;
-
-	pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports);
-	if (pptr == NULL)
-		return NULL;
-
-	if (likely(!inverse)) {
-		cp = ip_vs_conn_in_get(af, iph->protocol,
-				       &iph->saddr, pptr[0],
-				       &iph->daddr, pptr[1]);
-	} else {
-		cp = ip_vs_conn_in_get(af, iph->protocol,
-				       &iph->daddr, pptr[1],
-				       &iph->saddr, pptr[0]);
-	}
-
-	return cp;
-}
-
-
-static struct ip_vs_conn *
-udp_conn_out_get(int af, const struct sk_buff *skb, struct ip_vs_protocol *pp,
-		 const struct ip_vs_iphdr *iph, unsigned int proto_off,
-		 int inverse)
-{
-	struct ip_vs_conn *cp;
-	__be16 _ports[2], *pptr;
-
-	pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports);
-	if (pptr == NULL)
-		return NULL;
-
-	if (likely(!inverse)) {
-		cp = ip_vs_conn_out_get(af, iph->protocol,
-					&iph->saddr, pptr[0],
-					&iph->daddr, pptr[1]);
-	} else {
-		cp = ip_vs_conn_out_get(af, iph->protocol,
-					&iph->daddr, pptr[1],
-					&iph->saddr, pptr[0]);
-	}
-
-	return cp;
-}
-
-
 static int
 udp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
 		  int *verdict, struct ip_vs_conn **cpp)
@@ -520,8 +468,8 @@
 	.init =			udp_init,
 	.exit =			udp_exit,
 	.conn_schedule =	udp_conn_schedule,
-	.conn_in_get =		udp_conn_in_get,
-	.conn_out_get =		udp_conn_out_get,
+	.conn_in_get =		ip_vs_conn_in_get_proto,
+	.conn_out_get =		ip_vs_conn_out_get_proto,
 	.snat_handler =		udp_snat_handler,
 	.dnat_handler =		udp_dnat_handler,
 	.csum_check =		udp_csum_check,
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index 02b078e..21e1a5e 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -28,6 +28,7 @@
 #include <net/ip6_route.h>
 #include <linux/icmpv6.h>
 #include <linux/netfilter.h>
+#include <net/netfilter/nf_conntrack.h>
 #include <linux/netfilter_ipv4.h>
 
 #include <net/ip_vs.h>
@@ -348,6 +349,30 @@
 }
 #endif
 
+static void
+ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp)
+{
+	struct nf_conn *ct = (struct nf_conn *)skb->nfct;
+	struct nf_conntrack_tuple new_tuple;
+
+	if (ct == NULL || nf_ct_is_untracked(ct) || nf_ct_is_confirmed(ct))
+		return;
+
+	/*
+	 * The connection is not yet in the hashtable, so we update it.
+	 * CIP->VIP will remain the same, so leave the tuple in
+	 * IP_CT_DIR_ORIGINAL untouched.  When the reply comes back from the
+	 * real-server we will see RIP->DIP.
+	 */
+	new_tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple;
+	new_tuple.src.u3 = cp->daddr;
+	/*
+	 * This will also take care of UDP and other protocols.
+	 */
+	new_tuple.src.u.tcp.port = cp->dport;
+	nf_conntrack_alter_reply(ct, &new_tuple);
+}
+
 /*
  *      NAT transmitter (only for outside-to-inside nat forwarding)
  *      Not used for related ICMP
@@ -403,6 +428,8 @@
 
 	IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT");
 
+	ip_vs_update_conntrack(skb, cp);
+
 	/* FIXME: when application helper enlarges the packet and the length
 	   is larger than the MTU of outgoing device, there will be still
 	   MTU problem. */
@@ -479,6 +506,8 @@
 
 	IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT");
 
+	ip_vs_update_conntrack(skb, cp);
+
 	/* FIXME: when the application helper enlarges the packet and the
 	   length is larger than the MTU of the outgoing device, there will
 	   still be an MTU problem. */
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 16b41b4..df3eedb 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -966,8 +966,7 @@
 		if (acct) {
 			spin_lock_bh(&ct->lock);
 			acct[CTINFO2DIR(ctinfo)].packets++;
-			acct[CTINFO2DIR(ctinfo)].bytes +=
-				skb->len - skb_network_offset(skb);
+			acct[CTINFO2DIR(ctinfo)].bytes += skb->len;
 			spin_unlock_bh(&ct->lock);
 		}
 	}
diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c
index fdc8fb4..7dcf7a4 100644
--- a/net/netfilter/nf_conntrack_extend.c
+++ b/net/netfilter/nf_conntrack_extend.c
@@ -23,9 +23,10 @@
 {
 	unsigned int i;
 	struct nf_ct_ext_type *t;
+	struct nf_ct_ext *ext = ct->ext;
 
 	for (i = 0; i < NF_CT_EXT_NUM; i++) {
-		if (!nf_ct_ext_exist(ct, i))
+		if (!__nf_ct_ext_exist(ext, i))
 			continue;
 
 		rcu_read_lock();
@@ -73,44 +74,45 @@
 
 void *__nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp)
 {
-	struct nf_ct_ext *new;
+	struct nf_ct_ext *old, *new;
 	int i, newlen, newoff;
 	struct nf_ct_ext_type *t;
 
 	/* Conntrack must not be confirmed to avoid races on reallocation. */
 	NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
 
-	if (!ct->ext)
+	old = ct->ext;
+	if (!old)
 		return nf_ct_ext_create(&ct->ext, id, gfp);
 
-	if (nf_ct_ext_exist(ct, id))
+	if (__nf_ct_ext_exist(old, id))
 		return NULL;
 
 	rcu_read_lock();
 	t = rcu_dereference(nf_ct_ext_types[id]);
 	BUG_ON(t == NULL);
 
-	newoff = ALIGN(ct->ext->len, t->align);
+	newoff = ALIGN(old->len, t->align);
 	newlen = newoff + t->len;
 	rcu_read_unlock();
 
-	new = __krealloc(ct->ext, newlen, gfp);
+	new = __krealloc(old, newlen, gfp);
 	if (!new)
 		return NULL;
 
-	if (new != ct->ext) {
+	if (new != old) {
 		for (i = 0; i < NF_CT_EXT_NUM; i++) {
-			if (!nf_ct_ext_exist(ct, i))
+			if (!__nf_ct_ext_exist(old, i))
 				continue;
 
 			rcu_read_lock();
 			t = rcu_dereference(nf_ct_ext_types[i]);
 			if (t && t->move)
 				t->move((void *)new + new->offset[i],
-					(void *)ct->ext + ct->ext->offset[i]);
+					(void *)old + old->offset[i]);
 			rcu_read_unlock();
 		}
-		call_rcu(&ct->ext->rcu, __nf_ct_ext_free_rcu);
+		call_rcu(&old->rcu, __nf_ct_ext_free_rcu);
 		ct->ext = new;
 	}
 
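
[__nf_ct_ext_exist() is the variant of nf_ct_ext_exist() that operates on an already-loaded ext pointer, so ct->ext is read once per call site; presumably it lands in nf_conntrack_extend.h alongside this patch, along these lines:]

static inline bool __nf_ct_ext_exist(const struct nf_ct_ext *ext, u8 id)
{
	/* An extension is present iff its offset slot is non-zero. */
	return !!ext->offset[id];
}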
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 802dbff..c4c885d 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -585,8 +585,16 @@
 			 * Let's try to use the data from the packet.
 			 */
 			sender->td_end = end;
+			win <<= sender->td_scale;
 			sender->td_maxwin = (win == 0 ? 1 : win);
 			sender->td_maxend = end + sender->td_maxwin;
+			/*
+			 * We haven't seen traffic in the other direction yet
+			 * but we have to tweak window tracking to pass III
+			 * and IV until that happens.
+			 */
+			if (receiver->td_maxwin == 0)
+				receiver->td_end = receiver->td_maxend = sack;
 		}
 	} else if (((state->state == TCP_CONNTRACK_SYN_SENT
 		     && dir == IP_CT_DIR_ORIGINAL)
@@ -680,7 +688,7 @@
 		/*
 		 * Update receiver data.
 		 */
-		if (after(end, sender->td_maxend))
+		if (receiver->td_maxwin != 0 && after(end, sender->td_maxend))
 			receiver->td_maxwin += end - sender->td_maxend;
 		if (after(sack + win, receiver->td_maxend - 1)) {
 			receiver->td_maxend = sack + win;
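
[The first hunk matters when conntrack picks up an already-established flow: td_maxwin previously stored the raw on-wire window, ignoring any negotiated scale. A quick illustration of the fix, with made-up numbers:]

/* Illustrative: with a negotiated scale factor of 7, a header
 * advertising win = 512 really offers a 512 << 7 = 65536 byte window. */
win <<= sender->td_scale;			/* 512 -> 65536 */
sender->td_maxwin = (win == 0 ? 1 : win);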
diff --git a/net/netfilter/xt_CHECKSUM.c b/net/netfilter/xt_CHECKSUM.c
new file mode 100644
index 0000000..0f642ef
--- /dev/null
+++ b/net/netfilter/xt_CHECKSUM.c
@@ -0,0 +1,70 @@
+/* iptables module for the packet checksum mangling
+ *
+ * (C) 2002 by Harald Welte <laforge@netfilter.org>
+ * (C) 2010 Red Hat, Inc.
+ *
+ * Author: Michael S. Tsirkin <mst@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/skbuff.h>
+
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_CHECKSUM.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Michael S. Tsirkin <mst@redhat.com>");
+MODULE_DESCRIPTION("Xtables: checksum modification");
+MODULE_ALIAS("ipt_CHECKSUM");
+MODULE_ALIAS("ip6t_CHECKSUM");
+
+static unsigned int
+checksum_tg(struct sk_buff *skb, const struct xt_action_param *par)
+{
+	if (skb->ip_summed == CHECKSUM_PARTIAL)
+		skb_checksum_help(skb);
+
+	return XT_CONTINUE;
+}
+
+static int checksum_tg_check(const struct xt_tgchk_param *par)
+{
+	const struct xt_CHECKSUM_info *einfo = par->targinfo;
+
+	if (einfo->operation & ~XT_CHECKSUM_OP_FILL) {
+		pr_info("unsupported CHECKSUM operation %x\n", einfo->operation);
+		return -EINVAL;
+	}
+	if (!einfo->operation) {
+		pr_info("no CHECKSUM operation enabled\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static struct xt_target checksum_tg_reg __read_mostly = {
+	.name		= "CHECKSUM",
+	.family		= NFPROTO_UNSPEC,
+	.target		= checksum_tg,
+	.targetsize	= sizeof(struct xt_CHECKSUM_info),
+	.table		= "mangle",
+	.checkentry	= checksum_tg_check,
+	.me		= THIS_MODULE,
+};
+
+static int __init checksum_tg_init(void)
+{
+	return xt_register_target(&checksum_tg_reg);
+}
+
+static void __exit checksum_tg_exit(void)
+{
+	xt_unregister_target(&checksum_tg_reg);
+}
+
+module_init(checksum_tg_init);
+module_exit(checksum_tg_exit);
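
[The intended consumer is virtualization: packets leaving a virtio NIC may still carry CHECKSUM_PARTIAL, which some guests' DHCP clients reject. A typical rule — standard syntax for this target; the table/port choice is just the common DHCP case:]

iptables -t mangle -A POSTROUTING -p udp --dport 68 -j CHECKSUM --checksum-fill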
diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c
index e1a0ded..c61294d 100644
--- a/net/netfilter/xt_TPROXY.c
+++ b/net/netfilter/xt_TPROXY.c
@@ -37,8 +37,10 @@
 		return NF_DROP;
 
 	sk = nf_tproxy_get_sock_v4(dev_net(skb->dev), iph->protocol,
-				   iph->saddr, tgi->laddr ? tgi->laddr : iph->daddr,
-				   hp->source, tgi->lport ? tgi->lport : hp->dest,
+				   iph->saddr,
+				   tgi->laddr ? tgi->laddr : iph->daddr,
+				   hp->source,
+				   tgi->lport ? tgi->lport : hp->dest,
 				   par->in, true);
 
 	/* NOTE: assign_sock consumes our sk reference */
diff --git a/net/netfilter/xt_cpu.c b/net/netfilter/xt_cpu.c
new file mode 100644
index 0000000..b39db8a
--- /dev/null
+++ b/net/netfilter/xt_cpu.c
@@ -0,0 +1,63 @@
+/* Kernel module to match running CPU */
+
+/*
+ * Might be used to distribute connections across several daemons, if
+ * RPS (Receive Packet Steering) is enabled or the NIC is multiqueue
+ * capable, with each RX queue IRQ affined to one CPU (1:1 mapping).
+ *
+ */
+
+/* (C) 2010 Eric Dumazet
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/netfilter/xt_cpu.h>
+#include <linux/netfilter/x_tables.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Eric Dumazet <eric.dumazet@gmail.com>");
+MODULE_DESCRIPTION("Xtables: CPU match");
+
+static int cpu_mt_check(const struct xt_mtchk_param *par)
+{
+	const struct xt_cpu_info *info = par->matchinfo;
+
+	if (info->invert & ~1)
+		return -EINVAL;
+	return 0;
+}
+
+static bool cpu_mt(const struct sk_buff *skb, struct xt_action_param *par)
+{
+	const struct xt_cpu_info *info = par->matchinfo;
+
+	return (info->cpu == smp_processor_id()) ^ info->invert;
+}
+
+static struct xt_match cpu_mt_reg __read_mostly = {
+	.name       = "cpu",
+	.revision   = 0,
+	.family     = NFPROTO_UNSPEC,
+	.checkentry = cpu_mt_check,
+	.match      = cpu_mt,
+	.matchsize  = sizeof(struct xt_cpu_info),
+	.me         = THIS_MODULE,
+};
+
+static int __init cpu_mt_init(void)
+{
+	return xt_register_match(&cpu_mt_reg);
+}
+
+static void __exit cpu_mt_exit(void)
+{
+	xt_unregister_match(&cpu_mt_reg);
+}
+
+module_init(cpu_mt_init);
+module_exit(cpu_mt_exit);
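
[Example use, in the spirit of the header comment: fan out port-80 connections to per-CPU worker daemons; the ports 8080/8081 are illustrative:]

iptables -t nat -A PREROUTING -p tcp --dport 80 -m cpu --cpu 0 \
	-j REDIRECT --to-port 8080
iptables -t nat -A PREROUTING -p tcp --dport 80 -m cpu --cpu 1 \
	-j REDIRECT --to-port 8081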
diff --git a/net/netfilter/xt_ipvs.c b/net/netfilter/xt_ipvs.c
new file mode 100644
index 0000000..7a4d66d
--- /dev/null
+++ b/net/netfilter/xt_ipvs.c
@@ -0,0 +1,188 @@
+/*
+ *	xt_ipvs - kernel module to match IPVS connection properties
+ *
+ *	Author: Hannes Eder <heder@google.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#ifdef CONFIG_IP_VS_IPV6
+#include <net/ipv6.h>
+#endif
+#include <linux/ip_vs.h>
+#include <linux/types.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_ipvs.h>
+#include <net/netfilter/nf_conntrack.h>
+
+#include <net/ip_vs.h>
+
+MODULE_AUTHOR("Hannes Eder <heder@google.com>");
+MODULE_DESCRIPTION("Xtables: match IPVS connection properties");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ipt_ipvs");
+MODULE_ALIAS("ip6t_ipvs");
+
+/* borrowed from xt_conntrack */
+static bool ipvs_mt_addrcmp(const union nf_inet_addr *kaddr,
+			    const union nf_inet_addr *uaddr,
+			    const union nf_inet_addr *umask,
+			    unsigned int l3proto)
+{
+	if (l3proto == NFPROTO_IPV4)
+		return ((kaddr->ip ^ uaddr->ip) & umask->ip) == 0;
+#ifdef CONFIG_IP_VS_IPV6
+	else if (l3proto == NFPROTO_IPV6)
+		return ipv6_masked_addr_cmp(&kaddr->in6, &umask->in6,
+		       &uaddr->in6) == 0;
+#endif
+	else
+		return false;
+}
+
+static bool
+ipvs_mt(const struct sk_buff *skb, struct xt_action_param *par)
+{
+	const struct xt_ipvs_mtinfo *data = par->matchinfo;
+	/* ipvs_mt_check ensures that family is only NFPROTO_IPV[46]. */
+	const u_int8_t family = par->family;
+	struct ip_vs_iphdr iph;
+	struct ip_vs_protocol *pp;
+	struct ip_vs_conn *cp;
+	bool match = true;
+
+	if (data->bitmask == XT_IPVS_IPVS_PROPERTY) {
+		match = skb->ipvs_property ^
+			!!(data->invert & XT_IPVS_IPVS_PROPERTY);
+		goto out;
+	}
+
+	/* flags other than XT_IPVS_IPVS_PROPERTY are set */
+	if (!skb->ipvs_property) {
+		match = false;
+		goto out;
+	}
+
+	ip_vs_fill_iphdr(family, skb_network_header(skb), &iph);
+
+	if (data->bitmask & XT_IPVS_PROTO)
+		if ((iph.protocol == data->l4proto) ^
+		    !(data->invert & XT_IPVS_PROTO)) {
+			match = false;
+			goto out;
+		}
+
+	pp = ip_vs_proto_get(iph.protocol);
+	if (unlikely(!pp)) {
+		match = false;
+		goto out;
+	}
+
+	/*
+	 * Check if the packet belongs to an existing entry
+	 */
+	cp = pp->conn_out_get(family, skb, pp, &iph, iph.len, 1 /* inverse */);
+	if (unlikely(cp == NULL)) {
+		match = false;
+		goto out;
+	}
+
+	/*
+	 * We found a connection, i.e. cp != NULL; make sure to call
+	 * __ip_vs_conn_put before returning.  In our case jump to out_put_cp.
+	 */
+
+	if (data->bitmask & XT_IPVS_VPORT)
+		if ((cp->vport == data->vport) ^
+		    !(data->invert & XT_IPVS_VPORT)) {
+			match = false;
+			goto out_put_cp;
+		}
+
+	if (data->bitmask & XT_IPVS_VPORTCTL)
+		if ((cp->control != NULL &&
+		     cp->control->vport == data->vportctl) ^
+		    !(data->invert & XT_IPVS_VPORTCTL)) {
+			match = false;
+			goto out_put_cp;
+		}
+
+	if (data->bitmask & XT_IPVS_DIR) {
+		enum ip_conntrack_info ctinfo;
+		struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
+
+		if (ct == NULL || nf_ct_is_untracked(ct)) {
+			match = false;
+			goto out_put_cp;
+		}
+
+		if ((ctinfo >= IP_CT_IS_REPLY) ^
+		    !!(data->invert & XT_IPVS_DIR)) {
+			match = false;
+			goto out_put_cp;
+		}
+	}
+
+	if (data->bitmask & XT_IPVS_METHOD)
+		if (((cp->flags & IP_VS_CONN_F_FWD_MASK) == data->fwd_method) ^
+		    !(data->invert & XT_IPVS_METHOD)) {
+			match = false;
+			goto out_put_cp;
+		}
+
+	if (data->bitmask & XT_IPVS_VADDR) {
+		if (ipvs_mt_addrcmp(&cp->vaddr, &data->vaddr,
+				    &data->vmask, family) ^
+		    !(data->invert & XT_IPVS_VADDR)) {
+			match = false;
+			goto out_put_cp;
+		}
+	}
+
+out_put_cp:
+	__ip_vs_conn_put(cp);
+out:
+	pr_debug("match=%d\n", match);
+	return match;
+}
+
+static int ipvs_mt_check(const struct xt_mtchk_param *par)
+{
+	if (par->family != NFPROTO_IPV4
+#ifdef CONFIG_IP_VS_IPV6
+	    && par->family != NFPROTO_IPV6
+#endif
+		) {
+		pr_info("protocol family %u not supported\n", par->family);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static struct xt_match xt_ipvs_mt_reg __read_mostly = {
+	.name       = "ipvs",
+	.revision   = 0,
+	.family     = NFPROTO_UNSPEC,
+	.match      = ipvs_mt,
+	.checkentry = ipvs_mt_check,
+	.matchsize  = XT_ALIGN(sizeof(struct xt_ipvs_mtinfo)),
+	.me         = THIS_MODULE,
+};
+
+static int __init ipvs_mt_init(void)
+{
+	return xt_register_match(&xt_ipvs_mt_reg);
+}
+
+static void __exit ipvs_mt_exit(void)
+{
+	xt_unregister_match(&xt_ipvs_mt_reg);
+}
+
+module_init(ipvs_mt_init);
+module_exit(ipvs_mt_exit);
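
[Assuming the companion libxt_ipvs userspace module maps the XT_IPVS_* bits to long options in the usual way, a rule marking IPVS-handled traffic for one virtual service might look like this; the address is illustrative:]

iptables -t mangle -A POSTROUTING -m ipvs --vaddr 192.168.100.30/32 --vport 80 \
	-j MARK --set-mark 0x1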
diff --git a/net/netfilter/xt_quota.c b/net/netfilter/xt_quota.c
index b4f7dfe..70eb2b4 100644
--- a/net/netfilter/xt_quota.c
+++ b/net/netfilter/xt_quota.c
@@ -11,7 +11,8 @@
 #include <linux/netfilter/xt_quota.h>
 
 struct xt_quota_priv {
-	uint64_t quota;
+	spinlock_t	lock;
+	uint64_t	quota;
 };
 
 MODULE_LICENSE("GPL");
@@ -20,8 +21,6 @@
 MODULE_ALIAS("ipt_quota");
 MODULE_ALIAS("ip6t_quota");
 
-static DEFINE_SPINLOCK(quota_lock);
-
 static bool
 quota_mt(const struct sk_buff *skb, struct xt_action_param *par)
 {
@@ -29,7 +28,7 @@
 	struct xt_quota_priv *priv = q->master;
 	bool ret = q->flags & XT_QUOTA_INVERT;
 
-	spin_lock_bh(&quota_lock);
+	spin_lock_bh(&priv->lock);
 	if (priv->quota >= skb->len) {
 		priv->quota -= skb->len;
 		ret = !ret;
@@ -37,9 +36,7 @@
 		/* we do not allow even small packets from now on */
 		priv->quota = 0;
 	}
-	/* Copy quota back to matchinfo so that iptables can display it */
-	q->quota = priv->quota;
-	spin_unlock_bh(&quota_lock);
+	spin_unlock_bh(&priv->lock);
 
 	return ret;
 }
@@ -55,6 +52,7 @@
 	if (q->master == NULL)
 		return -ENOMEM;
 
+	spin_lock_init(&q->master->lock);
 	q->master->quota = q->quota;
 	return 0;
 }
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 8648a99..2cbf380 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1406,7 +1406,7 @@
 	struct netlink_sock *nlk = nlk_sk(sk);
 	int noblock = flags&MSG_DONTWAIT;
 	size_t copied;
-	struct sk_buff *skb, *frag __maybe_unused = NULL;
+	struct sk_buff *skb;
 	int err;
 
 	if (flags&MSG_OOB)
@@ -1441,7 +1441,21 @@
 			kfree_skb(skb);
 			skb = compskb;
 		} else {
-			frag = skb_shinfo(skb)->frag_list;
+			/*
+			 * Before setting frag_list to NULL, we must get a
+			 * private copy of skb if shared (because of MSG_PEEK)
+			 */
+			if (skb_shared(skb)) {
+				struct sk_buff *nskb;
+
+				nskb = pskb_copy(skb, GFP_KERNEL);
+				kfree_skb(skb);
+				skb = nskb;
+				err = -ENOMEM;
+				if (!skb)
+					goto out;
+			}
+			kfree_skb(skb_shinfo(skb)->frag_list);
 			skb_shinfo(skb)->frag_list = NULL;
 		}
 	}
@@ -1478,10 +1492,6 @@
 	if (flags & MSG_TRUNC)
 		copied = skb->len;
 
-#ifdef CONFIG_COMPAT_NETLINK_MESSAGES
-	skb_shinfo(skb)->frag_list = frag;
-#endif
-
 	skb_free_datagram(sk, skb);
 
 	if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2)
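
[For reference, skb_shared() is simply a users-count check, which is why a peeked skb — MSG_PEEK leaves it on the receive queue — must be copied before its frag_list is mutated; roughly:]

static inline int skb_shared(const struct sk_buff *skb)
{
	/* More than one holder: someone else may still read this skb. */
	return atomic_read(&skb->users) != 1;
}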
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index aa4308a..26ed3e8 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -303,6 +303,7 @@
 errout:
 	return err;
 }
+EXPORT_SYMBOL(genl_register_ops);
 
 /**
  * genl_unregister_ops - unregister generic netlink operations
@@ -337,6 +338,7 @@
 
 	return -ENOENT;
 }
+EXPORT_SYMBOL(genl_unregister_ops);
 
 /**
  * genl_register_family - register a generic netlink family
@@ -405,6 +407,7 @@
 errout:
 	return err;
 }
+EXPORT_SYMBOL(genl_register_family);
 
 /**
  * genl_register_family_with_ops - register a generic netlink family
@@ -485,6 +488,7 @@
 
 	return -ENOENT;
 }
+EXPORT_SYMBOL(genl_unregister_family);
 
 static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 {
@@ -873,11 +877,7 @@
 	for (i = 0; i < GENL_FAM_TAB_SIZE; i++)
 		INIT_LIST_HEAD(&family_ht[i]);
 
-	err = genl_register_family(&genl_ctrl);
-	if (err < 0)
-		goto problem;
-
-	err = genl_register_ops(&genl_ctrl, &genl_ctrl_ops);
+	err = genl_register_family_with_ops(&genl_ctrl, &genl_ctrl_ops, 1);
 	if (err < 0)
 		goto problem;
 
@@ -899,11 +899,6 @@
 
 subsys_initcall(genl_init);
 
-EXPORT_SYMBOL(genl_register_ops);
-EXPORT_SYMBOL(genl_unregister_ops);
-EXPORT_SYMBOL(genl_register_family);
-EXPORT_SYMBOL(genl_unregister_family);
-
 static int genlmsg_mcast(struct sk_buff *skb, u32 pid, unsigned long group,
 			 gfp_t flags)
 {
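
[The consolidated registration used above is the same call now exported for modules; a hypothetical caller — my_family, my_ops and my_doit are illustrative names:]

static struct genl_family my_family = {
	.id      = GENL_ID_GENERATE,	/* let genetlink pick an id */
	.name    = "my_example",
	.version = 1,
	.maxattr = 0,
};

static struct genl_ops my_ops = {
	.cmd  = 1,
	.doit = my_doit,		/* hypothetical handler */
};

static int __init my_module_init(void)
{
	/* registers the family, then the ops; unwinds on failure */
	return genl_register_family_with_ops(&my_family, &my_ops, 1);
}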
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
index cbc244a..b4fdaac 100644
--- a/net/rose/rose_route.c
+++ b/net/rose/rose_route.c
@@ -109,7 +109,9 @@
 		init_timer(&rose_neigh->t0timer);
 
 		if (rose_route->ndigis != 0) {
-			if ((rose_neigh->digipeat = kmalloc(sizeof(ax25_digi), GFP_KERNEL)) == NULL) {
+			rose_neigh->digipeat =
+				kmalloc(sizeof(ax25_digi), GFP_ATOMIC);
+			if (rose_neigh->digipeat == NULL) {
 				kfree(rose_neigh);
 				res = -ENOMEM;
 				goto out;
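
[The GFP_KERNEL -> GFP_ATOMIC switch is presumably because this path runs with a spinlock held (rose_node_list_lock, taken with spin_lock_bh), where a sleeping allocation is illegal; schematically:]

spin_lock_bh(&rose_node_list_lock);
/* ... route lookup ... */
rose_neigh->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC); /* may not sleep here */
/* ... */
spin_unlock_bh(&rose_node_list_lock);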
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index a16b017..11f195a 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -33,6 +33,7 @@
 static struct tcf_common *tcf_mirred_ht[MIRRED_TAB_MASK + 1];
 static u32 mirred_idx_gen;
 static DEFINE_RWLOCK(mirred_lock);
+static LIST_HEAD(mirred_list);
 
 static struct tcf_hashinfo mirred_hash_info = {
 	.htab	=	tcf_mirred_ht,
@@ -47,7 +48,9 @@
 			m->tcf_bindcnt--;
 		m->tcf_refcnt--;
 		if(!m->tcf_bindcnt && m->tcf_refcnt <= 0) {
-			dev_put(m->tcfm_dev);
+			list_del(&m->tcfm_list);
+			if (m->tcfm_dev)
+				dev_put(m->tcfm_dev);
 			tcf_hash_destroy(&m->common, &mirred_hash_info);
 			return 1;
 		}
@@ -134,8 +137,10 @@
 		m->tcfm_ok_push = ok_push;
 	}
 	spin_unlock_bh(&m->tcf_lock);
-	if (ret == ACT_P_CREATED)
+	if (ret == ACT_P_CREATED) {
+		list_add(&m->tcfm_list, &mirred_list);
 		tcf_hash_insert(pc, &mirred_hash_info);
+	}
 
 	return ret;
 }
@@ -164,9 +169,14 @@
 	m->tcf_bstats.packets++;
 
 	dev = m->tcfm_dev;
+	if (!dev) {
+		printk_once(KERN_NOTICE "tc mirred: target device is gone\n");
+		goto out;
+	}
+
 	if (!(dev->flags & IFF_UP)) {
 		if (net_ratelimit())
-			pr_notice("tc mirred to Houston: device %s is gone!\n",
+			pr_notice("tc mirred to Houston: device %s is down\n",
 				  dev->name);
 		goto out;
 	}
@@ -230,6 +240,28 @@
 	return -1;
 }
 
+static int mirred_device_event(struct notifier_block *unused,
+			       unsigned long event, void *ptr)
+{
+	struct net_device *dev = ptr;
+	struct tcf_mirred *m;
+
+	if (event == NETDEV_UNREGISTER)
+		list_for_each_entry(m, &mirred_list, tcfm_list) {
+			if (m->tcfm_dev == dev) {
+				dev_put(dev);
+				m->tcfm_dev = NULL;
+			}
+		}
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block mirred_device_notifier = {
+	.notifier_call = mirred_device_event,
+};
+
+
 static struct tc_action_ops act_mirred_ops = {
 	.kind		=	"mirred",
 	.hinfo		=	&mirred_hash_info,
@@ -250,12 +282,17 @@
 
 static int __init mirred_init_module(void)
 {
+	int err = register_netdevice_notifier(&mirred_device_notifier);
+	if (err)
+		return err;
+
 	pr_info("Mirror/redirect action on\n");
 	return tcf_register_action(&act_mirred_ops);
 }
 
 static void __exit mirred_cleanup_module(void)
 {
+	unregister_netdevice_notifier(&mirred_device_notifier);
 	tcf_unregister_action(&act_mirred_ops);
 }
 
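
[For context, a typical mirred setup whose target device can later disappear — standard tc syntax, eth0/eth1 illustrative:]

tc qdisc add dev eth0 ingress
tc filter add dev eth0 parent ffff: protocol ip u32 match u32 0 0 \
	action mirred egress mirror dev eth1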
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 24e614c..d0386a4 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -218,6 +218,7 @@
 		if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + sizeof(*iph)))
 			goto drop;
 
+		icmph = (void *)(skb_network_header(skb) + ihl);
 		iph = (void *)(icmph + 1);
 		if (egress)
 			addr = iph->daddr;
@@ -246,7 +247,7 @@
 			iph->saddr = new_addr;
 
 		inet_proto_csum_replace4(&icmph->checksum, skb, addr, new_addr,
-					 1);
+					 0);
 		break;
 	}
 	default:
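
[The first hunk is the real fix: pskb_may_pull() may reallocate the skb header, so any pointer computed before the call is stale and must be re-derived. The second hunk passes 0 for the pseudo-header argument because the ICMP checksum, unlike TCP/UDP, does not cover a pseudo-header. The general pattern:]

struct icmphdr *icmph;

if (!pskb_may_pull(skb, ihl + sizeof(*icmph)))
	goto drop;
/* re-derive after the pull: the header area may have moved */
icmph = (void *)(skb_network_header(skb) + ihl);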
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 4f52214..7416a5c 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -134,10 +134,12 @@
 #endif
 
 		for (i = n->sel.nkeys; i>0; i--, key++) {
-			unsigned int toff;
+			int toff = off + key->off + (off2 & key->offmask);
 			__be32 *data, _data;
 
-			toff = off + key->off + (off2 & key->offmask);
+			if (skb_headroom(skb) + toff < 0)
+				goto out;
+
 			data = skb_header_pointer(skb, toff, 4, &_data);
 			if (!data)
 				goto out;