Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

Conflicts:
	drivers/net/yellowfin.c
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index fed9bda..21333c1 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1727,12 +1727,14 @@
 	tristate "Micrel KSZ8842"
 	depends on HAS_IOMEM
 	help
-	  This platform driver is for Micrel KSZ8842 chip.
+	  This platform driver is for Micrel KSZ8842 / KS8842
+	  2-port ethernet switch chip (managed, VLAN, QoS).
 
 config KS8851
        tristate "Micrel KS8851 SPI"
        depends on SPI
        select MII
+	select CRC32
        help
          SPI driver for Micrel KS8851 SPI attached network chip.
 
diff --git a/drivers/net/arm/w90p910_ether.c b/drivers/net/arm/w90p910_ether.c
index 890716f..3a0948f 100644
--- a/drivers/net/arm/w90p910_ether.c
+++ b/drivers/net/arm/w90p910_ether.c
@@ -1098,7 +1098,7 @@
 	.probe		= w90p910_ether_probe,
 	.remove		= __devexit_p(w90p910_ether_remove),
 	.driver		= {
-		.name	= "w90p910-emc",
+		.name	= "nuc900-emc",
 		.owner	= THIS_MODULE,
 	},
 };
@@ -1119,5 +1119,5 @@
 MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
 MODULE_DESCRIPTION("w90p910 MAC driver!");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:w90p910-emc");
+MODULE_ALIAS("platform:nuc900-emc");
 
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index bee5101..951735c 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -952,9 +952,10 @@
 	int rc = NETDEV_TX_OK;
 	dma_addr_t mapping;
 	u32 len, entry, ctrl;
+	unsigned long flags;
 
 	len = skb->len;
-	spin_lock_irq(&bp->lock);
+	spin_lock_irqsave(&bp->lock, flags);
 
 	/* This is a hard error, log it. */
 	if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
@@ -1027,7 +1028,7 @@
 	dev->trans_start = jiffies;
 
 out_unlock:
-	spin_unlock_irq(&bp->lock);
+	spin_unlock_irqrestore(&bp->lock, flags);
 
 	return rc;
 
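Note on the locking conversion: the b44 hunk above, and the matching fec_mpc52xx, ixp2000, macb, mlx4, smc91x, tulip, ucc_geth and via-rhine hunks further down, all make the same change — the transmit path stops using the _irq lock variants and switches to _irqsave, so it no longer re-enables interrupts behind a caller (such as netpoll) that invoked it with interrupts already disabled; the netpoll hunk near the end of this merge adds a WARN_ONCE for exactly that situation. A minimal sketch of the idiom follows; the example_* names are illustrative only and are not part of this merge.

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>
	#include <linux/spinlock.h>

	struct example_priv {
		spinlock_t lock;
	};

	static netdev_tx_t example_start_xmit(struct sk_buff *skb,
					      struct net_device *dev)
	{
		struct example_priv *priv = netdev_priv(dev);
		unsigned long flags;

		/* Save the caller's interrupt state; spin_unlock_irq() would
		 * unconditionally re-enable interrupts on the way out. */
		spin_lock_irqsave(&priv->lock, flags);

		/* ... queue skb on the hardware TX ring here ... */

		spin_unlock_irqrestore(&priv->lock, flags);
		return NETDEV_TX_OK;
	}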
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index fcaf3bc..1357d54 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -401,9 +401,11 @@
 	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
 	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
 
+	mutex_lock(&bp->cnic_lock);
 	cp->drv_state = 0;
 	bnapi->cnic_present = 0;
 	rcu_assign_pointer(bp->cnic_ops, NULL);
+	mutex_unlock(&bp->cnic_lock);
 	synchronize_rcu();
 	return 0;
 }
@@ -431,13 +433,13 @@
 	struct cnic_ops *c_ops;
 	struct cnic_ctl_info info;
 
-	rcu_read_lock();
-	c_ops = rcu_dereference(bp->cnic_ops);
+	mutex_lock(&bp->cnic_lock);
+	c_ops = bp->cnic_ops;
 	if (c_ops) {
 		info.cmd = CNIC_CTL_STOP_CMD;
 		c_ops->cnic_ctl(bp->cnic_data, &info);
 	}
-	rcu_read_unlock();
+	mutex_unlock(&bp->cnic_lock);
 }
 
 static void
@@ -446,8 +448,8 @@
 	struct cnic_ops *c_ops;
 	struct cnic_ctl_info info;
 
-	rcu_read_lock();
-	c_ops = rcu_dereference(bp->cnic_ops);
+	mutex_lock(&bp->cnic_lock);
+	c_ops = bp->cnic_ops;
 	if (c_ops) {
 		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
 			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
@@ -457,7 +459,7 @@
 		info.cmd = CNIC_CTL_START_CMD;
 		c_ops->cnic_ctl(bp->cnic_data, &info);
 	}
-	rcu_read_unlock();
+	mutex_unlock(&bp->cnic_lock);
 }
 
 #else
@@ -7687,6 +7689,9 @@
 
 	spin_lock_init(&bp->phy_lock);
 	spin_lock_init(&bp->indirect_lock);
+#ifdef BCM_CNIC
+	mutex_init(&bp->cnic_lock);
+#endif
 	INIT_WORK(&bp->reset_task, bnx2_reset_task);
 
 	dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 7544188..6c7f795 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -6903,6 +6903,7 @@
 	u32			idle_chk_status_idx;
 
 #ifdef BCM_CNIC
+	struct mutex		cnic_lock;
 	struct cnic_eth_dev	cnic_eth_dev;
 #endif
 
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index f8a0923..d45eacb 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -138,6 +138,16 @@
 	return NULL;
 }
 
+static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
+{
+	atomic_inc(&ulp_ops->ref_count);
+}
+
+static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
+{
+	atomic_dec(&ulp_ops->ref_count);
+}
+
 static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
 {
 	struct cnic_local *cp = dev->cnic_priv;
@@ -358,6 +368,7 @@
 	}
 	read_unlock(&cnic_dev_lock);
 
+	atomic_set(&ulp_ops->ref_count, 0);
 	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
 	mutex_unlock(&cnic_lock);
 
@@ -379,6 +390,8 @@
 int cnic_unregister_driver(int ulp_type)
 {
 	struct cnic_dev *dev;
+	struct cnic_ulp_ops *ulp_ops;
+	int i = 0;
 
 	if (ulp_type >= MAX_CNIC_ULP_TYPE) {
 		printk(KERN_ERR PFX "cnic_unregister_driver: Bad type %d\n",
@@ -386,7 +399,8 @@
 		return -EINVAL;
 	}
 	mutex_lock(&cnic_lock);
-	if (!cnic_ulp_tbl[ulp_type]) {
+	ulp_ops = cnic_ulp_tbl[ulp_type];
+	if (!ulp_ops) {
 		printk(KERN_ERR PFX "cnic_unregister_driver: Type %d has not "
 				    "been registered\n", ulp_type);
 		goto out_unlock;
@@ -411,6 +425,14 @@
 
 	mutex_unlock(&cnic_lock);
 	synchronize_rcu();
+	while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {
+		msleep(100);
+		i++;
+	}
+
+	if (atomic_read(&ulp_ops->ref_count) != 0)
+		printk(KERN_WARNING PFX "%s: Failed waiting for ref count to go"
+					" to zero.\n", dev->netdev->name);
 	return 0;
 
 out_unlock:
@@ -466,6 +488,7 @@
 static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
 {
 	struct cnic_local *cp = dev->cnic_priv;
+	int i = 0;
 
 	if (ulp_type >= MAX_CNIC_ULP_TYPE) {
 		printk(KERN_ERR PFX "cnic_unregister_device: Bad type %d\n",
@@ -486,6 +509,15 @@
 
 	synchronize_rcu();
 
+	while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
+	       i < 20) {
+		msleep(100);
+		i++;
+	}
+	if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
+		printk(KERN_WARNING PFX "%s: Failed waiting for ULP up call"
+					" to complete.\n", dev->netdev->name);
+
 	return 0;
 }
 EXPORT_SYMBOL(cnic_unregister_driver);
@@ -1101,18 +1133,23 @@
 	if (cp->cnic_uinfo)
 		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
 
-	rcu_read_lock();
 	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
 		struct cnic_ulp_ops *ulp_ops;
 
-		ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
-		if (!ulp_ops)
+		mutex_lock(&cnic_lock);
+		ulp_ops = cp->ulp_ops[if_type];
+		if (!ulp_ops) {
+			mutex_unlock(&cnic_lock);
 			continue;
+		}
+		set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
+		mutex_unlock(&cnic_lock);
 
 		if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
 			ulp_ops->cnic_stop(cp->ulp_handle[if_type]);
+
+		clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
 	}
-	rcu_read_unlock();
 }
 
 static void cnic_ulp_start(struct cnic_dev *dev)
@@ -1120,18 +1157,23 @@
 	struct cnic_local *cp = dev->cnic_priv;
 	int if_type;
 
-	rcu_read_lock();
 	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
 		struct cnic_ulp_ops *ulp_ops;
 
-		ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
-		if (!ulp_ops || !ulp_ops->cnic_start)
+		mutex_lock(&cnic_lock);
+		ulp_ops = cp->ulp_ops[if_type];
+		if (!ulp_ops || !ulp_ops->cnic_start) {
+			mutex_unlock(&cnic_lock);
 			continue;
+		}
+		set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
+		mutex_unlock(&cnic_lock);
 
 		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
 			ulp_ops->cnic_start(cp->ulp_handle[if_type]);
+
+		clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
 	}
-	rcu_read_unlock();
 }
 
 static int cnic_ctl(void *data, struct cnic_ctl_info *info)
@@ -1141,22 +1183,18 @@
 	switch (info->cmd) {
 	case CNIC_CTL_STOP_CMD:
 		cnic_hold(dev);
-		mutex_lock(&cnic_lock);
 
 		cnic_ulp_stop(dev);
 		cnic_stop_hw(dev);
 
-		mutex_unlock(&cnic_lock);
 		cnic_put(dev);
 		break;
 	case CNIC_CTL_START_CMD:
 		cnic_hold(dev);
-		mutex_lock(&cnic_lock);
 
 		if (!cnic_start_hw(dev))
 			cnic_ulp_start(dev);
 
-		mutex_unlock(&cnic_lock);
 		cnic_put(dev);
 		break;
 	default:
@@ -1170,19 +1208,23 @@
 	int i;
 	struct cnic_local *cp = dev->cnic_priv;
 
-	rcu_read_lock();
 	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
 		struct cnic_ulp_ops *ulp_ops;
 
-		ulp_ops = rcu_dereference(cnic_ulp_tbl[i]);
-		if (!ulp_ops || !ulp_ops->cnic_init)
+		mutex_lock(&cnic_lock);
+		ulp_ops = cnic_ulp_tbl[i];
+		if (!ulp_ops || !ulp_ops->cnic_init) {
+			mutex_unlock(&cnic_lock);
 			continue;
+		}
+		ulp_get(ulp_ops);
+		mutex_unlock(&cnic_lock);
 
 		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
 			ulp_ops->cnic_init(dev);
 
+		ulp_put(ulp_ops);
 	}
-	rcu_read_unlock();
 }
 
 static void cnic_ulp_exit(struct cnic_dev *dev)
@@ -1190,19 +1232,23 @@
 	int i;
 	struct cnic_local *cp = dev->cnic_priv;
 
-	rcu_read_lock();
 	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
 		struct cnic_ulp_ops *ulp_ops;
 
-		ulp_ops = rcu_dereference(cnic_ulp_tbl[i]);
-		if (!ulp_ops || !ulp_ops->cnic_exit)
+		mutex_lock(&cnic_lock);
+		ulp_ops = cnic_ulp_tbl[i];
+		if (!ulp_ops || !ulp_ops->cnic_exit) {
+			mutex_unlock(&cnic_lock);
 			continue;
+		}
+		ulp_get(ulp_ops);
+		mutex_unlock(&cnic_lock);
 
 		if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
 			ulp_ops->cnic_exit(dev);
 
+		ulp_put(ulp_ops);
 	}
-	rcu_read_unlock();
 }
 
 static int cnic_cm_offload_pg(struct cnic_sock *csk)
@@ -2418,6 +2464,37 @@
 	return 0;
 }
 
+static int cnic_register_netdev(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	int err;
+
+	if (!ethdev)
+		return -ENODEV;
+
+	if (ethdev->drv_state & CNIC_DRV_STATE_REGD)
+		return 0;
+
+	err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
+	if (err)
+		printk(KERN_ERR PFX "%s: register_cnic failed\n",
+		       dev->netdev->name);
+
+	return err;
+}
+
+static void cnic_unregister_netdev(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+
+	if (!ethdev)
+		return;
+
+	ethdev->drv_unregister_cnic(dev->netdev);
+}
+
 static int cnic_start_hw(struct cnic_dev *dev)
 {
 	struct cnic_local *cp = dev->cnic_priv;
@@ -2427,13 +2504,6 @@
 	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
 		return -EALREADY;
 
-	err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
-	if (err) {
-		printk(KERN_ERR PFX "%s: register_cnic failed\n",
-		       dev->netdev->name);
-		goto err2;
-	}
-
 	dev->regview = ethdev->io_base;
 	cp->chip_id = ethdev->chip_id;
 	pci_dev_get(dev->pcidev);
@@ -2463,18 +2533,13 @@
 	return 0;
 
 err1:
-	ethdev->drv_unregister_cnic(dev->netdev);
 	cp->free_resc(dev);
 	pci_dev_put(dev->pcidev);
-err2:
 	return err;
 }
 
 static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
 {
-	struct cnic_local *cp = dev->cnic_priv;
-	struct cnic_eth_dev *ethdev = cp->ethdev;
-
 	cnic_disable_bnx2_int_sync(dev);
 
 	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
@@ -2486,8 +2551,6 @@
 	cnic_setup_5709_context(dev, 0);
 	cnic_free_irq(dev);
 
-	ethdev->drv_unregister_cnic(dev->netdev);
-
 	cnic_free_resc(dev);
 }
 
@@ -2568,7 +2631,7 @@
 	probe = symbol_get(bnx2_cnic_probe);
 	if (probe) {
 		ethdev = (*probe)(dev);
-		symbol_put_addr(probe);
+		symbol_put(bnx2_cnic_probe);
 	}
 	if (!ethdev)
 		return NULL;
@@ -2671,10 +2734,12 @@
 		else if (event == NETDEV_UNREGISTER)
 			cnic_ulp_exit(dev);
 		else if (event == NETDEV_UP) {
-			mutex_lock(&cnic_lock);
+			if (cnic_register_netdev(dev) != 0) {
+				cnic_put(dev);
+				goto done;
+			}
 			if (!cnic_start_hw(dev))
 				cnic_ulp_start(dev);
-			mutex_unlock(&cnic_lock);
 		}
 
 		rcu_read_lock();
@@ -2693,10 +2758,9 @@
 		rcu_read_unlock();
 
 		if (event == NETDEV_GOING_DOWN) {
-			mutex_lock(&cnic_lock);
 			cnic_ulp_stop(dev);
 			cnic_stop_hw(dev);
-			mutex_unlock(&cnic_lock);
+			cnic_unregister_netdev(dev);
 		} else if (event == NETDEV_UNREGISTER) {
 			write_lock(&cnic_dev_lock);
 			list_del_init(&dev->list);
@@ -2728,6 +2792,7 @@
 		}
 
 		cnic_ulp_exit(dev);
+		cnic_unregister_netdev(dev);
 		list_del_init(&dev->list);
 		cnic_free_dev(dev);
 	}
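Note on the cnic.c changes above: the rcu_read_lock()-protected upcalls are replaced with a scheme that is allowed to sleep — the ops pointer is sampled under the new cnic_lock mutex, pinned either with the ulp_get()/ulp_put() atomic reference count or with the ULP_F_CALL_PENDING bit, and the unregister paths poll with msleep() (bounded to roughly two seconds) until outstanding calls drain. A compressed sketch of that shape, using invented demo_* names rather than the cnic structures:

	#include <linux/atomic.h>
	#include <linux/delay.h>
	#include <linux/mutex.h>

	struct demo_ops {
		atomic_t ref_count;
		void (*callback)(void *data);
	};

	static DEFINE_MUTEX(demo_lock);
	static struct demo_ops *demo_registered_ops;

	static void demo_upcall(void *data)
	{
		struct demo_ops *ops;

		mutex_lock(&demo_lock);
		ops = demo_registered_ops;
		if (ops)
			atomic_inc(&ops->ref_count);	/* pin across the call */
		mutex_unlock(&demo_lock);

		if (!ops)
			return;
		ops->callback(data);		/* may sleep; no RCU section held */
		atomic_dec(&ops->ref_count);
	}

	static void demo_unregister(struct demo_ops *ops)
	{
		int i = 0;

		mutex_lock(&demo_lock);
		demo_registered_ops = NULL;
		mutex_unlock(&demo_lock);

		/* Bounded wait for in-flight upcalls, as in cnic_unregister_driver(). */
		while (atomic_read(&ops->ref_count) != 0 && i++ < 20)
			msleep(100);
	}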
diff --git a/drivers/net/cnic.h b/drivers/net/cnic.h
index 5192d4a..a94b302 100644
--- a/drivers/net/cnic.h
+++ b/drivers/net/cnic.h
@@ -176,6 +176,7 @@
 	unsigned long ulp_flags[MAX_CNIC_ULP_TYPE];
 #define ULP_F_INIT	0
 #define ULP_F_START	1
+#define ULP_F_CALL_PENDING	2
 	struct cnic_ulp_ops *ulp_ops[MAX_CNIC_ULP_TYPE];
 
 	/* protected by ulp_lock */
diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h
index d1bce27..a492357 100644
--- a/drivers/net/cnic_if.h
+++ b/drivers/net/cnic_if.h
@@ -290,6 +290,7 @@
 	void (*iscsi_nl_send_msg)(struct cnic_dev *dev, u32 msg_type,
 				  char *data, u16 data_size);
 	struct module *owner;
+	atomic_t ref_count;
 };
 
 extern int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops);
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 6a31760..679965c 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -1900,7 +1900,7 @@
 				nic->ru_running = RU_SUSPENDED;
 		pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
 					       sizeof(struct rfd),
-					       PCI_DMA_BIDIRECTIONAL);
+					       PCI_DMA_FROMDEVICE);
 		return -ENODATA;
 	}
 
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 428bf6d..0f8d961 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -4539,8 +4539,7 @@
 		/* Allow time for pending master requests to run */
 		e1000e_disable_pcie_master(&adapter->hw);
 
-		if ((adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) &&
-		    !(hw->mac.ops.check_mng_mode(hw))) {
+		if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
 			/* enable wakeup by the PHY */
 			retval = e1000_init_phy_wakeup(adapter, wufc);
 			if (retval)
@@ -4558,7 +4557,8 @@
 	*enable_wake = !!wufc;
 
 	/* make sure adapter isn't asleep if manageability is enabled */
-	if (adapter->flags & FLAG_MNG_PT_ENABLED)
+	if ((adapter->flags & FLAG_MNG_PT_ENABLED) ||
+	    (hw->mac.ops.check_mng_mode(hw)))
 		*enable_wake = true;
 
 	if (adapter->hw.phy.type == e1000_phy_igp_3)
@@ -4671,14 +4671,6 @@
 		return err;
 	}
 
-	/* AER (Advanced Error Reporting) hooks */
-	err = pci_enable_pcie_error_reporting(pdev);
-	if (err) {
-		dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
-		                    "0x%x\n", err);
-		/* non-fatal, continue */
-	}
-
 	pci_set_master(pdev);
 
 	pci_enable_wake(pdev, PCI_D3hot, 0);
@@ -4991,6 +4983,14 @@
 	if (err)
 		goto err_pci_reg;
 
+	/* AER (Advanced Error Reporting) hooks */
+	err = pci_enable_pcie_error_reporting(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
+		        "0x%x\n", err);
+		/* non-fatal, continue */
+	}
+
 	pci_set_master(pdev);
 	/* PCI config space info */
 	err = pci_save_state(pdev);
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index cc78633..c40113f 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -309,6 +309,7 @@
 {
 	struct mpc52xx_fec_priv *priv = netdev_priv(dev);
 	struct bcom_fec_bd *bd;
+	unsigned long flags;
 
 	if (bcom_queue_full(priv->tx_dmatsk)) {
 		if (net_ratelimit())
@@ -316,7 +317,7 @@
 		return NETDEV_TX_BUSY;
 	}
 
-	spin_lock_irq(&priv->lock);
+	spin_lock_irqsave(&priv->lock, flags);
 	dev->trans_start = jiffies;
 
 	bd = (struct bcom_fec_bd *)
@@ -332,7 +333,7 @@
 		netif_stop_queue(dev);
 	}
 
-	spin_unlock_irq(&priv->lock);
+	spin_unlock_irqrestore(&priv->lock, flags);
 
 	return NETDEV_TX_OK;
 }
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 4544da4..772104c 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -490,6 +490,7 @@
 
 	dev_set_drvdata(&ofdev->dev, NULL);
 
+	unregister_netdev(dev);
 	iounmap(priv->regs);
 	free_netdev(priv->ndev);
 
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index 5443558..d7579e4 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -1305,6 +1305,8 @@
 
 	free_irq(dev->emac_irq, dev);
 
+	netif_carrier_off(ndev);
+
 	return 0;
 }
 
diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c
index 22baf65..eb42468 100644
--- a/drivers/net/irda/au1k_ir.c
+++ b/drivers/net/irda/au1k_ir.c
@@ -23,7 +23,6 @@
 #include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/netdevice.h>
-#include <linux/etherdevice.h>
 #include <linux/slab.h>
 #include <linux/rtnetlink.h>
 #include <linux/interrupt.h>
@@ -205,9 +204,6 @@
 	.ndo_start_xmit		= au1k_irda_hard_xmit,
 	.ndo_tx_timeout		= au1k_tx_timeout,
 	.ndo_do_ioctl		= au1k_irda_ioctl,
-	.ndo_change_mtu		= eth_change_mtu,
-	.ndo_validate_addr	= eth_validate_addr,
-	.ndo_set_mac_address	= eth_mac_addr,
 };
 
 static int au1k_irda_net_init(struct net_device *dev)
diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c
index e76a083..1445e58 100644
--- a/drivers/net/irda/pxaficp_ir.c
+++ b/drivers/net/irda/pxaficp_ir.c
@@ -803,9 +803,6 @@
 	.ndo_stop		= pxa_irda_stop,
 	.ndo_start_xmit		= pxa_irda_hard_xmit,
 	.ndo_do_ioctl		= pxa_irda_ioctl,
-	.ndo_change_mtu		= eth_change_mtu,
-	.ndo_validate_addr	= eth_validate_addr,
-	.ndo_set_mac_address	= eth_mac_addr,
 };
 
 static int pxa_irda_probe(struct platform_device *pdev)
@@ -830,6 +827,7 @@
 	if (!dev)
 		goto err_mem_3;
 
+	SET_NETDEV_DEV(dev, &pdev->dev);
 	si = netdev_priv(dev);
 	si->dev = &pdev->dev;
 	si->pdata = pdev->dev.platform_data;
diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c
index 70e6acc..38bf7cf 100644
--- a/drivers/net/irda/sa1100_ir.c
+++ b/drivers/net/irda/sa1100_ir.c
@@ -24,7 +24,6 @@
 #include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/netdevice.h>
-#include <linux/etherdevice.h>
 #include <linux/slab.h>
 #include <linux/rtnetlink.h>
 #include <linux/interrupt.h>
@@ -881,9 +880,6 @@
 	.ndo_stop		= sa1100_irda_stop,
 	.ndo_start_xmit		= sa1100_irda_hard_xmit,
 	.ndo_do_ioctl		= sa1100_irda_ioctl,
-	.ndo_change_mtu		= eth_change_mtu,
-	.ndo_validate_addr	= eth_validate_addr,
-	.ndo_set_mac_address	= eth_mac_addr,
 };
 
 static int sa1100_irda_probe(struct platform_device *pdev)
diff --git a/drivers/net/ixp2000/ixpdev.c b/drivers/net/ixp2000/ixpdev.c
index 588b44d..1272434 100644
--- a/drivers/net/ixp2000/ixpdev.c
+++ b/drivers/net/ixp2000/ixpdev.c
@@ -41,6 +41,7 @@
 	struct ixpdev_priv *ip = netdev_priv(dev);
 	struct ixpdev_tx_desc *desc;
 	int entry;
+	unsigned long flags;
 
 	if (unlikely(skb->len > PAGE_SIZE)) {
 		/* @@@ Count drops.  */
@@ -63,11 +64,11 @@
 
 	dev->trans_start = jiffies;
 
-	local_irq_disable();
+	local_irq_save(flags);
 	ip->tx_queue_entries++;
 	if (ip->tx_queue_entries == TX_BUF_COUNT_PER_CHAN)
 		netif_stop_queue(dev);
-	local_irq_enable();
+	local_irq_restore(flags);
 
 	return NETDEV_TX_OK;
 }
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index d22952c..01aaca9 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -620,6 +620,7 @@
 	dma_addr_t mapping;
 	unsigned int len, entry;
 	u32 ctrl;
+	unsigned long flags;
 
 #ifdef DEBUG
 	int i;
@@ -635,12 +636,12 @@
 #endif
 
 	len = skb->len;
-	spin_lock_irq(&bp->lock);
+	spin_lock_irqsave(&bp->lock, flags);
 
 	/* This is a hard error, log it. */
 	if (TX_BUFFS_AVAIL(bp) < 1) {
 		netif_stop_queue(dev);
-		spin_unlock_irq(&bp->lock);
+		spin_unlock_irqrestore(&bp->lock, flags);
 		dev_err(&bp->pdev->dev,
 			"BUG! Tx Ring full when queue awake!\n");
 		dev_dbg(&bp->pdev->dev, "tx_head = %u, tx_tail = %u\n",
@@ -674,7 +675,7 @@
 	if (TX_BUFFS_AVAIL(bp) < 1)
 		netif_stop_queue(dev);
 
-	spin_unlock_irq(&bp->lock);
+	spin_unlock_irqrestore(&bp->lock, flags);
 
 	dev->trans_start = jiffies;
 
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
index d3d6e99..8c72799 100644
--- a/drivers/net/mlx4/en_tx.c
+++ b/drivers/net/mlx4/en_tx.c
@@ -437,6 +437,7 @@
 {
 	struct mlx4_en_cq *cq = &priv->tx_cq[tx_ind];
 	struct mlx4_en_tx_ring *ring = &priv->tx_ring[tx_ind];
+	unsigned long flags;
 
 	/* If we don't have a pending timer, set one up to catch our recent
 	   post in case the interface becomes idle */
@@ -445,9 +446,9 @@
 
 	/* Poll the CQ every mlx4_en_TX_MODER_POLL packets */
 	if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0)
-		if (spin_trylock_irq(&ring->comp_lock)) {
+		if (spin_trylock_irqsave(&ring->comp_lock, flags)) {
 			mlx4_en_process_tx_cq(priv->dev, cq);
-			spin_unlock_irq(&ring->comp_lock);
+			spin_unlock_irqrestore(&ring->comp_lock, flags);
 		}
 }
 
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index 0f2c52c..61be6d7 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -196,21 +196,23 @@
 /* this enables an interrupt in the interrupt mask register */
 #define SMC_ENABLE_INT(lp, x) do {					\
 	unsigned char mask;						\
-	spin_lock_irq(&lp->lock);					\
+	unsigned long smc_enable_flags;					\
+	spin_lock_irqsave(&lp->lock, smc_enable_flags);			\
 	mask = SMC_GET_INT_MASK(lp);					\
 	mask |= (x);							\
 	SMC_SET_INT_MASK(lp, mask);					\
-	spin_unlock_irq(&lp->lock);					\
+	spin_unlock_irqrestore(&lp->lock, smc_enable_flags);		\
 } while (0)
 
 /* this disables an interrupt from the interrupt mask register */
 #define SMC_DISABLE_INT(lp, x) do {					\
 	unsigned char mask;						\
-	spin_lock_irq(&lp->lock);					\
+	unsigned long smc_disable_flags;				\
+	spin_lock_irqsave(&lp->lock, smc_disable_flags);		\
 	mask = SMC_GET_INT_MASK(lp);					\
 	mask &= ~(x);							\
 	SMC_SET_INT_MASK(lp, mask);					\
-	spin_unlock_irq(&lp->lock);					\
+	spin_unlock_irqrestore(&lp->lock, smc_disable_flags);		\
 } while (0)
 
 /*
@@ -520,21 +522,21 @@
  * any other concurrent access and C would always interrupt B. But life
  * isn't that easy in a SMP world...
  */
-#define smc_special_trylock(lock)					\
+#define smc_special_trylock(lock, flags)				\
 ({									\
 	int __ret;							\
-	local_irq_disable();						\
+	local_irq_save(flags);						\
 	__ret = spin_trylock(lock);					\
 	if (!__ret)							\
-		local_irq_enable();					\
+		local_irq_restore(flags);				\
 	__ret;								\
 })
-#define smc_special_lock(lock)		spin_lock_irq(lock)
-#define smc_special_unlock(lock)	spin_unlock_irq(lock)
+#define smc_special_lock(lock, flags)		spin_lock_irqsave(lock, flags)
+#define smc_special_unlock(lock, flags) 	spin_unlock_irqrestore(lock, flags)
 #else
-#define smc_special_trylock(lock)	(1)
-#define smc_special_lock(lock)		do { } while (0)
-#define smc_special_unlock(lock)	do { } while (0)
+#define smc_special_trylock(lock, flags)	(1)
+#define smc_special_lock(lock, flags)   	do { } while (0)
+#define smc_special_unlock(lock, flags)	do { } while (0)
 #endif
 
 /*
@@ -548,10 +550,11 @@
 	struct sk_buff *skb;
 	unsigned int packet_no, len;
 	unsigned char *buf;
+	unsigned long flags;
 
 	DBG(3, "%s: %s\n", dev->name, __func__);
 
-	if (!smc_special_trylock(&lp->lock)) {
+	if (!smc_special_trylock(&lp->lock, flags)) {
 		netif_stop_queue(dev);
 		tasklet_schedule(&lp->tx_task);
 		return;
@@ -559,7 +562,7 @@
 
 	skb = lp->pending_tx_skb;
 	if (unlikely(!skb)) {
-		smc_special_unlock(&lp->lock);
+		smc_special_unlock(&lp->lock, flags);
 		return;
 	}
 	lp->pending_tx_skb = NULL;
@@ -569,7 +572,7 @@
 		printk("%s: Memory allocation failed.\n", dev->name);
 		dev->stats.tx_errors++;
 		dev->stats.tx_fifo_errors++;
-		smc_special_unlock(&lp->lock);
+		smc_special_unlock(&lp->lock, flags);
 		goto done;
 	}
 
@@ -608,7 +611,7 @@
 
 	/* queue the packet for TX */
 	SMC_SET_MMU_CMD(lp, MC_ENQUEUE);
-	smc_special_unlock(&lp->lock);
+	smc_special_unlock(&lp->lock, flags);
 
 	dev->trans_start = jiffies;
 	dev->stats.tx_packets++;
@@ -633,6 +636,7 @@
 	struct smc_local *lp = netdev_priv(dev);
 	void __iomem *ioaddr = lp->base;
 	unsigned int numPages, poll_count, status;
+	unsigned long flags;
 
 	DBG(3, "%s: %s\n", dev->name, __func__);
 
@@ -658,7 +662,7 @@
 		return NETDEV_TX_OK;
 	}
 
-	smc_special_lock(&lp->lock);
+	smc_special_lock(&lp->lock, flags);
 
 	/* now, try to allocate the memory */
 	SMC_SET_MMU_CMD(lp, MC_ALLOC | numPages);
@@ -676,7 +680,7 @@
 		}
    	} while (--poll_count);
 
-	smc_special_unlock(&lp->lock);
+	smc_special_unlock(&lp->lock, flags);
 
 	lp->pending_tx_skb = skb;
    	if (!poll_count) {
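Note on the smc91x conversion above: it is the same irqsave switch, but done inside macros, so the flags variable has to be threaded through as a macro parameter. The expansion happens in the caller's scope, which is why SMC_ENABLE_INT()/SMC_DISABLE_INT() can declare their own smc_*_flags locals while smc_special_lock() writes into a flags variable the caller declares. A tiny sketch of that mechanism, with demo_* names that are not part of this patch:

	#include <linux/spinlock.h>

	#define demo_special_lock(lock, flags)	  spin_lock_irqsave(lock, flags)
	#define demo_special_unlock(lock, flags)  spin_unlock_irqrestore(lock, flags)

	static DEFINE_SPINLOCK(demo_lock);

	static void demo_critical_section(void)
	{
		unsigned long flags;	/* written by the macro expansion above */

		demo_special_lock(&demo_lock, flags);
		/* ... register access that must not race the interrupt handler ... */
		demo_special_unlock(&demo_lock, flags);
	}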
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 2a01f0e..b89b73c 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -653,8 +653,9 @@
 	int entry;
 	u32 flag;
 	dma_addr_t mapping;
+	unsigned long flags;
 
-	spin_lock_irq(&tp->lock);
+	spin_lock_irqsave(&tp->lock, flags);
 
 	/* Calculate the next Tx descriptor entry. */
 	entry = tp->cur_tx % TX_RING_SIZE;
@@ -689,7 +690,7 @@
 	/* Trigger an immediate transmit demand. */
 	iowrite32(0, tp->base_addr + CSR1);
 
-	spin_unlock_irq(&tp->lock);
+	spin_unlock_irqrestore(&tp->lock, flags);
 
 	dev->trans_start = jiffies;
 
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 7fb96f3..3b647d0 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -3084,10 +3084,11 @@
 	u8 __iomem *bd;			/* BD pointer */
 	u32 bd_status;
 	u8 txQ = 0;
+	unsigned long flags;
 
 	ugeth_vdbg("%s: IN", __func__);
 
-	spin_lock_irq(&ugeth->lock);
+	spin_lock_irqsave(&ugeth->lock, flags);
 
 	dev->stats.tx_bytes += skb->len;
 
@@ -3144,7 +3145,7 @@
 	uccf = ugeth->uccf;
 	out_be16(uccf->p_utodr, UCC_FAST_TOD);
 #endif
-	spin_unlock_irq(&ugeth->lock);
+	spin_unlock_irqrestore(&ugeth->lock, flags);
 
 	return NETDEV_TX_OK;
 }
diff --git a/drivers/net/usb/pegasus.h b/drivers/net/usb/pegasus.h
index c746782..f968c83 100644
--- a/drivers/net/usb/pegasus.h
+++ b/drivers/net/usb/pegasus.h
@@ -250,6 +250,8 @@
 		DEFAULT_GPIO_RESET )
 PEGASUS_DEV( "IO DATA USB ET/TX-S", VENDOR_IODATA, 0x0913,
 		DEFAULT_GPIO_RESET | PEGASUS_II )
+PEGASUS_DEV( "IO DATA USB ETX-US2", VENDOR_IODATA, 0x092a,
+		DEFAULT_GPIO_RESET | PEGASUS_II )
 PEGASUS_DEV( "Kingston KNU101TX Ethernet", VENDOR_KINGSTON, 0x000a,
 		DEFAULT_GPIO_RESET)
 PEGASUS_DEV( "LANEED USB Ethernet LD-USB/TX", VENDOR_LANEED, 0x4002,
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index 081402c..1fd7058 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -1220,6 +1220,7 @@
 	struct rhine_private *rp = netdev_priv(dev);
 	void __iomem *ioaddr = rp->base;
 	unsigned entry;
+	unsigned long flags;
 
 	/* Caution: the write order is important here, set the field
 	   with the "ownership" bits last. */
@@ -1263,7 +1264,7 @@
 		cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
 
 	/* lock eth irq */
-	spin_lock_irq(&rp->lock);
+	spin_lock_irqsave(&rp->lock, flags);
 	wmb();
 	rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
 	wmb();
@@ -1282,7 +1283,7 @@
 
 	dev->trans_start = jiffies;
 
-	spin_unlock_irq(&rp->lock);
+	spin_unlock_irqrestore(&rp->lock, flags);
 
 	if (debug > 4) {
 		printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index e56cf6b..ced1446 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -1789,7 +1789,7 @@
 			 *	 mode
 			 */
 			if (vptr->rev_id < REV_ID_VT3216_A0) {
-				if (vptr->mii_status | VELOCITY_DUPLEX_FULL)
+				if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
 					BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
 				else
 					BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 31279c4..51e9ce4 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -70,6 +70,9 @@
 	struct sk_buff_head recv;
 	struct sk_buff_head send;
 
+	/* Work struct for refilling if we run low on memory. */
+	struct delayed_work refill;
+
 	/* Chain pages by the private ptr. */
 	struct page *pages;
 };
@@ -273,19 +276,22 @@
 	dev_kfree_skb(skb);
 }
 
-static void try_fill_recv_maxbufs(struct virtnet_info *vi)
+static bool try_fill_recv_maxbufs(struct virtnet_info *vi, gfp_t gfp)
 {
 	struct sk_buff *skb;
 	struct scatterlist sg[2+MAX_SKB_FRAGS];
 	int num, err, i;
+	bool oom = false;
 
 	sg_init_table(sg, 2+MAX_SKB_FRAGS);
 	for (;;) {
 		struct virtio_net_hdr *hdr;
 
 		skb = netdev_alloc_skb(vi->dev, MAX_PACKET_LEN + NET_IP_ALIGN);
-		if (unlikely(!skb))
+		if (unlikely(!skb)) {
+			oom = true;
 			break;
+		}
 
 		skb_reserve(skb, NET_IP_ALIGN);
 		skb_put(skb, MAX_PACKET_LEN);
@@ -296,7 +302,7 @@
 		if (vi->big_packets) {
 			for (i = 0; i < MAX_SKB_FRAGS; i++) {
 				skb_frag_t *f = &skb_shinfo(skb)->frags[i];
-				f->page = get_a_page(vi, GFP_ATOMIC);
+				f->page = get_a_page(vi, gfp);
 				if (!f->page)
 					break;
 
@@ -325,31 +331,35 @@
 	if (unlikely(vi->num > vi->max))
 		vi->max = vi->num;
 	vi->rvq->vq_ops->kick(vi->rvq);
+	return !oom;
 }
 
-static void try_fill_recv(struct virtnet_info *vi)
+/* Returns false if we couldn't fill entirely (OOM). */
+static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
 {
 	struct sk_buff *skb;
 	struct scatterlist sg[1];
 	int err;
+	bool oom = false;
 
-	if (!vi->mergeable_rx_bufs) {
-		try_fill_recv_maxbufs(vi);
-		return;
-	}
+	if (!vi->mergeable_rx_bufs)
+		return try_fill_recv_maxbufs(vi, gfp);
 
 	for (;;) {
 		skb_frag_t *f;
 
 		skb = netdev_alloc_skb(vi->dev, GOOD_COPY_LEN + NET_IP_ALIGN);
-		if (unlikely(!skb))
+		if (unlikely(!skb)) {
+			oom = true;
 			break;
+		}
 
 		skb_reserve(skb, NET_IP_ALIGN);
 
 		f = &skb_shinfo(skb)->frags[0];
-		f->page = get_a_page(vi, GFP_ATOMIC);
+		f->page = get_a_page(vi, gfp);
 		if (!f->page) {
+			oom = true;
 			kfree_skb(skb);
 			break;
 		}
@@ -373,6 +383,7 @@
 	if (unlikely(vi->num > vi->max))
 		vi->max = vi->num;
 	vi->rvq->vq_ops->kick(vi->rvq);
+	return !oom;
 }
 
 static void skb_recv_done(struct virtqueue *rvq)
@@ -385,6 +396,23 @@
 	}
 }
 
+static void refill_work(struct work_struct *work)
+{
+	struct virtnet_info *vi;
+	bool still_empty;
+
+	vi = container_of(work, struct virtnet_info, refill.work);
+	napi_disable(&vi->napi);
+	try_fill_recv(vi, GFP_KERNEL);
+	still_empty = (vi->num == 0);
+	napi_enable(&vi->napi);
+
+	/* In theory, this can happen: if we don't get any buffers in
+	 * we will *never* try to fill again. */
+	if (still_empty)
+		schedule_delayed_work(&vi->refill, HZ/2);
+}
+
 static int virtnet_poll(struct napi_struct *napi, int budget)
 {
 	struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
@@ -400,10 +428,10 @@
 		received++;
 	}
 
-	/* FIXME: If we oom and completely run out of inbufs, we need
-	 * to start a timer trying to fill more. */
-	if (vi->num < vi->max / 2)
-		try_fill_recv(vi);
+	if (vi->num < vi->max / 2) {
+		if (!try_fill_recv(vi, GFP_ATOMIC))
+			schedule_delayed_work(&vi->refill, 0);
+	}
 
 	/* Out of packets? */
 	if (received < budget) {
@@ -894,6 +922,7 @@
 	vi->vdev = vdev;
 	vdev->priv = vi;
 	vi->pages = NULL;
+	INIT_DELAYED_WORK(&vi->refill, refill_work);
 
 	/* If they give us a callback when all buffers are done, we don't need
 	 * the timer. */
@@ -942,7 +971,7 @@
 	}
 
 	/* Last of all, set up some receive buffers. */
-	try_fill_recv(vi);
+	try_fill_recv(vi, GFP_KERNEL);
 
 	/* If we didn't even get one input buffer, we're useless. */
 	if (vi->num == 0) {
@@ -959,6 +988,7 @@
 
 unregister:
 	unregister_netdev(dev);
+	cancel_delayed_work_sync(&vi->refill);
 free_vqs:
 	vdev->config->del_vqs(vdev);
 free:
@@ -987,6 +1017,7 @@
 	BUG_ON(vi->num != 0);
 
 	unregister_netdev(vi->dev);
+	cancel_delayed_work_sync(&vi->refill);
 
 	vdev->config->del_vqs(vi->vdev);
 
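Note on the virtio_net changes above: try_fill_recv() now reports allocation failure, and when a GFP_ATOMIC refill from NAPI context comes up empty, the job is handed to a delayed work item that may sleep and retry with GFP_KERNEL, rescheduling itself every HZ/2 while the ring stays empty. A minimal sketch of that fallback pattern, independent of virtio and using invented demo_* names:

	#include <linux/gfp.h>
	#include <linux/jiffies.h>
	#include <linux/slab.h>
	#include <linux/types.h>
	#include <linux/workqueue.h>

	struct demo_dev {
		struct delayed_work refill;
	};

	static bool demo_try_fill(struct demo_dev *d, gfp_t gfp)
	{
		void *buf = kmalloc(4096, gfp);

		if (!buf)
			return false;		/* caller decides how to retry */
		/* ... hand buf to the device; freed here only for the sketch ... */
		kfree(buf);
		return true;
	}

	static void demo_refill_work(struct work_struct *work)
	{
		struct demo_dev *d = container_of(work, struct demo_dev, refill.work);

		/* Process context: GFP_KERNEL may sleep and reclaim memory. */
		if (!demo_try_fill(d, GFP_KERNEL))
			schedule_delayed_work(&d->refill, HZ / 2);
	}

	static void demo_init(struct demo_dev *d)
	{
		INIT_DELAYED_WORK(&d->refill, demo_refill_work);
	}

	static void demo_napi_poll(struct demo_dev *d)
	{
		/* Atomic context: must not sleep, so defer to the work item. */
		if (!demo_try_fill(d, GFP_ATOMIC))
			schedule_delayed_work(&d->refill, 0);
	}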
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 3838f9f..8d58e6e 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -2893,45 +2893,27 @@
 	return 0;
 }
 
-static int ipw_fw_dma_add_buffer(struct ipw_priv *priv,
-				 u32 src_phys, u32 dest_address, u32 length)
+static int ipw_fw_dma_add_buffer(struct ipw_priv *priv, dma_addr_t *src_address,
+				 int nr, u32 dest_address, u32 len)
 {
-	u32 bytes_left = length;
-	u32 src_offset = 0;
-	u32 dest_offset = 0;
-	int status = 0;
+	int ret, i;
+	u32 size;
+
 	IPW_DEBUG_FW(">> \n");
-	IPW_DEBUG_FW_INFO("src_phys=0x%x dest_address=0x%x length=0x%x\n",
-			  src_phys, dest_address, length);
-	while (bytes_left > CB_MAX_LENGTH) {
-		status = ipw_fw_dma_add_command_block(priv,
-						      src_phys + src_offset,
-						      dest_address +
-						      dest_offset,
-						      CB_MAX_LENGTH, 0, 0);
-		if (status) {
+	IPW_DEBUG_FW_INFO("nr=%d dest_address=0x%x len=0x%x\n",
+			  nr, dest_address, len);
+
+	for (i = 0; i < nr; i++) {
+		size = min_t(u32, len - i * CB_MAX_LENGTH, CB_MAX_LENGTH);
+		ret = ipw_fw_dma_add_command_block(priv, src_address[i],
+						   dest_address +
+						   i * CB_MAX_LENGTH, size,
+						   0, 0);
+		if (ret) {
 			IPW_DEBUG_FW_INFO(": Failed\n");
 			return -1;
 		} else
 			IPW_DEBUG_FW_INFO(": Added new cb\n");
-
-		src_offset += CB_MAX_LENGTH;
-		dest_offset += CB_MAX_LENGTH;
-		bytes_left -= CB_MAX_LENGTH;
-	}
-
-	/* add the buffer tail */
-	if (bytes_left > 0) {
-		status =
-		    ipw_fw_dma_add_command_block(priv, src_phys + src_offset,
-						 dest_address + dest_offset,
-						 bytes_left, 0, 0);
-		if (status) {
-			IPW_DEBUG_FW_INFO(": Failed on the buffer tail\n");
-			return -1;
-		} else
-			IPW_DEBUG_FW_INFO
-			    (": Adding new cb - the buffer tail\n");
 	}
 
 	IPW_DEBUG_FW("<< \n");
@@ -3179,59 +3161,91 @@
 
 static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
 {
-	int rc = -1;
+	int ret = -1;
 	int offset = 0;
 	struct fw_chunk *chunk;
-	dma_addr_t shared_phys;
-	u8 *shared_virt;
+	int total_nr = 0;
+	int i;
+	struct pci_pool *pool;
+	u32 *virts[CB_NUMBER_OF_ELEMENTS_SMALL];
+	dma_addr_t phys[CB_NUMBER_OF_ELEMENTS_SMALL];
 
 	IPW_DEBUG_TRACE("<< : \n");
-	shared_virt = pci_alloc_consistent(priv->pci_dev, len, &shared_phys);
 
-	if (!shared_virt)
+	pool = pci_pool_create("ipw2200", priv->pci_dev, CB_MAX_LENGTH, 0, 0);
+	if (!pool) {
+		IPW_ERROR("pci_pool_create failed\n");
 		return -ENOMEM;
-
-	memmove(shared_virt, data, len);
+	}
 
 	/* Start the Dma */
-	rc = ipw_fw_dma_enable(priv);
+	ret = ipw_fw_dma_enable(priv);
 
 	/* the DMA is already ready this would be a bug. */
 	BUG_ON(priv->sram_desc.last_cb_index > 0);
 
 	do {
+		u32 chunk_len;
+		u8 *start;
+		int size;
+		int nr = 0;
+
 		chunk = (struct fw_chunk *)(data + offset);
 		offset += sizeof(struct fw_chunk);
+		chunk_len = le32_to_cpu(chunk->length);
+		start = data + offset;
+
+		nr = (chunk_len + CB_MAX_LENGTH - 1) / CB_MAX_LENGTH;
+		for (i = 0; i < nr; i++) {
+			virts[total_nr] = pci_pool_alloc(pool, GFP_KERNEL,
+							 &phys[total_nr]);
+			if (!virts[total_nr]) {
+				ret = -ENOMEM;
+				goto out;
+			}
+			size = min_t(u32, chunk_len - i * CB_MAX_LENGTH,
+				     CB_MAX_LENGTH);
+			memcpy(virts[total_nr], start, size);
+			start += size;
+			total_nr++;
+			/* We don't support fw chunk larger than 64*8K */
+			BUG_ON(total_nr > CB_NUMBER_OF_ELEMENTS_SMALL);
+		}
+
 		/* build DMA packet and queue up for sending */
 		/* dma to chunk->address, the chunk->length bytes from data +
 		 * offeset*/
 		/* Dma loading */
-		rc = ipw_fw_dma_add_buffer(priv, shared_phys + offset,
-					   le32_to_cpu(chunk->address),
-					   le32_to_cpu(chunk->length));
-		if (rc) {
+		ret = ipw_fw_dma_add_buffer(priv, &phys[total_nr - nr],
+					    nr, le32_to_cpu(chunk->address),
+					    chunk_len);
+		if (ret) {
 			IPW_DEBUG_INFO("dmaAddBuffer Failed\n");
 			goto out;
 		}
 
-		offset += le32_to_cpu(chunk->length);
+		offset += chunk_len;
 	} while (offset < len);
 
 	/* Run the DMA and wait for the answer */
-	rc = ipw_fw_dma_kick(priv);
-	if (rc) {
+	ret = ipw_fw_dma_kick(priv);
+	if (ret) {
 		IPW_ERROR("dmaKick Failed\n");
 		goto out;
 	}
 
-	rc = ipw_fw_dma_wait(priv);
-	if (rc) {
+	ret = ipw_fw_dma_wait(priv);
+	if (ret) {
 		IPW_ERROR("dmaWaitSync Failed\n");
 		goto out;
 	}
-      out:
-	pci_free_consistent(priv->pci_dev, len, shared_virt, shared_phys);
-	return rc;
+ out:
+	for (i = 0; i < total_nr; i++)
+		pci_pool_free(pool, virts[i], phys[i]);
+
+	pci_pool_destroy(pool);
+
+	return ret;
 }
 
 /* stop nic */
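Note on the ipw2200 firmware loader above: instead of one coherent allocation the size of the whole image (which can fail for large firmware), the chunks are now carved out of a pci_pool of CB_MAX_LENGTH-sized DMA buffers and each piece is queued to ipw_fw_dma_add_buffer() by its own DMA address. A bare-bones sketch of that pool API, with a demo_* function that is not part of the driver:

	#include <linux/errno.h>
	#include <linux/gfp.h>
	#include <linux/pci.h>

	#define DEMO_CHUNK_SIZE	8192	/* stand-in for CB_MAX_LENGTH */

	static int demo_pool_example(struct pci_dev *pdev)
	{
		struct pci_pool *pool;
		dma_addr_t phys;
		void *virt;

		pool = pci_pool_create("demo", pdev, DEMO_CHUNK_SIZE, 0, 0);
		if (!pool)
			return -ENOMEM;

		virt = pci_pool_alloc(pool, GFP_KERNEL, &phys);
		if (!virt) {
			pci_pool_destroy(pool);
			return -ENOMEM;
		}

		/* ... copy a firmware chunk into virt, point the DMA engine at phys ... */

		pci_pool_free(pool, virt, phys);
		pci_pool_destroy(pool);
		return 0;
	}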
diff --git a/drivers/net/wireless/orinoco/hw.c b/drivers/net/wireless/orinoco/hw.c
index 40d8dfa..359652d 100644
--- a/drivers/net/wireless/orinoco/hw.c
+++ b/drivers/net/wireless/orinoco/hw.c
@@ -644,7 +644,7 @@
 	int err = 0;
 	u8 tsc_arr[4][ORINOCO_SEQ_LEN];
 
-	if ((key < 0) || (key > 4))
+	if ((key < 0) || (key >= 4))
 		return -EINVAL;
 
 	err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENT_TKIP_IV,
diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187_dev.c
index 9679b29..2017ccc 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_dev.c
@@ -871,6 +871,9 @@
 	priv->aifsn[3] = 3; /* AIFSN[AC_BE] */
 	rtl818x_iowrite8(priv, &priv->map->ACM_CONTROL, 0);
 
+	/* ENEDCA flag must always be set, transmit issues? */
+	rtl818x_iowrite8(priv, &priv->map->MSR, RTL818X_MSR_ENEDCA);
+
 	return 0;
 }
 
@@ -1176,13 +1179,16 @@
 			rtl818x_iowrite8(priv, &priv->map->BSSID[i],
 					 info->bssid[i]);
 
+		if (priv->is_rtl8187b)
+			reg = RTL818X_MSR_ENEDCA;
+		else
+			reg = 0;
+
 		if (is_valid_ether_addr(info->bssid)) {
-			reg = RTL818X_MSR_INFRA;
-			if (priv->is_rtl8187b)
-				reg |= RTL818X_MSR_ENEDCA;
+			reg |= RTL818X_MSR_INFRA;
 			rtl818x_iowrite8(priv, &priv->map->MSR, reg);
 		} else {
-			reg = RTL818X_MSR_NO_LINK;
+			reg |= RTL818X_MSR_NO_LINK;
 			rtl818x_iowrite8(priv, &priv->map->MSR, reg);
 		}
 
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
index 9509477..4987040 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/yellowfin.c
@@ -346,7 +346,7 @@
 static int yellowfin_open(struct net_device *dev);
 static void yellowfin_timer(unsigned long data);
 static void yellowfin_tx_timeout(struct net_device *dev);
-static void yellowfin_init_ring(struct net_device *dev);
+static int yellowfin_init_ring(struct net_device *dev);
 static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb,
 					struct net_device *dev);
 static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance);
@@ -574,19 +574,24 @@
 {
 	struct yellowfin_private *yp = netdev_priv(dev);
 	void __iomem *ioaddr = yp->base;
-	int i;
+	int i, ret;
 
 	/* Reset the chip. */
 	iowrite32(0x80000000, ioaddr + DMACtrl);
 
-	i = request_irq(dev->irq, &yellowfin_interrupt, IRQF_SHARED, dev->name, dev);
-	if (i) return i;
+	ret = request_irq(dev->irq, &yellowfin_interrupt, IRQF_SHARED, dev->name, dev);
+	if (ret)
+		return ret;
 
 	if (yellowfin_debug > 1)
 		printk(KERN_DEBUG "%s: yellowfin_open() irq %d.\n",
 			   dev->name, dev->irq);
 
-	yellowfin_init_ring(dev);
+	ret = yellowfin_init_ring(dev);
+	if (ret) {
+		free_irq(dev->irq, dev);
+		return ret;
+	}
 
 	iowrite32(yp->rx_ring_dma, ioaddr + RxPtr);
 	iowrite32(yp->tx_ring_dma, ioaddr + TxPtr);
@@ -726,10 +731,10 @@
 }
 
 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
-static void yellowfin_init_ring(struct net_device *dev)
+static int yellowfin_init_ring(struct net_device *dev)
 {
 	struct yellowfin_private *yp = netdev_priv(dev);
-	int i;
+	int i, j;
 
 	yp->tx_full = 0;
 	yp->cur_rx = yp->cur_tx = 0;
@@ -754,6 +759,11 @@
 		yp->rx_ring[i].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
 			skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
 	}
+	if (i != RX_RING_SIZE) {
+		for (j = 0; j < i; j++)
+			dev_kfree_skb(yp->rx_skbuff[j]);
+		return -ENOMEM;
+	}
 	yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP);
 	yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
 
@@ -770,8 +780,6 @@
 	yp->tx_ring[--i].dbdma_cmd = cpu_to_le32(CMD_STOP | BRANCH_ALWAYS);
 #else
 {
-	int j;
-
 	/* Tx ring needs a pair of descriptors, the second for the status. */
 	for (i = 0; i < TX_RING_SIZE; i++) {
 		j = 2*i;
@@ -806,7 +814,7 @@
 }
 #endif
 	yp->tx_tail_desc = &yp->tx_status[0];
-	return;
+	return 0;
 }
 
 static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb,
diff --git a/include/linux/gen_stats.h b/include/linux/gen_stats.h
index 0ffa41d..710e901 100644
--- a/include/linux/gen_stats.h
+++ b/include/linux/gen_stats.h
@@ -22,6 +22,11 @@
 {
 	__u64	bytes;
 	__u32	packets;
+};
+struct gnet_stats_basic_packed
+{
+	__u64	bytes;
+	__u32	packets;
 } __attribute__ ((packed));
 
 /**
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 565eed8..c05fd71 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -16,7 +16,7 @@
 	u32				tcfc_capab;
 	int				tcfc_action;
 	struct tcf_t			tcfc_tm;
-	struct gnet_stats_basic		tcfc_bstats;
+	struct gnet_stats_basic_packed	tcfc_bstats;
 	struct gnet_stats_queue		tcfc_qstats;
 	struct gnet_stats_rate_est	tcfc_rate_est;
 	spinlock_t			tcfc_lock;
diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h
index d136b52..c148855 100644
--- a/include/net/gen_stats.h
+++ b/include/net/gen_stats.h
@@ -28,7 +28,7 @@
 					spinlock_t *lock, struct gnet_dump *d);
 
 extern int gnet_stats_copy_basic(struct gnet_dump *d,
-				 struct gnet_stats_basic *b);
+				 struct gnet_stats_basic_packed *b);
 extern int gnet_stats_copy_rate_est(struct gnet_dump *d,
 				    struct gnet_stats_rate_est *r);
 extern int gnet_stats_copy_queue(struct gnet_dump *d,
@@ -37,14 +37,14 @@
 
 extern int gnet_stats_finish_copy(struct gnet_dump *d);
 
-extern int gen_new_estimator(struct gnet_stats_basic *bstats,
+extern int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
 			     struct gnet_stats_rate_est *rate_est,
 			     spinlock_t *stats_lock, struct nlattr *opt);
-extern void gen_kill_estimator(struct gnet_stats_basic *bstats,
+extern void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
 			       struct gnet_stats_rate_est *rate_est);
-extern int gen_replace_estimator(struct gnet_stats_basic *bstats,
+extern int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
 				 struct gnet_stats_rate_est *rate_est,
 				 spinlock_t *stats_lock, struct nlattr *opt);
-extern bool gen_estimator_active(const struct gnet_stats_basic *bstats,
+extern bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
 				 const struct gnet_stats_rate_est *rate_est);
 #endif
diff --git a/include/net/netfilter/xt_rateest.h b/include/net/netfilter/xt_rateest.h
index 65d594d..ddbf37e 100644
--- a/include/net/netfilter/xt_rateest.h
+++ b/include/net/netfilter/xt_rateest.h
@@ -8,7 +8,7 @@
 	spinlock_t			lock;
 	struct gnet_estimator		params;
 	struct gnet_stats_rate_est	rstats;
-	struct gnet_stats_basic		bstats;
+	struct gnet_stats_basic_packed	bstats;
 };
 
 extern struct xt_rateest *xt_rateest_lookup(const char *name);
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 84b3fc2..a48a4cc 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -73,7 +73,7 @@
 	 */
 	unsigned long		state;
 	struct sk_buff_head	q;
-	struct gnet_stats_basic bstats;
+	struct gnet_stats_basic_packed bstats;
 	struct gnet_stats_queue	qstats;
 };
 
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 78e5bfc..493775f 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -81,7 +81,7 @@
 struct gen_estimator
 {
 	struct list_head	list;
-	struct gnet_stats_basic	*bstats;
+	struct gnet_stats_basic_packed	*bstats;
 	struct gnet_stats_rate_est	*rate_est;
 	spinlock_t		*stats_lock;
 	int			ewma_log;
@@ -165,7 +165,7 @@
 }
 
 static
-struct gen_estimator *gen_find_node(const struct gnet_stats_basic *bstats,
+struct gen_estimator *gen_find_node(const struct gnet_stats_basic_packed *bstats,
 				    const struct gnet_stats_rate_est *rate_est)
 {
 	struct rb_node *p = est_root.rb_node;
@@ -202,7 +202,7 @@
  *
  * NOTE: Called under rtnl_mutex
  */
-int gen_new_estimator(struct gnet_stats_basic *bstats,
+int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
 		      struct gnet_stats_rate_est *rate_est,
 		      spinlock_t *stats_lock,
 		      struct nlattr *opt)
@@ -262,7 +262,7 @@
  *
  * NOTE: Called under rtnl_mutex
  */
-void gen_kill_estimator(struct gnet_stats_basic *bstats,
+void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
 			struct gnet_stats_rate_est *rate_est)
 {
 	struct gen_estimator *e;
@@ -292,7 +292,7 @@
  *
  * Returns 0 on success or a negative error code.
  */
-int gen_replace_estimator(struct gnet_stats_basic *bstats,
+int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
 			  struct gnet_stats_rate_est *rate_est,
 			  spinlock_t *stats_lock, struct nlattr *opt)
 {
@@ -308,7 +308,7 @@
  *
  * Returns true if estimator is active, and false if not.
  */
-bool gen_estimator_active(const struct gnet_stats_basic *bstats,
+bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
 			  const struct gnet_stats_rate_est *rate_est)
 {
 	ASSERT_RTNL();
diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
index c3d0ffe..8569310 100644
--- a/net/core/gen_stats.c
+++ b/net/core/gen_stats.c
@@ -106,16 +106,21 @@
  * if the room in the socket buffer was not sufficient.
  */
 int
-gnet_stats_copy_basic(struct gnet_dump *d, struct gnet_stats_basic *b)
+gnet_stats_copy_basic(struct gnet_dump *d, struct gnet_stats_basic_packed *b)
 {
 	if (d->compat_tc_stats) {
 		d->tc_stats.bytes = b->bytes;
 		d->tc_stats.packets = b->packets;
 	}
 
-	if (d->tail)
-		return gnet_stats_copy(d, TCA_STATS_BASIC, b, sizeof(*b));
+	if (d->tail) {
+		struct gnet_stats_basic sb;
 
+		memset(&sb, 0, sizeof(sb));
+		sb.bytes = b->bytes;
+		sb.packets = b->packets;
+		return gnet_stats_copy(d, TCA_STATS_BASIC, &sb, sizeof(sb));
+	}
 	return 0;
 }
 
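Note on the gen_stats churn above and the matching header and scheduler updates: the kernel-internal byte/packet counters gain a packed layout, struct gnet_stats_basic_packed, while the structure exported to userspace over netlink must keep its original, padded size, so gnet_stats_copy_basic() copies the fields into an on-stack struct gnet_stats_basic before calling gnet_stats_copy(). A small userspace sketch of the size difference the copy protects against; demo_* structs mirror the two layouts and are not the real headers:

	#include <stdio.h>

	struct demo_basic {		/* shaped like struct gnet_stats_basic */
		unsigned long long bytes;
		unsigned int packets;
	};

	struct demo_basic_packed {	/* shaped like struct gnet_stats_basic_packed */
		unsigned long long bytes;
		unsigned int packets;
	} __attribute__((packed));

	int main(void)
	{
		/* Typically prints 16 vs 12 on 64-bit: packing drops the tail padding,
		 * which must not change the size of the TCA_STATS_BASIC attribute. */
		printf("unpacked=%zu packed=%zu\n",
		       sizeof(struct demo_basic), sizeof(struct demo_basic_packed));
		return 0;
	}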
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 0ac3091..0b4d0d3 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -323,6 +323,11 @@
 
 			udelay(USEC_PER_POLL);
 		}
+
+		WARN_ONCE(!irqs_disabled(),
+			"netpoll_send_skb(): %s enabled interrupts in poll (%pF)\n",
+			dev->name, ops->ndo_start_xmit);
+
 		local_irq_restore(flags);
 	}
 
diff --git a/net/core/sock.c b/net/core/sock.c
index 3ac34ea..30d5446 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1035,6 +1035,7 @@
 		sk->sk_prot = sk->sk_prot_creator = prot;
 		sock_lock_init(sk);
 		sock_net_set(sk, get_net(net));
+		atomic_set(&sk->sk_wmem_alloc, 1);
 	}
 
 	return sk;
@@ -1882,7 +1883,6 @@
 	 */
 	smp_wmb();
 	atomic_set(&sk->sk_refcnt, 1);
-	atomic_set(&sk->sk_wmem_alloc, 1);
 	atomic_set(&sk->sk_drops, 0);
 }
 EXPORT_SYMBOL(sock_init_data);
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 5b1af70..533afaa 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -951,7 +951,7 @@
 			addend += 4;
 	}
 	dev->needed_headroom = addend + hlen;
-	mtu -= dev->hard_header_len - addend;
+	mtu -= dev->hard_header_len + addend;
 
 	if (mtu < 68)
 		mtu = 68;
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index bf85d5f9..a123a32 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -306,8 +306,10 @@
 		    v4addr != htonl(INADDR_ANY) &&
 		    chk_addr_ret != RTN_LOCAL &&
 		    chk_addr_ret != RTN_MULTICAST &&
-		    chk_addr_ret != RTN_BROADCAST)
+		    chk_addr_ret != RTN_BROADCAST) {
+			err = -EADDRNOTAVAIL;
 			goto out;
+		}
 	} else {
 		if (addr_type != IPV6_ADDR_ANY) {
 			struct net_device *dev = NULL;
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 9208cf5..c45eee1 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -914,6 +914,7 @@
 	struct llc_sock *llc = llc_sk(sk);
 	int rc = 0;
 
+	memset(&sllc, 0, sizeof(sllc));
 	lock_sock(sk);
 	if (sock_flag(sk, SOCK_ZAPPED))
 		goto out;
diff --git a/net/netfilter/xt_RATEEST.c b/net/netfilter/xt_RATEEST.c
index 43f5676..d80b819 100644
--- a/net/netfilter/xt_RATEEST.c
+++ b/net/netfilter/xt_RATEEST.c
@@ -74,7 +74,7 @@
 xt_rateest_tg(struct sk_buff *skb, const struct xt_target_param *par)
 {
 	const struct xt_rateest_target_info *info = par->targinfo;
-	struct gnet_stats_basic *stats = &info->est->bstats;
+	struct gnet_stats_basic_packed *stats = &info->est->bstats;
 
 	spin_lock_bh(&info->est->lock);
 	stats->bytes += skb->len;
diff --git a/net/netfilter/xt_quota.c b/net/netfilter/xt_quota.c
index 98fc190..390b7d0 100644
--- a/net/netfilter/xt_quota.c
+++ b/net/netfilter/xt_quota.c
@@ -52,7 +52,7 @@
 
 	q->master = kmalloc(sizeof(*q->master), GFP_KERNEL);
 	if (q->master == NULL)
-		return -ENOMEM;
+		return false;
 
 	q->master->quota = q->quota;
 	return true;
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
index e943c16..4eb1ac9 100644
--- a/net/netrom/nr_route.c
+++ b/net/netrom/nr_route.c
@@ -630,23 +630,23 @@
 	return dev;
 }
 
-static ax25_digi *nr_call_to_digi(int ndigis, ax25_address *digipeaters)
+static ax25_digi *nr_call_to_digi(ax25_digi *digi, int ndigis,
+	ax25_address *digipeaters)
 {
-	static ax25_digi ax25_digi;
 	int i;
 
 	if (ndigis == 0)
 		return NULL;
 
 	for (i = 0; i < ndigis; i++) {
-		ax25_digi.calls[i]    = digipeaters[i];
-		ax25_digi.repeated[i] = 0;
+		digi->calls[i]    = digipeaters[i];
+		digi->repeated[i] = 0;
 	}
 
-	ax25_digi.ndigi      = ndigis;
-	ax25_digi.lastrepeat = -1;
+	digi->ndigi      = ndigis;
+	digi->lastrepeat = -1;
 
-	return &ax25_digi;
+	return digi;
 }
 
 /*
@@ -656,6 +656,7 @@
 {
 	struct nr_route_struct nr_route;
 	struct net_device *dev;
+	ax25_digi digi;
 	int ret;
 
 	switch (cmd) {
@@ -673,13 +674,15 @@
 			ret = nr_add_node(&nr_route.callsign,
 				nr_route.mnemonic,
 				&nr_route.neighbour,
-				nr_call_to_digi(nr_route.ndigis, nr_route.digipeaters),
+				nr_call_to_digi(&digi, nr_route.ndigis,
+						nr_route.digipeaters),
 				dev, nr_route.quality,
 				nr_route.obs_count);
 			break;
 		case NETROM_NEIGH:
 			ret = nr_add_neigh(&nr_route.callsign,
-				nr_call_to_digi(nr_route.ndigis, nr_route.digipeaters),
+				nr_call_to_digi(&digi, nr_route.ndigis,
+						nr_route.digipeaters),
 				dev, nr_route.quality);
 			break;
 		default:
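Note on the nr_route change above: nr_call_to_digi() used to return the address of a function-local static ax25_digi, so every caller shared (and could concurrently overwrite) one buffer; the ioctl path now passes in an ax25_digi that lives on its own stack. A small standalone illustration of that bug class, using invented demo_* names:

	#include <stdio.h>
	#include <string.h>

	struct demo_buf {
		char text[32];
	};

	/* Racy shape: one static buffer shared by every caller. */
	static struct demo_buf *demo_format_racy(const char *s)
	{
		static struct demo_buf shared;

		snprintf(shared.text, sizeof(shared.text), "%s", s);
		return &shared;
	}

	/* Fixed shape: the caller owns the storage, as nr_call_to_digi() now does. */
	static struct demo_buf *demo_format_safe(struct demo_buf *out, const char *s)
	{
		snprintf(out->text, sizeof(out->text), "%s", s);
		return out;
	}

	int main(void)
	{
		struct demo_buf mine;

		puts(demo_format_racy("shared storage")->text);
		puts(demo_format_safe(&mine, "caller-owned storage")->text);
		return 0;
	}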
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 2a8b83a..ab82f14 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -49,7 +49,7 @@
 	struct socket		*sock;		/* for closing */
 	u32			classid;	/* x:y type ID */
 	int			ref;		/* reference count */
-	struct gnet_stats_basic	bstats;
+	struct gnet_stats_basic_packed	bstats;
 	struct gnet_stats_queue	qstats;
 	struct atm_flow_data	*next;
 	struct atm_flow_data	*excess;	/* flow for excess traffic;
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 23a1676..d5798e1 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -128,7 +128,7 @@
 	long			avgidle;
 	long			deficit;	/* Saved deficit for WRR */
 	psched_time_t		penalized;
-	struct gnet_stats_basic bstats;
+	struct gnet_stats_basic_packed bstats;
 	struct gnet_stats_queue qstats;
 	struct gnet_stats_rate_est rate_est;
 	struct tc_cbq_xstats	xstats;
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index 7597fe1..12b2fb04 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -22,7 +22,7 @@
 	unsigned int			refcnt;
 	unsigned int			filter_cnt;
 
-	struct gnet_stats_basic		bstats;
+	struct gnet_stats_basic_packed		bstats;
 	struct gnet_stats_queue		qstats;
 	struct gnet_stats_rate_est	rate_est;
 	struct list_head		alist;
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 362c281..dad0144 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -116,7 +116,7 @@
 	struct Qdisc_class_common cl_common;
 	unsigned int	refcnt;		/* usage count */
 
-	struct gnet_stats_basic bstats;
+	struct gnet_stats_basic_packed bstats;
 	struct gnet_stats_queue qstats;
 	struct gnet_stats_rate_est rate_est;
 	unsigned int	level;		/* class level in hierarchy */
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 88cd026..ec4d463 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -74,7 +74,7 @@
 struct htb_class {
 	struct Qdisc_class_common common;
 	/* general class parameters */
-	struct gnet_stats_basic bstats;
+	struct gnet_stats_basic_packed bstats;
 	struct gnet_stats_queue qstats;
 	struct gnet_stats_rate_est rate_est;
 	struct tc_htb_xstats xstats;	/* our special stats */