Merge git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/v4l-dvb

* git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/v4l-dvb: (70 commits)
  V4L/DVB (7900): pvrusb: Fix Kconfig if DVB=m V4L_core=y
  V4L/DVB (7899): Fixes a few remaining Kbuild issues at common/tuners
  V4L/DVB (7898): Fix VIDEO_MEDIA Kconfig logic
  V4L/DVB (7895): tveeprom: update Hauppauge analog audio and video decoders
  V4L/DVB (7893): xc5000: bug-fix: allow multiple devices in a single system
  V4L/DVB (7891): cx18/ivtv: fix open() kernel oops
  V4L/DVB (7890): cx18: removed bogus and confusing conditional
  V4L/DVB (7889): cx18: improve HVR-1600 detection.
  V4L/DVB (7888): cx18: minor card definition updates.
  V4L/DVB (7887): cx18: fix Compro H900 analog support.
  V4L/DVB (7881): saa7134: fixed a compile warning in saa7134-core.c
  V4L/DVB (7880): saa7134: remove explicit GPIO initialization
  V4L/DVB (7879): Adding cx18 Support for mxl5005s
  V4L/DVB (7878): mxl5005s: Makefile and Kconfig additions
  V4L/DVB (7877): mxl5005s: Ensure debug is off
  V4L/DVB (7876): mxl5005s: Remove incorrect copyright holders
  V4L/DVB (7875): mxl5005s: Remove redundant functions
  V4L/DVB (7874): mxl5005s: Fix function statics
  V4L/DVB (7873): mxl5005s: Fix header includes.
  V4L/DVB (7872): mxl5005s: checkpatch.pl compliance
  ...
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt
index e5a819a..f5b7127 100644
--- a/Documentation/memory-barriers.txt
+++ b/Documentation/memory-barriers.txt
@@ -994,7 +994,17 @@
 	DATA DEPENDENCY	read_barrier_depends()	smp_read_barrier_depends()
 
 
-All CPU memory barriers unconditionally imply compiler barriers.
+All memory barriers except the data dependency barriers imply a compiler
+barrier.  Data dependencies do not impose any additional compiler ordering.
+
+Aside: in the case of a data dependency, the compiler would be expected to
+issue the loads in the correct order (e.g. a[b] would have to load the value
+of b before loading a[b]); however, the C specification does not guarantee
+that the compiler will not speculate the value of b (e.g. assume it is equal
+to 1) and load a before b (e.g. tmp = a[1]; if (b != 1) tmp = a[b];).  There
+is also the problem of the compiler reloading b after having loaded a[b],
+thus ending up with a newer copy of b than of a[b].  No consensus has yet been
+reached on these problems; the ACCESS_ONCE() macro is a good place to start.
 
 SMP memory barriers are reduced to compiler barriers on uniprocessor compiled
 systems because it is assumed that a CPU will appear to be self-consistent,
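A minimal sketch of the reload hazard described in the hunk above and one way
ACCESS_ONCE() might be applied to it, assuming kernel context where
ACCESS_ONCE() (from linux/compiler.h) and smp_read_barrier_depends() are
available; the array a[], the index b and the helper read_dependent() are
hypothetical, made up only for illustration:

	int a[16];	/* hypothetical shared array */
	int b;		/* hypothetical index written by another CPU */

	int read_dependent(void)
	{
		/*
		 * The volatile access forces the compiler to load b exactly
		 * once, so it cannot reload b after a[idx] has been read and
		 * end up with a newer b than a[b].
		 */
		int idx = ACCESS_ONCE(b);

		/* order the dependent load on the CPU side (matters on Alpha) */
		smp_read_barrier_depends();

		return a[idx];
	}

Whether this also rules out compiler speculation of b's value is part of the
open question mentioned above; the single volatile load at least guarantees
that a[idx] and any later use of idx see the same snapshot of b.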
diff --git a/arch/x86/kernel/acpi/realmode/wakeup.lds.S b/arch/x86/kernel/acpi/realmode/wakeup.lds.S
index 22fab6c..7da00b7 100644
--- a/arch/x86/kernel/acpi/realmode/wakeup.lds.S
+++ b/arch/x86/kernel/acpi/realmode/wakeup.lds.S
@@ -12,11 +12,6 @@
 
 SECTIONS
 {
-	. = HEADER_OFFSET;
-	.header : {
-		 *(.header)
-	}
-
 	. = 0;
 	.text : {
 		 *(.text*)
@@ -50,6 +45,11 @@
 		__bss_end = .;
 	}
 
+	. = HEADER_OFFSET;
+	.header : {
+		*(.header)
+	}
+
 	. = ALIGN(16);
 	_end = .;
 
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index fb03ef3..a7835f2 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -1303,6 +1303,9 @@
 #define genregs32_get		genregs_get
 #define genregs32_set		genregs_set
 
+#define user_i387_ia32_struct	user_i387_struct
+#define user32_fxsr_struct	user_fxsr_struct
+
 #endif	/* CONFIG_X86_64 */
 
 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
@@ -1315,13 +1318,13 @@
 	},
 	[REGSET_FP] = {
 		.core_note_type = NT_PRFPREG,
-		.n = sizeof(struct user_i387_struct) / sizeof(u32),
+		.n = sizeof(struct user_i387_ia32_struct) / sizeof(u32),
 		.size = sizeof(u32), .align = sizeof(u32),
 		.active = fpregs_active, .get = fpregs_get, .set = fpregs_set
 	},
 	[REGSET_XFP] = {
 		.core_note_type = NT_PRXFPREG,
-		.n = sizeof(struct user_i387_struct) / sizeof(u32),
+		.n = sizeof(struct user32_fxsr_struct) / sizeof(u32),
 		.size = sizeof(u32), .align = sizeof(u32),
 		.active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set
 	},
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index c0c68c1..6f80b85 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -12,6 +12,7 @@
 #include <asm/mpspec.h>
 #include <asm/apicdef.h>
 
+#ifdef CONFIG_X86_LOCAL_APIC
 unsigned int num_processors;
 unsigned disabled_cpus __cpuinitdata;
 /* Processor that is doing the boot up */
@@ -23,8 +24,9 @@
 
 /* Bitmask of physically existing CPUs */
 physid_mask_t phys_cpu_present_map;
+#endif
 
-#if defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) && defined(CONFIG_SMP)
+#if defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) && defined(CONFIG_X86_SMP)
 /*
  * Copy data used in early init routines from the initial arrays to the
  * per cpu data areas.  These arrays then become expendable and the
diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c
index f2fc8fe..6dff128 100644
--- a/arch/x86/kernel/setup_64.c
+++ b/arch/x86/kernel/setup_64.c
@@ -951,7 +951,7 @@
 static void __cpuinit early_init_centaur(struct cpuinfo_x86 *c)
 {
 	if (c->x86 == 0x6 && c->x86_model >= 0xf)
-		set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
+		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 }
 
 static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index 8f75893..0cb7aad 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -231,7 +231,8 @@
 	wmb();
 
 	/* Send a message to other CPUs */
-	if (cpus_equal(mask, allbutself))
+	if (cpus_equal(mask, allbutself) &&
+	    cpus_equal(cpu_online_map, cpu_callout_map))
 		send_IPI_allbutself(CALL_FUNCTION_VECTOR);
 	else
 		send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 6b087ab..3898849 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -86,6 +86,7 @@
 
 #ifdef CONFIG_X86_32
 u8 apicid_2_node[MAX_APICID];
+static int low_mappings;
 #endif
 
 /* State of each CPU */
@@ -326,6 +327,12 @@
 		enable_8259A_irq(0);
 	}
 
+#ifdef CONFIG_X86_32
+	while (low_mappings)
+		cpu_relax();
+	__flush_tlb_all();
+#endif
+
 	/* This must be done before setting cpu_online_map */
 	set_cpu_sibling_map(raw_smp_processor_id());
 	wmb();
@@ -1040,14 +1047,20 @@
 #ifdef CONFIG_X86_32
 	/* init low mem mapping */
 	clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
-			min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
+		min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
 	flush_tlb_all();
-#endif
+	low_mappings = 1;
 
 	err = do_boot_cpu(apicid, cpu);
-	if (err < 0) {
+
+	zap_low_mappings();
+	low_mappings = 0;
+#else
+	err = do_boot_cpu(apicid, cpu);
+#endif
+	if (err) {
 		Dprintk("do_boot_cpu failed %d\n", err);
-		return err;
+		return -EIO;
 	}
 
 	/*
@@ -1259,9 +1272,6 @@
 	setup_ioapic_dest();
 #endif
 	check_nmi_watchdog();
-#ifdef CONFIG_X86_32
-	zap_low_mappings();
-#endif
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
index 58882f9..f6c05d0 100644
--- a/arch/x86/kernel/x8664_ksyms_64.c
+++ b/arch/x86/kernel/x8664_ksyms_64.c
@@ -2,6 +2,7 @@
    All C exports should go in the respective C files. */
 
 #include <linux/module.h>
+#include <net/checksum.h>
 #include <linux/smp.h>
 
 #include <asm/processor.h>
@@ -29,6 +30,8 @@
 EXPORT_SYMBOL(copy_page);
 EXPORT_SYMBOL(clear_page);
 
+EXPORT_SYMBOL(csum_partial);
+
 /*
  * Export string functions. We normally rely on gcc builtin for most of these,
  * but gcc sometimes decides not to inline them.
diff --git a/arch/x86/lib/csum-partial_64.c b/arch/x86/lib/csum-partial_64.c
index bc503f5..bf51144 100644
--- a/arch/x86/lib/csum-partial_64.c
+++ b/arch/x86/lib/csum-partial_64.c
@@ -136,8 +136,6 @@
 						(__force u32)sum);
 }
 
-EXPORT_SYMBOL(csum_partial);
-
 /*
  * this routine is used for miscellaneous IP-like checksums, mainly
  * in icmp.c
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index de236e4..ec30d10 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -438,8 +438,6 @@
 {
 	int i;
 
-	save_pg_dir();
-
 	/*
 	 * Zap initial low-memory mappings.
 	 *
@@ -663,16 +661,8 @@
 		test_wp_bit();
 
 	cpa_init();
-
-	/*
-	 * Subtle. SMP is doing it's boot stuff late (because it has to
-	 * fork idle threads) - but it also needs low mappings for the
-	 * protected-mode entry to work. We zap these entries only after
-	 * the WP-bit has been tested.
-	 */
-#ifndef CONFIG_SMP
+	save_pg_dir();
 	zap_low_mappings();
-#endif
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 60adbe2..bcb1a8e 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -555,7 +555,7 @@
 		"%s:%d /dev/mem ioremap_change_attr failed %s for %Lx-%Lx\n",
 			current->comm, current->pid,
 			cattr_name(flags),
-			offset, offset + size);
+			offset, (unsigned long long)(offset + size));
 		return 0;
 	}
 
@@ -576,7 +576,7 @@
 		"%s:%d /dev/mem expected mapping type %s for %Lx-%Lx, got %s\n",
 			current->comm, current->pid,
 			cattr_name(want_flags),
-			addr, addr + size,
+			addr, (unsigned long long)(addr + size),
 			cattr_name(flags));
 	}
 }
diff --git a/block/genhd.c b/block/genhd.c
index fda9c7a..129ad93 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -653,7 +653,7 @@
 EXPORT_SYMBOL_GPL(genhd_media_change_notify);
 #endif  /*  0  */
 
-dev_t blk_lookup_devt(const char *name)
+dev_t blk_lookup_devt(const char *name, int part)
 {
 	struct device *dev;
 	dev_t devt = MKDEV(0, 0);
@@ -661,7 +661,11 @@
 	mutex_lock(&block_class_lock);
 	list_for_each_entry(dev, &block_class.devices, node) {
 		if (strcmp(dev->bus_id, name) == 0) {
-			devt = dev->devt;
+			struct gendisk *disk = dev_to_disk(dev);
+
+			if (part < disk->minors)
+				devt = MKDEV(MAJOR(dev->devt),
+					     MINOR(dev->devt) + part);
 			break;
 		}
 	}
@@ -669,7 +673,6 @@
 
 	return devt;
 }
-
 EXPORT_SYMBOL(blk_lookup_devt);
 
 struct gendisk *alloc_disk(int minors)
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 0ef00e8..e085af0 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -140,7 +140,6 @@
 
 	pr_debug("device class '%s': registering\n", cls->name);
 
-	INIT_LIST_HEAD(&cls->children);
 	INIT_LIST_HEAD(&cls->devices);
 	INIT_LIST_HEAD(&cls->interfaces);
 	kset_init(&cls->class_dirs);
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 8ce6de5..937e825 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -53,11 +53,13 @@
 {
         return blocking_notifier_chain_register(&memory_chain, nb);
 }
+EXPORT_SYMBOL(register_memory_notifier);
 
 void unregister_memory_notifier(struct notifier_block *nb)
 {
         blocking_notifier_chain_unregister(&memory_chain, nb);
 }
+EXPORT_SYMBOL(unregister_memory_notifier);
 
 /*
  * register_memory - Setup a sysfs device for a memory block
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index d27f54a..9f6cc8a 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2426,7 +2426,7 @@
 
 config EHEA
 	tristate "eHEA Ethernet support"
-	depends on IBMEBUS && INET && SPARSEMEM
+	depends on IBMEBUS && INET && SPARSEMEM && MEMORY_HOTPLUG
 	select INET_LRO
 	---help---
 	  This driver supports the IBM pSeries eHEA ethernet adapter.
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 0afe522..9c2394d 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -1,7 +1,7 @@
 /*
  * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
  * Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com>
- * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
+ * Copyright(c) 2006 - 2008 Jay Cliburn <jcliburn@gmail.com>
  *
  * Derived from Intel e1000 driver
  * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
@@ -36,7 +36,6 @@
  * A very incomplete list of things that need to be dealt with:
  *
  * TODO:
- * Wake on LAN.
  * Add more ethtool functions.
  * Fix abstruse irq enable/disable condition described here:
  *	http://marc.theaimsgroup.com/?l=linux-netdev&m=116398508500553&w=2
@@ -638,21 +637,18 @@
 }
 
 /*
- *TODO: do something or get rid of this
+ * Force the PHY into power saving mode using vendor magic.
  */
 #ifdef CONFIG_PM
-static s32 atl1_phy_enter_power_saving(struct atl1_hw *hw)
+static void atl1_phy_enter_power_saving(struct atl1_hw *hw)
 {
-/*    s32 ret_val;
- *    u16 phy_data;
- */
+	atl1_write_phy_reg(hw, MII_DBG_ADDR, 0);
+	atl1_write_phy_reg(hw, MII_DBG_DATA, 0x124E);
+	atl1_write_phy_reg(hw, MII_DBG_ADDR, 2);
+	atl1_write_phy_reg(hw, MII_DBG_DATA, 0x3000);
+	atl1_write_phy_reg(hw, MII_DBG_ADDR, 3);
+	atl1_write_phy_reg(hw, MII_DBG_DATA, 0);
 
-/*
-    ret_val = atl1_write_phy_reg(hw, ...);
-    ret_val = atl1_write_phy_reg(hw, ...);
-    ....
-*/
-	return 0;
 }
 #endif
 
@@ -2784,64 +2780,93 @@
 	struct atl1_hw *hw = &adapter->hw;
 	u32 ctrl = 0;
 	u32 wufc = adapter->wol;
+	u32 val;
+	int retval;
+	u16 speed;
+	u16 duplex;
 
 	netif_device_detach(netdev);
 	if (netif_running(netdev))
 		atl1_down(adapter);
 
+	retval = pci_save_state(pdev);
+	if (retval)
+		return retval;
+
 	atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl);
 	atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl);
-	if (ctrl & BMSR_LSTATUS)
+	val = ctrl & BMSR_LSTATUS;
+	if (val)
 		wufc &= ~ATLX_WUFC_LNKC;
 
-	/* reduce speed to 10/100M */
-	if (wufc) {
-		atl1_phy_enter_power_saving(hw);
-		/* if resume, let driver to re- setup link */
-		hw->phy_configured = false;
-		atl1_set_mac_addr(hw);
-		atlx_set_multi(netdev);
+	if (val && wufc) {
+		val = atl1_get_speed_and_duplex(hw, &speed, &duplex);
+		if (val) {
+			if (netif_msg_ifdown(adapter))
+				dev_printk(KERN_DEBUG, &pdev->dev,
+					"error getting speed/duplex\n");
+			goto disable_wol;
+		}
 
 		ctrl = 0;
-		/* turn on magic packet wol */
+
+		/* enable magic packet WOL */
 		if (wufc & ATLX_WUFC_MAG)
-			ctrl = WOL_MAGIC_EN | WOL_MAGIC_PME_EN;
-
-		/* turn on Link change WOL */
-		if (wufc & ATLX_WUFC_LNKC)
-			ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN);
+			ctrl |= (WOL_MAGIC_EN | WOL_MAGIC_PME_EN);
 		iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL);
+		ioread32(hw->hw_addr + REG_WOL_CTRL);
 
-		/* turn on all-multi mode if wake on multicast is enabled */
-		ctrl = ioread32(hw->hw_addr + REG_MAC_CTRL);
-		ctrl &= ~MAC_CTRL_DBG;
-		ctrl &= ~MAC_CTRL_PROMIS_EN;
-		if (wufc & ATLX_WUFC_MC)
-			ctrl |= MAC_CTRL_MC_ALL_EN;
-		else
-			ctrl &= ~MAC_CTRL_MC_ALL_EN;
-
-		/* turn on broadcast mode if wake on-BC is enabled */
-		if (wufc & ATLX_WUFC_BC)
+		/* configure the mac */
+		ctrl = MAC_CTRL_RX_EN;
+		ctrl |= ((u32)((speed == SPEED_1000) ? MAC_CTRL_SPEED_1000 :
+			MAC_CTRL_SPEED_10_100) << MAC_CTRL_SPEED_SHIFT);
+		if (duplex == FULL_DUPLEX)
+			ctrl |= MAC_CTRL_DUPLX;
+		ctrl |= (((u32)adapter->hw.preamble_len &
+			MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT);
+		if (adapter->vlgrp)
+			ctrl |= MAC_CTRL_RMV_VLAN;
+		if (wufc & ATLX_WUFC_MAG)
 			ctrl |= MAC_CTRL_BC_EN;
-		else
-			ctrl &= ~MAC_CTRL_BC_EN;
-
-		/* enable RX */
-		ctrl |= MAC_CTRL_RX_EN;
 		iowrite32(ctrl, hw->hw_addr + REG_MAC_CTRL);
-		pci_enable_wake(pdev, PCI_D3hot, 1);
-		pci_enable_wake(pdev, PCI_D3cold, 1);
-	} else {
-		iowrite32(0, hw->hw_addr + REG_WOL_CTRL);
-		pci_enable_wake(pdev, PCI_D3hot, 0);
-		pci_enable_wake(pdev, PCI_D3cold, 0);
+		ioread32(hw->hw_addr + REG_MAC_CTRL);
+
+		/* poke the PHY */
+		ctrl = ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
+		ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
+		iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC);
+		ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
+
+		pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
+		goto exit;
 	}
 
-	pci_save_state(pdev);
-	pci_disable_device(pdev);
+	if (!val && wufc) {
+		ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN);
+		iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL);
+		ioread32(hw->hw_addr + REG_WOL_CTRL);
+		iowrite32(0, hw->hw_addr + REG_MAC_CTRL);
+		ioread32(hw->hw_addr + REG_MAC_CTRL);
+		hw->phy_configured = false;
+		pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
+		goto exit;
+	}
 
-	pci_set_power_state(pdev, PCI_D3hot);
+disable_wol:
+	iowrite32(0, hw->hw_addr + REG_WOL_CTRL);
+	ioread32(hw->hw_addr + REG_WOL_CTRL);
+	ctrl = ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
+	ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
+	iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC);
+	ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
+	atl1_phy_enter_power_saving(hw);
+	hw->phy_configured = false;
+	pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
+exit:
+	if (netif_running(netdev))
+		pci_disable_msi(adapter->pdev);
+	pci_disable_device(pdev);
+	pci_set_power_state(pdev, pci_choose_state(pdev, state));
 
 	return 0;
 }
@@ -2855,20 +2880,26 @@
 	pci_set_power_state(pdev, PCI_D0);
 	pci_restore_state(pdev);
 
-	/* FIXME: check and handle */
 	err = pci_enable_device(pdev);
+	if (err) {
+		if (netif_msg_ifup(adapter))
+			dev_printk(KERN_DEBUG, &pdev->dev,
+				"error enabling pci device\n");
+		return err;
+	}
+
+	pci_set_master(pdev);
+	iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL);
 	pci_enable_wake(pdev, PCI_D3hot, 0);
 	pci_enable_wake(pdev, PCI_D3cold, 0);
 
-	iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL);
-	atl1_reset(adapter);
+	atl1_reset_hw(&adapter->hw);
+	adapter->cmb.cmb->int_stats = 0;
 
 	if (netif_running(netdev))
 		atl1_up(adapter);
 	netif_device_attach(netdev);
 
-	atl1_via_workaround(adapter);
-
 	return 0;
 }
 #else
@@ -2876,6 +2907,13 @@
 #define atl1_resume NULL
 #endif
 
+static void atl1_shutdown(struct pci_dev *pdev)
+{
+#ifdef CONFIG_PM
+	atl1_suspend(pdev, PMSG_SUSPEND);
+#endif
+}
+
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void atl1_poll_controller(struct net_device *netdev)
 {
@@ -3122,7 +3160,8 @@
 	.probe = atl1_probe,
 	.remove = __devexit_p(atl1_remove),
 	.suspend = atl1_suspend,
-	.resume = atl1_resume
+	.resume = atl1_resume,
+	.shutdown = atl1_shutdown
 };
 
 /*
diff --git a/drivers/net/atlx/atl1.h b/drivers/net/atlx/atl1.h
index 51893d6..a5015b1 100644
--- a/drivers/net/atlx/atl1.h
+++ b/drivers/net/atlx/atl1.h
@@ -1,7 +1,7 @@
 /*
  * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
  * Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com>
- * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
+ * Copyright(c) 2006 - 2008 Jay Cliburn <jcliburn@gmail.com>
  *
  * Derived from Intel e1000 driver
  * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
diff --git a/drivers/net/atlx/atlx.c b/drivers/net/atlx/atlx.c
index f06b854..b3e7fcf 100644
--- a/drivers/net/atlx/atlx.c
+++ b/drivers/net/atlx/atlx.c
@@ -2,7 +2,7 @@
  *
  * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
  * Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com>
- * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
+ * Copyright(c) 2006 - 2008 Jay Cliburn <jcliburn@gmail.com>
  * Copyright(c) 2007 Atheros Corporation. All rights reserved.
  *
  * Derived from Intel e1000 driver
diff --git a/drivers/net/atlx/atlx.h b/drivers/net/atlx/atlx.h
index 3be7c09..297a03d 100644
--- a/drivers/net/atlx/atlx.h
+++ b/drivers/net/atlx/atlx.h
@@ -2,7 +2,7 @@
  *
  * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
  * Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com>
- * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
+ * Copyright(c) 2006 - 2008 Jay Cliburn <jcliburn@gmail.com>
  * Copyright(c) 2007 Atheros Corporation. All rights reserved.
  *
  * Derived from Intel e1000 driver
@@ -29,7 +29,7 @@
 #include <linux/module.h>
 #include <linux/types.h>
 
-#define ATLX_DRIVER_VERSION "2.1.1"
+#define ATLX_DRIVER_VERSION "2.1.3"
 MODULE_AUTHOR("Xiong Huang <xiong.huang@atheros.com>, \
 	Chris Snook <csnook@redhat.com>, Jay Cliburn <jcliburn@gmail.com>");
 MODULE_LICENSE("GPL");
@@ -460,6 +460,9 @@
 #define MII_ATLX_PSSR_100MBS		0x4000	/* 01=100Mbs */
 #define MII_ATLX_PSSR_1000MBS		0x8000	/* 10=1000Mbs */
 
+#define MII_DBG_ADDR			0x1D
+#define MII_DBG_DATA			0x1E
+
 /* PCI Command Register Bit Definitions */
 #define PCI_REG_COMMAND			0x04	/* PCI Command Register */
 #define CMD_IO_SPACE			0x0001
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
index 4fdb13f..acebe43 100644
--- a/drivers/net/cxgb3/adapter.h
+++ b/drivers/net/cxgb3/adapter.h
@@ -71,6 +71,7 @@
 	USING_MSIX = (1 << 2),
 	QUEUES_BOUND = (1 << 3),
 	TP_PARITY_INIT = (1 << 4),
+	NAPI_INIT = (1 << 5),
 };
 
 struct fl_pg_chunk {
diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h
index 91ee727..579bee4 100644
--- a/drivers/net/cxgb3/common.h
+++ b/drivers/net/cxgb3/common.h
@@ -698,6 +698,7 @@
 void early_hw_init(struct adapter *adapter, const struct adapter_info *ai);
 int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
 		    int reset);
+int t3_replay_prep_adapter(struct adapter *adapter);
 void t3_led_ready(struct adapter *adapter);
 void t3_fatal_err(struct adapter *adapter);
 void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on);
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index ce949d5..3a31272 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -421,6 +421,13 @@
 			netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
 				       64);
 	}
+
+	/*
+	 * netif_napi_add() can be called only once per napi_struct because it
+	 * adds each new napi_struct to a list.  Record that this has been done
+	 * (the NAPI_INIT flag) so it is not called again, e.g. during EEH recovery.
+	 */
+	adap->flags |= NAPI_INIT;
 }
 
 /*
@@ -896,7 +903,8 @@
 			goto out;
 
 		setup_rss(adap);
-		init_napi(adap);
+		if (!(adap->flags & NAPI_INIT))
+			init_napi(adap);
 		adap->flags |= FULL_INIT_DONE;
 	}
 
@@ -999,7 +1007,7 @@
 		return 0;
 
 	if (!adap_up && (err = cxgb_up(adapter)) < 0)
-		return err;
+		goto out;
 
 	t3_tp_set_offload_mode(adapter, 1);
 	tdev->lldev = adapter->port[0];
@@ -1061,10 +1069,8 @@
 	int other_ports = adapter->open_device_map & PORT_MASK;
 	int err;
 
-	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) {
-		quiesce_rx(adapter);
+	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
 		return err;
-	}
 
 	set_bit(pi->port_id, &adapter->open_device_map);
 	if (is_offload(adapter) && !ofld_disable) {
@@ -2424,14 +2430,11 @@
 	    test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
 		offload_close(&adapter->tdev);
 
-	/* Free sge resources */
-	t3_free_sge_resources(adapter);
-
 	adapter->flags &= ~FULL_INIT_DONE;
 
 	pci_disable_device(pdev);
 
-	/* Request a slot slot reset. */
+	/* Request a slot reset. */
 	return PCI_ERS_RESULT_NEED_RESET;
 }
 
@@ -2448,13 +2451,20 @@
 	if (pci_enable_device(pdev)) {
 		dev_err(&pdev->dev,
 			"Cannot re-enable PCI device after reset.\n");
-		return PCI_ERS_RESULT_DISCONNECT;
+		goto err;
 	}
 	pci_set_master(pdev);
+	pci_restore_state(pdev);
 
-	t3_prep_adapter(adapter, adapter->params.info, 1);
+	/* Free sge resources */
+	t3_free_sge_resources(adapter);
+
+	if (t3_replay_prep_adapter(adapter))
+		goto err;
 
 	return PCI_ERS_RESULT_RECOVERED;
+err:
+	return PCI_ERS_RESULT_DISCONNECT;
 }
 
 /**
@@ -2483,13 +2493,6 @@
 			netif_device_attach(netdev);
 		}
 	}
-
-	if (is_offload(adapter)) {
-		__set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
-		if (offload_open(adapter->port[0]))
-			printk(KERN_WARNING
-			       "Could not bring back offload capabilities\n");
-	}
 }
 
 static struct pci_error_handlers t3_err_handler = {
@@ -2608,6 +2611,7 @@
 	}
 
 	pci_set_master(pdev);
+	pci_save_state(pdev);
 
 	mmio_start = pci_resource_start(pdev, 0);
 	mmio_len = pci_resource_len(pdev, 0);
diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h
index 02dbbb3..5671788 100644
--- a/drivers/net/cxgb3/regs.h
+++ b/drivers/net/cxgb3/regs.h
@@ -444,6 +444,14 @@
 
 #define A_PCIE_CFG 0x88
 
+#define S_ENABLELINKDWNDRST    21
+#define V_ENABLELINKDWNDRST(x) ((x) << S_ENABLELINKDWNDRST)
+#define F_ENABLELINKDWNDRST    V_ENABLELINKDWNDRST(1U)
+
+#define S_ENABLELINKDOWNRST    20
+#define V_ENABLELINKDOWNRST(x) ((x) << S_ENABLELINKDOWNRST)
+#define F_ENABLELINKDOWNRST    V_ENABLELINKDOWNRST(1U)
+
 #define S_PCIE_CLIDECEN    16
 #define V_PCIE_CLIDECEN(x) ((x) << S_PCIE_CLIDECEN)
 #define F_PCIE_CLIDECEN    V_PCIE_CLIDECEN(1U)
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 98a6bbd..796eb30 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -539,6 +539,31 @@
 }
 
 /**
+ *	t3_reset_qset - reset a sge qset
+ *	@q: the queue set
+ *
+ *	Reset the qset structure.  The NAPI structure is preserved
+ *	in the event of the qset's reincarnation, for example during
+ *	EEH recovery.
+ */
+static void t3_reset_qset(struct sge_qset *q)
+{
+	if (q->adap &&
+	    !(q->adap->flags & NAPI_INIT)) {
+		memset(q, 0, sizeof(*q));
+		return;
+	}
+
+	q->adap = NULL;
+	memset(&q->rspq, 0, sizeof(q->rspq));
+	memset(q->fl, 0, sizeof(struct sge_fl) * SGE_RXQ_PER_SET);
+	memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET);
+	q->txq_stopped = 0;
+	memset(&q->tx_reclaim_timer, 0, sizeof(q->tx_reclaim_timer));
+}
+
+
+/**
  *	free_qset - free the resources of an SGE queue set
  *	@adapter: the adapter owning the queue set
  *	@q: the queue set
@@ -594,7 +619,7 @@
 				  q->rspq.desc, q->rspq.phys_addr);
 	}
 
-	memset(q, 0, sizeof(*q));
+	t3_reset_qset(q);
 }
 
 /**
@@ -1365,7 +1390,7 @@
  */
 int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
 {
-	int ret; 
+	int ret;
 	local_bh_disable();
 	ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
 	local_bh_enable();
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index a99496a..d405a93 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -3264,6 +3264,7 @@
 
 	t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
 	t3_set_reg_field(adap, A_PCIE_CFG, 0,
+			 F_ENABLELINKDWNDRST | F_ENABLELINKDOWNRST |
 			 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
 }
 
@@ -3655,3 +3656,30 @@
 	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
 			 F_GPIO0_OUT_VAL);
 }
+
+int t3_replay_prep_adapter(struct adapter *adapter)
+{
+	const struct adapter_info *ai = adapter->params.info;
+	unsigned int i, j = 0;
+	int ret;
+
+	early_hw_init(adapter, ai);
+	ret = init_parity(adapter);
+	if (ret)
+		return ret;
+
+	for_each_port(adapter, i) {
+		struct port_info *p = adap2pinfo(adapter, i);
+		while (!adapter->params.vpd.port_type[j])
+			++j;
+
+		p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
+					ai->mdio_ops);
+
+		p->phy.ops->power_down(&p->phy, 1);
+		++j;
+	}
+
+	return 0;
+}
+
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index e6fe261..d45bcd2 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -117,6 +117,9 @@
 
 	struct mutex	 addr_lock;	/* phy and eeprom access lock */
 
+	struct delayed_work phy_poll;
+	struct net_device  *ndev;
+
 	spinlock_t lock;
 
 	struct mii_if_info mii;
@@ -297,6 +300,10 @@
 	}
 }
 
+static void dm9000_schedule_poll(board_info_t *db)
+{
+	schedule_delayed_work(&db->phy_poll, HZ * 2);
+}
 
 /* Our watchdog timed out. Called by the networking layer */
 static void dm9000_timeout(struct net_device *dev)
@@ -465,6 +472,17 @@
  	.set_eeprom		= dm9000_set_eeprom,
 };
 
+static void
+dm9000_poll_work(struct work_struct *w)
+{
+	struct delayed_work *dw = container_of(w, struct delayed_work, work);
+	board_info_t *db = container_of(dw, board_info_t, phy_poll);
+
+	mii_check_media(&db->mii, netif_msg_link(db), 0);
+
+	if (netif_running(db->ndev))
+		dm9000_schedule_poll(db);
+}
 
 /* dm9000_release_board
  *
@@ -503,7 +521,7 @@
 /*
  * Search DM9000 board, allocate space and register it
  */
-static int
+static int __devinit
 dm9000_probe(struct platform_device *pdev)
 {
 	struct dm9000_plat_data *pdata = pdev->dev.platform_data;
@@ -525,17 +543,21 @@
 
 	SET_NETDEV_DEV(ndev, &pdev->dev);
 
-	dev_dbg(&pdev->dev, "dm9000_probe()");
+	dev_dbg(&pdev->dev, "dm9000_probe()\n");
 
 	/* setup board info structure */
 	db = (struct board_info *) ndev->priv;
 	memset(db, 0, sizeof (*db));
 
 	db->dev = &pdev->dev;
+	db->ndev = ndev;
 
 	spin_lock_init(&db->lock);
 	mutex_init(&db->addr_lock);
 
+	INIT_DELAYED_WORK(&db->phy_poll, dm9000_poll_work);
+
+
 	if (pdev->num_resources < 2) {
 		ret = -ENODEV;
 		goto out;
@@ -761,6 +783,8 @@
 
 	mii_check_media(&db->mii, netif_msg_link(db), 1);
 	netif_start_queue(dev);
+
+	dm9000_schedule_poll(db);
 
 	return 0;
 }
@@ -879,6 +903,8 @@
 	if (netif_msg_ifdown(db))
 		dev_dbg(db->dev, "shutting down %s\n", ndev->name);
 
+	cancel_delayed_work(&db->phy_poll);
+
 	netif_stop_queue(ndev);
 	netif_carrier_off(ndev);
 
@@ -1288,6 +1314,8 @@
 	spin_unlock_irqrestore(&db->lock,flags);
 
 	mutex_unlock(&db->addr_lock);
+
+	dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret);
 	return ret;
 }
 
@@ -1301,6 +1329,7 @@
 	unsigned long flags;
 	unsigned long reg_save;
 
+	dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value);
 	mutex_lock(&db->addr_lock);
 
 	spin_lock_irqsave(&db->lock,flags);
@@ -1372,7 +1401,7 @@
 	return 0;
 }
 
-static int
+static int __devexit
 dm9000_drv_remove(struct platform_device *pdev)
 {
 	struct net_device *ndev = platform_get_drvdata(pdev);
@@ -1393,7 +1422,7 @@
 		.owner	 = THIS_MODULE,
 	},
 	.probe   = dm9000_probe,
-	.remove  = dm9000_drv_remove,
+	.remove  = __devexit_p(dm9000_drv_remove),
 	.suspend = dm9000_drv_suspend,
 	.resume  = dm9000_drv_resume,
 };
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index f5dacce..fe872fb 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -40,7 +40,7 @@
 #include <asm/io.h>
 
 #define DRV_NAME	"ehea"
-#define DRV_VERSION	"EHEA_0090"
+#define DRV_VERSION	"EHEA_0091"
 
 /* eHEA capability flags */
 #define DLPAR_PORT_ADD_REM 1
@@ -118,6 +118,13 @@
 #define EHEA_MR_ACC_CTRL       0x00800000
 
 #define EHEA_BUSMAP_START      0x8000000000000000ULL
+#define EHEA_INVAL_ADDR        0xFFFFFFFFFFFFFFFFULL
+#define EHEA_DIR_INDEX_SHIFT 13                   /* 8k Entries in 64k block */
+#define EHEA_TOP_INDEX_SHIFT (EHEA_DIR_INDEX_SHIFT * 2)
+#define EHEA_MAP_ENTRIES (1 << EHEA_DIR_INDEX_SHIFT)
+#define EHEA_MAP_SIZE (0x10000)                   /* currently fixed map size */
+#define EHEA_INDEX_MASK (EHEA_MAP_ENTRIES - 1)
+
 
 #define EHEA_WATCH_DOG_TIMEOUT 10*HZ
 
@@ -192,10 +199,20 @@
 				   set to 0 if unused */
 };
 
-struct ehea_busmap {
-	unsigned int entries;		/* total number of entries */
-	unsigned int valid_sections;	/* number of valid sections */
-	u64 *vaddr;
+/*
+ * Memory map data structures
+ */
+struct ehea_dir_bmap
+{
+	u64 ent[EHEA_MAP_ENTRIES];
+};
+struct ehea_top_bmap
+{
+	struct ehea_dir_bmap *dir[EHEA_MAP_ENTRIES];
+};
+struct ehea_bmap
+{
+	struct ehea_top_bmap *top[EHEA_MAP_ENTRIES];
 };
 
 struct ehea_qp;
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index f9bc21c..d1b6d4e 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -35,6 +35,7 @@
 #include <linux/if_ether.h>
 #include <linux/notifier.h>
 #include <linux/reboot.h>
+#include <linux/memory.h>
 #include <asm/kexec.h>
 #include <linux/mutex.h>
 
@@ -3503,6 +3504,24 @@
 					      0, H_DEREG_BCMC);
 }
 
+static int ehea_mem_notifier(struct notifier_block *nb,
+                             unsigned long action, void *data)
+{
+	switch (action) {
+	case MEM_OFFLINE:
+		ehea_info("memory has been removed");
+		ehea_rereg_mrs(NULL);
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block ehea_mem_nb = {
+	.notifier_call = ehea_mem_notifier,
+};
+
 static int ehea_reboot_notifier(struct notifier_block *nb,
 				unsigned long action, void *unused)
 {
@@ -3581,6 +3600,10 @@
 	if (ret)
 		ehea_info("failed registering reboot notifier");
 
+	ret = register_memory_notifier(&ehea_mem_nb);
+	if (ret)
+		ehea_info("failed registering memory remove notifier");
+
 	ret = crash_shutdown_register(&ehea_crash_handler);
 	if (ret)
 		ehea_info("failed registering crash handler");
@@ -3604,6 +3627,7 @@
 out3:
 	ibmebus_unregister_driver(&ehea_driver);
 out2:
+	unregister_memory_notifier(&ehea_mem_nb);
 	unregister_reboot_notifier(&ehea_reboot_nb);
 	crash_shutdown_unregister(&ehea_crash_handler);
 out:
@@ -3621,6 +3645,7 @@
 	ret = crash_shutdown_unregister(&ehea_crash_handler);
 	if (ret)
 		ehea_info("failed unregistering crash handler");
+	unregister_memory_notifier(&ehea_mem_nb);
 	kfree(ehea_fw_handles.arr);
 	kfree(ehea_bcmc_regs.arr);
 	ehea_destroy_busmap();
diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c
index d522e90..140f05b 100644
--- a/drivers/net/ehea/ehea_qmr.c
+++ b/drivers/net/ehea/ehea_qmr.c
@@ -31,8 +31,8 @@
 #include "ehea_phyp.h"
 #include "ehea_qmr.h"
 
+struct ehea_bmap *ehea_bmap = NULL;
 
-struct ehea_busmap ehea_bmap = { 0, 0, NULL };
 
 
 static void *hw_qpageit_get_inc(struct hw_queue *queue)
@@ -559,125 +559,253 @@
 	return 0;
 }
 
-int ehea_create_busmap(void)
+static inline int ehea_calc_index(unsigned long i, unsigned long s)
 {
-	u64 vaddr = EHEA_BUSMAP_START;
-	unsigned long high_section_index = 0;
-	int i;
+	return (i >> s) & EHEA_INDEX_MASK;
+}
 
-	/*
-	 * Sections are not in ascending order -> Loop over all sections and
-	 * find the highest PFN to compute the required map size.
-	*/
-	ehea_bmap.valid_sections = 0;
+static inline int ehea_init_top_bmap(struct ehea_top_bmap *ehea_top_bmap,
+				     int dir)
+{
+	if (!ehea_top_bmap->dir[dir]) {
+		ehea_top_bmap->dir[dir] =
+			kzalloc(sizeof(struct ehea_dir_bmap), GFP_KERNEL);
+		if (!ehea_top_bmap->dir[dir])
+			return -ENOMEM;
+	}
+	return 0;
+}
 
-	for (i = 0; i < NR_MEM_SECTIONS; i++)
-		if (valid_section_nr(i))
-			high_section_index = i;
+static inline int ehea_init_bmap(struct ehea_bmap *ehea_bmap, int top, int dir)
+{
+	if (!ehea_bmap->top[top]) {
+		ehea_bmap->top[top] =
+			kzalloc(sizeof(struct ehea_top_bmap), GFP_KERNEL);
+		if (!ehea_bmap->top[top])
+			return -ENOMEM;
+	}
+	return ehea_init_top_bmap(ehea_bmap->top[top], dir);
+}
 
-	ehea_bmap.entries = high_section_index + 1;
-	ehea_bmap.vaddr = vmalloc(ehea_bmap.entries * sizeof(*ehea_bmap.vaddr));
+static int ehea_create_busmap_callback(unsigned long pfn,
+				       unsigned long nr_pages, void *arg)
+{
+	unsigned long i, mr_len, start_section, end_section;
+	start_section = (pfn * PAGE_SIZE) / EHEA_SECTSIZE;
+	end_section = start_section + ((nr_pages * PAGE_SIZE) / EHEA_SECTSIZE);
+	mr_len = *(unsigned long *)arg;
 
-	if (!ehea_bmap.vaddr)
+	ehea_bmap = kzalloc(sizeof(struct ehea_bmap), GFP_KERNEL);
+	if (!ehea_bmap)
 		return -ENOMEM;
 
-	for (i = 0 ; i < ehea_bmap.entries; i++) {
-		unsigned long pfn = section_nr_to_pfn(i);
+	for (i = start_section; i < end_section; i++) {
+		int ret;
+		int top, dir, idx;
+		u64 vaddr;
 
-		if (pfn_valid(pfn)) {
-			ehea_bmap.vaddr[i] = vaddr;
-			vaddr += EHEA_SECTSIZE;
-			ehea_bmap.valid_sections++;
-		} else
-			ehea_bmap.vaddr[i] = 0;
+		top = ehea_calc_index(i, EHEA_TOP_INDEX_SHIFT);
+		dir = ehea_calc_index(i, EHEA_DIR_INDEX_SHIFT);
+
+		ret = ehea_init_bmap(ehea_bmap, top, dir);
+		if (ret)
+			return ret;
+
+		idx = i & EHEA_INDEX_MASK;
+		vaddr = EHEA_BUSMAP_START + mr_len + i * EHEA_SECTSIZE;
+
+		ehea_bmap->top[top]->dir[dir]->ent[idx] = vaddr;
 	}
 
+	mr_len += nr_pages * PAGE_SIZE;
+	*(unsigned long *)arg = mr_len;
+
 	return 0;
 }
 
+static unsigned long ehea_mr_len;
+
+static DEFINE_MUTEX(ehea_busmap_mutex);
+
+int ehea_create_busmap(void)
+{
+	int ret;
+	mutex_lock(&ehea_busmap_mutex);
+	ehea_mr_len = 0;
+	ret = walk_memory_resource(0, 1ULL << MAX_PHYSMEM_BITS, &ehea_mr_len,
+				   ehea_create_busmap_callback);
+	mutex_unlock(&ehea_busmap_mutex);
+	return ret;
+}
+
 void ehea_destroy_busmap(void)
 {
-	vfree(ehea_bmap.vaddr);
+	int top, dir;
+	mutex_lock(&ehea_busmap_mutex);
+	if (!ehea_bmap)
+		goto out_destroy;
+
+	for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
+		if (!ehea_bmap->top[top])
+			continue;
+
+		for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
+			if (!ehea_bmap->top[top]->dir[dir])
+				continue;
+
+			kfree(ehea_bmap->top[top]->dir[dir]);
+		}
+
+		kfree(ehea_bmap->top[top]);
+	}
+
+	kfree(ehea_bmap);
+	ehea_bmap = NULL;
+out_destroy:
+	mutex_unlock(&ehea_busmap_mutex);
 }
 
 u64 ehea_map_vaddr(void *caddr)
 {
-	u64 mapped_addr;
-	unsigned long index = __pa(caddr) >> SECTION_SIZE_BITS;
+	int top, dir, idx;
+	unsigned long index, offset;
 
-	if (likely(index < ehea_bmap.entries)) {
-		mapped_addr = ehea_bmap.vaddr[index];
-		if (likely(mapped_addr))
-			mapped_addr |= (((unsigned long)caddr)
-					& (EHEA_SECTSIZE - 1));
-		else
-			mapped_addr = -1;
-	} else
-		mapped_addr = -1;
+	if (!ehea_bmap)
+		return EHEA_INVAL_ADDR;
 
-	if (unlikely(mapped_addr == -1))
-		if (!test_and_set_bit(__EHEA_STOP_XFER, &ehea_driver_flags))
-			schedule_work(&ehea_rereg_mr_task);
+	index = virt_to_abs(caddr) >> SECTION_SIZE_BITS;
+	top = (index >> EHEA_TOP_INDEX_SHIFT) & EHEA_INDEX_MASK;
+	if (!ehea_bmap->top[top])
+		return EHEA_INVAL_ADDR;
 
-	return mapped_addr;
+	dir = (index >> EHEA_DIR_INDEX_SHIFT) & EHEA_INDEX_MASK;
+	if (!ehea_bmap->top[top]->dir[dir])
+		return EHEA_INVAL_ADDR;
+
+	idx = index & EHEA_INDEX_MASK;
+	if (!ehea_bmap->top[top]->dir[dir]->ent[idx])
+		return EHEA_INVAL_ADDR;
+
+	offset = (unsigned long)caddr & (EHEA_SECTSIZE - 1);
+	return ehea_bmap->top[top]->dir[dir]->ent[idx] | offset;
+}
+
+static inline void *ehea_calc_sectbase(int top, int dir, int idx)
+{
+	unsigned long ret = idx;
+	ret |= dir << EHEA_DIR_INDEX_SHIFT;
+	ret |= top << EHEA_TOP_INDEX_SHIFT;
+	return abs_to_virt(ret << SECTION_SIZE_BITS);
+}
+
+static u64 ehea_reg_mr_section(int top, int dir, int idx, u64 *pt,
+			       struct ehea_adapter *adapter,
+			       struct ehea_mr *mr)
+{
+	void *pg;
+	u64 j, m, hret;
+	unsigned long k = 0;
+	u64 pt_abs = virt_to_abs(pt);
+
+	void *sectbase = ehea_calc_sectbase(top, dir, idx);
+
+	for (j = 0; j < (EHEA_PAGES_PER_SECTION / EHEA_MAX_RPAGE); j++) {
+
+		for (m = 0; m < EHEA_MAX_RPAGE; m++) {
+			pg = sectbase + ((k++) * EHEA_PAGESIZE);
+			pt[m] = virt_to_abs(pg);
+		}
+		hret = ehea_h_register_rpage_mr(adapter->handle, mr->handle, 0,
+						0, pt_abs, EHEA_MAX_RPAGE);
+
+		if ((hret != H_SUCCESS)
+		    && (hret != H_PAGE_REGISTERED)) {
+			ehea_h_free_resource(adapter->handle, mr->handle,
+					     FORCE_FREE);
+			ehea_error("register_rpage_mr failed");
+			return hret;
+		}
+	}
+	return hret;
+}
+
+static u64 ehea_reg_mr_sections(int top, int dir, u64 *pt,
+				struct ehea_adapter *adapter,
+				struct ehea_mr *mr)
+{
+	u64 hret = H_SUCCESS;
+	int idx;
+
+	for (idx = 0; idx < EHEA_MAP_ENTRIES; idx++) {
+		if (!ehea_bmap->top[top]->dir[dir]->ent[idx])
+			continue;
+
+		hret = ehea_reg_mr_section(top, dir, idx, pt, adapter, mr);
+		if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
+			return hret;
+	}
+	return hret;
+}
+
+static u64 ehea_reg_mr_dir_sections(int top, u64 *pt,
+				    struct ehea_adapter *adapter,
+				    struct ehea_mr *mr)
+{
+	u64 hret = H_SUCCESS;
+	int dir;
+
+	for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
+		if (!ehea_bmap->top[top]->dir[dir])
+			continue;
+
+		hret = ehea_reg_mr_sections(top, dir, pt, adapter, mr);
+		if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
+			return hret;
+	}
+	return hret;
 }
 
 int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
 {
 	int ret;
 	u64 *pt;
-	void *pg;
-	u64 hret, pt_abs, i, j, m, mr_len;
+	u64 hret;
 	u32 acc_ctrl = EHEA_MR_ACC_CTRL;
 
-	mr_len = ehea_bmap.valid_sections * EHEA_SECTSIZE;
+	unsigned long top;
 
-	pt =  kzalloc(PAGE_SIZE, GFP_KERNEL);
+	pt = kzalloc(PAGE_SIZE, GFP_KERNEL);
 	if (!pt) {
 		ehea_error("no mem");
 		ret = -ENOMEM;
 		goto out;
 	}
-	pt_abs = virt_to_abs(pt);
 
-	hret = ehea_h_alloc_resource_mr(adapter->handle,
-					EHEA_BUSMAP_START, mr_len,
-					acc_ctrl, adapter->pd,
+	hret = ehea_h_alloc_resource_mr(adapter->handle, EHEA_BUSMAP_START,
+					ehea_mr_len, acc_ctrl, adapter->pd,
 					&mr->handle, &mr->lkey);
+
 	if (hret != H_SUCCESS) {
 		ehea_error("alloc_resource_mr failed");
 		ret = -EIO;
 		goto out;
 	}
 
-	for (i = 0 ; i < ehea_bmap.entries; i++)
-		if (ehea_bmap.vaddr[i]) {
-			void *sectbase = __va(i << SECTION_SIZE_BITS);
-			unsigned long k = 0;
+	if (!ehea_bmap) {
+		ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
+		ehea_error("no busmap available");
+		ret = -EIO;
+		goto out;
+	}
 
-			for (j = 0; j < (EHEA_PAGES_PER_SECTION /
-					 EHEA_MAX_RPAGE); j++) {
+	for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
+		if (!ehea_bmap->top[top])
+			continue;
 
-				for (m = 0; m < EHEA_MAX_RPAGE; m++) {
-					pg = sectbase + ((k++) * EHEA_PAGESIZE);
-					pt[m] = virt_to_abs(pg);
-				}
-
-				hret = ehea_h_register_rpage_mr(adapter->handle,
-								mr->handle,
-								0, 0, pt_abs,
-								EHEA_MAX_RPAGE);
-				if ((hret != H_SUCCESS)
-				    && (hret != H_PAGE_REGISTERED)) {
-					ehea_h_free_resource(adapter->handle,
-							     mr->handle,
-							     FORCE_FREE);
-					ehea_error("register_rpage_mr failed");
-					ret = -EIO;
-					goto out;
-				}
-			}
-		}
+		hret = ehea_reg_mr_dir_sections(top, pt, adapter, mr);
+		if ((hret != H_PAGE_REGISTERED) && (hret != H_SUCCESS))
+			break;
+	}
 
 	if (hret != H_SUCCESS) {
 		ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 6f22f06..25bdd08 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -635,6 +635,8 @@
 			dev_kfree_skb_any(priv->tx_skbuff[i]);
 			priv->tx_skbuff[i] = NULL;
 		}
+
+		txbdp++;
 	}
 
 	kfree(priv->tx_skbuff);
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index ef63c8d..c91b12e 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -144,11 +144,13 @@
 	char *req_bytes;
 	struct myri10ge_tx_buffer_state *info;
 	int mask;		/* number of transmit slots -1  */
-	int boundary;		/* boundary transmits cannot cross */
 	int req ____cacheline_aligned;	/* transmit slots submitted     */
 	int pkt_start;		/* packets started */
+	int stop_queue;
+	int linearized;
 	int done ____cacheline_aligned;	/* transmit slots completed     */
 	int pkt_done;		/* packets completed */
+	int wake_queue;
 };
 
 struct myri10ge_rx_done {
@@ -160,29 +162,50 @@
 	struct net_lro_desc lro_desc[MYRI10GE_MAX_LRO_DESCRIPTORS];
 };
 
-struct myri10ge_priv {
-	int running;		/* running?             */
-	int csum_flag;		/* rx_csums?            */
+struct myri10ge_slice_netstats {
+	unsigned long rx_packets;
+	unsigned long tx_packets;
+	unsigned long rx_bytes;
+	unsigned long tx_bytes;
+	unsigned long rx_dropped;
+	unsigned long tx_dropped;
+};
+
+struct myri10ge_slice_state {
 	struct myri10ge_tx_buf tx;	/* transmit ring        */
 	struct myri10ge_rx_buf rx_small;
 	struct myri10ge_rx_buf rx_big;
 	struct myri10ge_rx_done rx_done;
-	int small_bytes;
-	int big_bytes;
 	struct net_device *dev;
 	struct napi_struct napi;
+	struct myri10ge_priv *mgp;
+	struct myri10ge_slice_netstats stats;
+	__be32 __iomem *irq_claim;
+	struct mcp_irq_data *fw_stats;
+	dma_addr_t fw_stats_bus;
+	int watchdog_tx_done;
+	int watchdog_tx_req;
+};
+
+struct myri10ge_priv {
+	struct myri10ge_slice_state ss;
+	int tx_boundary;	/* boundary transmits cannot cross */
+	int running;		/* running?             */
+	int csum_flag;		/* rx_csums?            */
+	int small_bytes;
+	int big_bytes;
+	int max_intr_slots;
+	struct net_device *dev;
 	struct net_device_stats stats;
+	spinlock_t stats_lock;
 	u8 __iomem *sram;
 	int sram_size;
 	unsigned long board_span;
 	unsigned long iomem_base;
-	__be32 __iomem *irq_claim;
 	__be32 __iomem *irq_deassert;
 	char *mac_addr_string;
 	struct mcp_cmd_response *cmd;
 	dma_addr_t cmd_bus;
-	struct mcp_irq_data *fw_stats;
-	dma_addr_t fw_stats_bus;
 	struct pci_dev *pdev;
 	int msi_enabled;
 	u32 link_state;
@@ -191,20 +214,16 @@
 	__be32 __iomem *intr_coal_delay_ptr;
 	int mtrr;
 	int wc_enabled;
-	int wake_queue;
-	int stop_queue;
 	int down_cnt;
 	wait_queue_head_t down_wq;
 	struct work_struct watchdog_work;
 	struct timer_list watchdog_timer;
-	int watchdog_tx_done;
-	int watchdog_tx_req;
-	int watchdog_pause;
 	int watchdog_resets;
-	int tx_linearized;
+	int watchdog_pause;
 	int pause;
 	char *fw_name;
 	char eeprom_strings[MYRI10GE_EEPROM_STRINGS_SIZE];
+	char *product_code_string;
 	char fw_version[128];
 	int fw_ver_major;
 	int fw_ver_minor;
@@ -228,58 +247,54 @@
 
 static char *myri10ge_fw_name = NULL;
 module_param(myri10ge_fw_name, charp, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(myri10ge_fw_name, "Firmware image name\n");
+MODULE_PARM_DESC(myri10ge_fw_name, "Firmware image name");
 
 static int myri10ge_ecrc_enable = 1;
 module_param(myri10ge_ecrc_enable, int, S_IRUGO);
-MODULE_PARM_DESC(myri10ge_ecrc_enable, "Enable Extended CRC on PCI-E\n");
-
-static int myri10ge_max_intr_slots = 1024;
-module_param(myri10ge_max_intr_slots, int, S_IRUGO);
-MODULE_PARM_DESC(myri10ge_max_intr_slots, "Interrupt queue slots\n");
+MODULE_PARM_DESC(myri10ge_ecrc_enable, "Enable Extended CRC on PCI-E");
 
 static int myri10ge_small_bytes = -1;	/* -1 == auto */
 module_param(myri10ge_small_bytes, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(myri10ge_small_bytes, "Threshold of small packets\n");
+MODULE_PARM_DESC(myri10ge_small_bytes, "Threshold of small packets");
 
 static int myri10ge_msi = 1;	/* enable msi by default */
 module_param(myri10ge_msi, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(myri10ge_msi, "Enable Message Signalled Interrupts\n");
+MODULE_PARM_DESC(myri10ge_msi, "Enable Message Signalled Interrupts");
 
 static int myri10ge_intr_coal_delay = 75;
 module_param(myri10ge_intr_coal_delay, int, S_IRUGO);
-MODULE_PARM_DESC(myri10ge_intr_coal_delay, "Interrupt coalescing delay\n");
+MODULE_PARM_DESC(myri10ge_intr_coal_delay, "Interrupt coalescing delay");
 
 static int myri10ge_flow_control = 1;
 module_param(myri10ge_flow_control, int, S_IRUGO);
-MODULE_PARM_DESC(myri10ge_flow_control, "Pause parameter\n");
+MODULE_PARM_DESC(myri10ge_flow_control, "Pause parameter");
 
 static int myri10ge_deassert_wait = 1;
 module_param(myri10ge_deassert_wait, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(myri10ge_deassert_wait,
-		 "Wait when deasserting legacy interrupts\n");
+		 "Wait when deasserting legacy interrupts");
 
 static int myri10ge_force_firmware = 0;
 module_param(myri10ge_force_firmware, int, S_IRUGO);
 MODULE_PARM_DESC(myri10ge_force_firmware,
-		 "Force firmware to assume aligned completions\n");
+		 "Force firmware to assume aligned completions");
 
 static int myri10ge_initial_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN;
 module_param(myri10ge_initial_mtu, int, S_IRUGO);
-MODULE_PARM_DESC(myri10ge_initial_mtu, "Initial MTU\n");
+MODULE_PARM_DESC(myri10ge_initial_mtu, "Initial MTU");
 
 static int myri10ge_napi_weight = 64;
 module_param(myri10ge_napi_weight, int, S_IRUGO);
-MODULE_PARM_DESC(myri10ge_napi_weight, "Set NAPI weight\n");
+MODULE_PARM_DESC(myri10ge_napi_weight, "Set NAPI weight");
 
 static int myri10ge_watchdog_timeout = 1;
 module_param(myri10ge_watchdog_timeout, int, S_IRUGO);
-MODULE_PARM_DESC(myri10ge_watchdog_timeout, "Set watchdog timeout\n");
+MODULE_PARM_DESC(myri10ge_watchdog_timeout, "Set watchdog timeout");
 
 static int myri10ge_max_irq_loops = 1048576;
 module_param(myri10ge_max_irq_loops, int, S_IRUGO);
 MODULE_PARM_DESC(myri10ge_max_irq_loops,
-		 "Set stuck legacy IRQ detection threshold\n");
+		 "Set stuck legacy IRQ detection threshold");
 
 #define MYRI10GE_MSG_DEFAULT NETIF_MSG_LINK
 
@@ -289,21 +304,22 @@
 
 static int myri10ge_lro = 1;
 module_param(myri10ge_lro, int, S_IRUGO);
-MODULE_PARM_DESC(myri10ge_lro, "Enable large receive offload\n");
+MODULE_PARM_DESC(myri10ge_lro, "Enable large receive offload");
 
 static int myri10ge_lro_max_pkts = MYRI10GE_LRO_MAX_PKTS;
 module_param(myri10ge_lro_max_pkts, int, S_IRUGO);
-MODULE_PARM_DESC(myri10ge_lro, "Number of LRO packets to be aggregated\n");
+MODULE_PARM_DESC(myri10ge_lro_max_pkts,
+		 "Number of LRO packets to be aggregated");
 
 static int myri10ge_fill_thresh = 256;
 module_param(myri10ge_fill_thresh, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(myri10ge_fill_thresh, "Number of empty rx slots allowed\n");
+MODULE_PARM_DESC(myri10ge_fill_thresh, "Number of empty rx slots allowed");
 
 static int myri10ge_reset_recover = 1;
 
 static int myri10ge_wcfifo = 0;
 module_param(myri10ge_wcfifo, int, S_IRUGO);
-MODULE_PARM_DESC(myri10ge_wcfifo, "Enable WC Fifo when WC is enabled\n");
+MODULE_PARM_DESC(myri10ge_wcfifo, "Enable WC Fifo when WC is enabled");
 
 #define MYRI10GE_FW_OFFSET 1024*1024
 #define MYRI10GE_HIGHPART_TO_U32(X) \
@@ -359,8 +375,10 @@
 		for (sleep_total = 0;
 		     sleep_total < 1000
 		     && response->result == htonl(MYRI10GE_NO_RESPONSE_RESULT);
-		     sleep_total += 10)
+		     sleep_total += 10) {
 			udelay(10);
+			mb();
+		}
 	} else {
 		/* use msleep for most command */
 		for (sleep_total = 0;
@@ -420,6 +438,10 @@
 				ptr += 1;
 			}
 		}
+		if (memcmp(ptr, "PC=", 3) == 0) {
+			ptr += 3;
+			mgp->product_code_string = ptr;
+		}
 		if (memcmp((const void *)ptr, "SN=", 3) == 0) {
 			ptr += 3;
 			mgp->serial_number = simple_strtoul(ptr, &ptr, 10);
@@ -442,7 +464,7 @@
 static void myri10ge_dummy_rdma(struct myri10ge_priv *mgp, int enable)
 {
 	char __iomem *submit;
-	__be32 buf[16];
+	__be32 buf[16] __attribute__ ((__aligned__(8)));
 	u32 dma_low, dma_high;
 	int i;
 
@@ -609,13 +631,38 @@
 	return status;
 }
 
+static int myri10ge_get_firmware_capabilities(struct myri10ge_priv *mgp)
+{
+	struct myri10ge_cmd cmd;
+	int status;
+
+	/* probe for IPv6 TSO support */
+	mgp->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO;
+	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE,
+				   &cmd, 0);
+	if (status == 0) {
+		mgp->max_tso6 = cmd.data0;
+		mgp->features |= NETIF_F_TSO6;
+	}
+
+	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0);
+	if (status != 0) {
+		dev_err(&mgp->pdev->dev,
+			"failed MXGEFW_CMD_GET_RX_RING_SIZE\n");
+		return -ENXIO;
+	}
+
+	mgp->max_intr_slots = 2 * (cmd.data0 / sizeof(struct mcp_dma_addr));
+
+	return 0;
+}
+
 static int myri10ge_load_firmware(struct myri10ge_priv *mgp)
 {
 	char __iomem *submit;
-	__be32 buf[16];
+	__be32 buf[16] __attribute__ ((__aligned__(8)));
 	u32 dma_low, dma_high, size;
 	int status, i;
-	struct myri10ge_cmd cmd;
 
 	size = 0;
 	status = myri10ge_load_hotplug_firmware(mgp, &size);
@@ -635,7 +682,7 @@
 		}
 		dev_info(&mgp->pdev->dev,
 			 "Successfully adopted running firmware\n");
-		if (mgp->tx.boundary == 4096) {
+		if (mgp->tx_boundary == 4096) {
 			dev_warn(&mgp->pdev->dev,
 				 "Using firmware currently running on NIC"
 				 ".  For optimal\n");
@@ -646,7 +693,9 @@
 		}
 
 		mgp->fw_name = "adopted";
-		mgp->tx.boundary = 2048;
+		mgp->tx_boundary = 2048;
+		myri10ge_dummy_rdma(mgp, 1);
+		status = myri10ge_get_firmware_capabilities(mgp);
 		return status;
 	}
 
@@ -681,26 +730,18 @@
 	msleep(1);
 	mb();
 	i = 0;
-	while (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA && i < 20) {
-		msleep(1);
+	while (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA && i < 9) {
+		msleep(1 << i);
 		i++;
 	}
 	if (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA) {
 		dev_err(&mgp->pdev->dev, "handoff failed\n");
 		return -ENXIO;
 	}
-	dev_info(&mgp->pdev->dev, "handoff confirmed\n");
 	myri10ge_dummy_rdma(mgp, 1);
+	status = myri10ge_get_firmware_capabilities(mgp);
 
-	/* probe for IPv6 TSO support */
-	mgp->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO;
-	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE,
-				   &cmd, 0);
-	if (status == 0) {
-		mgp->max_tso6 = cmd.data0;
-		mgp->features |= NETIF_F_TSO6;
-	}
-	return 0;
+	return status;
 }
 
 static int myri10ge_update_mac_address(struct myri10ge_priv *mgp, u8 * addr)
@@ -772,7 +813,7 @@
 	 * transfers took to complete.
 	 */
 
-	len = mgp->tx.boundary;
+	len = mgp->tx_boundary;
 
 	cmd.data0 = MYRI10GE_LOWPART_TO_U32(dmatest_bus);
 	cmd.data1 = MYRI10GE_HIGHPART_TO_U32(dmatest_bus);
@@ -834,17 +875,17 @@
 
 	/* Now exchange information about interrupts  */
 
-	bytes = myri10ge_max_intr_slots * sizeof(*mgp->rx_done.entry);
-	memset(mgp->rx_done.entry, 0, bytes);
+	bytes = mgp->max_intr_slots * sizeof(*mgp->ss.rx_done.entry);
+	memset(mgp->ss.rx_done.entry, 0, bytes);
 	cmd.data0 = (u32) bytes;
 	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd, 0);
-	cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->rx_done.bus);
-	cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->rx_done.bus);
+	cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->ss.rx_done.bus);
+	cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->ss.rx_done.bus);
 	status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_DMA, &cmd, 0);
 
 	status |=
 	    myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_ACK_OFFSET, &cmd, 0);
-	mgp->irq_claim = (__iomem __be32 *) (mgp->sram + cmd.data0);
+	mgp->ss.irq_claim = (__iomem __be32 *) (mgp->sram + cmd.data0);
 	status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET,
 				    &cmd, 0);
 	mgp->irq_deassert = (__iomem __be32 *) (mgp->sram + cmd.data0);
@@ -858,17 +899,17 @@
 	}
 	put_be32(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr);
 
-	memset(mgp->rx_done.entry, 0, bytes);
+	memset(mgp->ss.rx_done.entry, 0, bytes);
 
 	/* reset mcp/driver shared state back to 0 */
-	mgp->tx.req = 0;
-	mgp->tx.done = 0;
-	mgp->tx.pkt_start = 0;
-	mgp->tx.pkt_done = 0;
-	mgp->rx_big.cnt = 0;
-	mgp->rx_small.cnt = 0;
-	mgp->rx_done.idx = 0;
-	mgp->rx_done.cnt = 0;
+	mgp->ss.tx.req = 0;
+	mgp->ss.tx.done = 0;
+	mgp->ss.tx.pkt_start = 0;
+	mgp->ss.tx.pkt_done = 0;
+	mgp->ss.rx_big.cnt = 0;
+	mgp->ss.rx_small.cnt = 0;
+	mgp->ss.rx_done.idx = 0;
+	mgp->ss.rx_done.cnt = 0;
 	mgp->link_changes = 0;
 	status = myri10ge_update_mac_address(mgp, mgp->dev->dev_addr);
 	myri10ge_change_pause(mgp, mgp->pause);
@@ -1020,9 +1061,10 @@
 				 * page into an skb */
 
 static inline int
-myri10ge_rx_done(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
+myri10ge_rx_done(struct myri10ge_slice_state *ss, struct myri10ge_rx_buf *rx,
 		 int bytes, int len, __wsum csum)
 {
+	struct myri10ge_priv *mgp = ss->mgp;
 	struct sk_buff *skb;
 	struct skb_frag_struct rx_frags[MYRI10GE_MAX_FRAGS_PER_FRAME];
 	int i, idx, hlen, remainder;
@@ -1052,11 +1094,10 @@
 		rx_frags[0].page_offset += MXGEFW_PAD;
 		rx_frags[0].size -= MXGEFW_PAD;
 		len -= MXGEFW_PAD;
-		lro_receive_frags(&mgp->rx_done.lro_mgr, rx_frags,
+		lro_receive_frags(&ss->rx_done.lro_mgr, rx_frags,
 				  len, len,
-				 /* opaque, will come back in get_frag_header */
-				  (void *)(__force unsigned long)csum,
-				  csum);
+				  /* opaque, will come back in get_frag_header */
+				  (void *)(__force unsigned long)csum, csum);
 		return 1;
 	}
 
@@ -1096,10 +1137,11 @@
 	return 1;
 }
 
-static inline void myri10ge_tx_done(struct myri10ge_priv *mgp, int mcp_index)
+static inline void
+myri10ge_tx_done(struct myri10ge_slice_state *ss, int mcp_index)
 {
-	struct pci_dev *pdev = mgp->pdev;
-	struct myri10ge_tx_buf *tx = &mgp->tx;
+	struct pci_dev *pdev = ss->mgp->pdev;
+	struct myri10ge_tx_buf *tx = &ss->tx;
 	struct sk_buff *skb;
 	int idx, len;
 
@@ -1117,8 +1159,8 @@
 		len = pci_unmap_len(&tx->info[idx], len);
 		pci_unmap_len_set(&tx->info[idx], len, 0);
 		if (skb) {
-			mgp->stats.tx_bytes += skb->len;
-			mgp->stats.tx_packets++;
+			ss->stats.tx_bytes += skb->len;
+			ss->stats.tx_packets++;
 			dev_kfree_skb_irq(skb);
 			if (len)
 				pci_unmap_single(pdev,
@@ -1134,16 +1176,18 @@
 		}
 	}
 	/* start the queue if we've stopped it */
-	if (netif_queue_stopped(mgp->dev)
+	if (netif_queue_stopped(ss->dev)
 	    && tx->req - tx->done < (tx->mask >> 1)) {
-		mgp->wake_queue++;
-		netif_wake_queue(mgp->dev);
+		tx->wake_queue++;
+		netif_wake_queue(ss->dev);
 	}
 }
 
-static inline int myri10ge_clean_rx_done(struct myri10ge_priv *mgp, int budget)
+static inline int
+myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget)
 {
-	struct myri10ge_rx_done *rx_done = &mgp->rx_done;
+	struct myri10ge_rx_done *rx_done = &ss->rx_done;
+	struct myri10ge_priv *mgp = ss->mgp;
 	unsigned long rx_bytes = 0;
 	unsigned long rx_packets = 0;
 	unsigned long rx_ok;
@@ -1159,40 +1203,40 @@
 		rx_done->entry[idx].length = 0;
 		checksum = csum_unfold(rx_done->entry[idx].checksum);
 		if (length <= mgp->small_bytes)
-			rx_ok = myri10ge_rx_done(mgp, &mgp->rx_small,
+			rx_ok = myri10ge_rx_done(ss, &ss->rx_small,
 						 mgp->small_bytes,
 						 length, checksum);
 		else
-			rx_ok = myri10ge_rx_done(mgp, &mgp->rx_big,
+			rx_ok = myri10ge_rx_done(ss, &ss->rx_big,
 						 mgp->big_bytes,
 						 length, checksum);
 		rx_packets += rx_ok;
 		rx_bytes += rx_ok * (unsigned long)length;
 		cnt++;
-		idx = cnt & (myri10ge_max_intr_slots - 1);
+		idx = cnt & (mgp->max_intr_slots - 1);
 		work_done++;
 	}
 	rx_done->idx = idx;
 	rx_done->cnt = cnt;
-	mgp->stats.rx_packets += rx_packets;
-	mgp->stats.rx_bytes += rx_bytes;
+	ss->stats.rx_packets += rx_packets;
+	ss->stats.rx_bytes += rx_bytes;
 
 	if (myri10ge_lro)
 		lro_flush_all(&rx_done->lro_mgr);
 
 	/* restock receive rings if needed */
-	if (mgp->rx_small.fill_cnt - mgp->rx_small.cnt < myri10ge_fill_thresh)
-		myri10ge_alloc_rx_pages(mgp, &mgp->rx_small,
+	if (ss->rx_small.fill_cnt - ss->rx_small.cnt < myri10ge_fill_thresh)
+		myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
 					mgp->small_bytes + MXGEFW_PAD, 0);
-	if (mgp->rx_big.fill_cnt - mgp->rx_big.cnt < myri10ge_fill_thresh)
-		myri10ge_alloc_rx_pages(mgp, &mgp->rx_big, mgp->big_bytes, 0);
+	if (ss->rx_big.fill_cnt - ss->rx_big.cnt < myri10ge_fill_thresh)
+		myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0);
 
 	return work_done;
 }
 
 static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp)
 {
-	struct mcp_irq_data *stats = mgp->fw_stats;
+	struct mcp_irq_data *stats = mgp->ss.fw_stats;
 
 	if (unlikely(stats->stats_updated)) {
 		unsigned link_up = ntohl(stats->link_up);
@@ -1219,9 +1263,9 @@
 			}
 		}
 		if (mgp->rdma_tags_available !=
-		    ntohl(mgp->fw_stats->rdma_tags_available)) {
+		    ntohl(stats->rdma_tags_available)) {
 			mgp->rdma_tags_available =
-			    ntohl(mgp->fw_stats->rdma_tags_available);
+			    ntohl(stats->rdma_tags_available);
 			printk(KERN_WARNING "myri10ge: %s: RDMA timed out! "
 			       "%d tags left\n", mgp->dev->name,
 			       mgp->rdma_tags_available);
@@ -1234,26 +1278,27 @@
 
 static int myri10ge_poll(struct napi_struct *napi, int budget)
 {
-	struct myri10ge_priv *mgp =
-	    container_of(napi, struct myri10ge_priv, napi);
-	struct net_device *netdev = mgp->dev;
+	struct myri10ge_slice_state *ss =
+	    container_of(napi, struct myri10ge_slice_state, napi);
+	struct net_device *netdev = ss->mgp->dev;
 	int work_done;
 
 	/* process as many rx events as NAPI will allow */
-	work_done = myri10ge_clean_rx_done(mgp, budget);
+	work_done = myri10ge_clean_rx_done(ss, budget);
 
 	if (work_done < budget) {
 		netif_rx_complete(netdev, napi);
-		put_be32(htonl(3), mgp->irq_claim);
+		put_be32(htonl(3), ss->irq_claim);
 	}
 	return work_done;
 }
 
 static irqreturn_t myri10ge_intr(int irq, void *arg)
 {
-	struct myri10ge_priv *mgp = arg;
-	struct mcp_irq_data *stats = mgp->fw_stats;
-	struct myri10ge_tx_buf *tx = &mgp->tx;
+	struct myri10ge_slice_state *ss = arg;
+	struct myri10ge_priv *mgp = ss->mgp;
+	struct mcp_irq_data *stats = ss->fw_stats;
+	struct myri10ge_tx_buf *tx = &ss->tx;
 	u32 send_done_count;
 	int i;
 
@@ -1264,7 +1309,7 @@
 	/* low bit indicates receives are present, so schedule
 	 * napi poll handler */
 	if (stats->valid & 1)
-		netif_rx_schedule(mgp->dev, &mgp->napi);
+		netif_rx_schedule(ss->dev, &ss->napi);
 
 	if (!mgp->msi_enabled) {
 		put_be32(0, mgp->irq_deassert);
@@ -1281,7 +1326,7 @@
 		/* check for transmit completes and receives */
 		send_done_count = ntohl(stats->send_done_count);
 		if (send_done_count != tx->pkt_done)
-			myri10ge_tx_done(mgp, (int)send_done_count);
+			myri10ge_tx_done(ss, (int)send_done_count);
 		if (unlikely(i > myri10ge_max_irq_loops)) {
 			printk(KERN_WARNING "myri10ge: %s: irq stuck?\n",
 			       mgp->dev->name);
@@ -1296,16 +1341,46 @@
 
 	myri10ge_check_statblock(mgp);
 
-	put_be32(htonl(3), mgp->irq_claim + 1);
+	put_be32(htonl(3), ss->irq_claim + 1);
 	return (IRQ_HANDLED);
 }
 
 static int
 myri10ge_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
 {
+	struct myri10ge_priv *mgp = netdev_priv(netdev);
+	char *ptr;
+	int i;
+
 	cmd->autoneg = AUTONEG_DISABLE;
 	cmd->speed = SPEED_10000;
 	cmd->duplex = DUPLEX_FULL;
+
+	/*
+	 * parse the product code to determine the interface type
+	 * (CX4, XFP, Quad Ribbon Fiber) by looking at the character
+	 * after the 3rd dash in the driver's cached copy of the
+	 * EEPROM's product code string.
+	 */
+	ptr = mgp->product_code_string;
+	if (ptr == NULL) {
+		printk(KERN_ERR "myri10ge: %s: Missing product code\n",
+		       netdev->name);
+		return 0;
+	}
+	for (i = 0; i < 3; i++, ptr++) {
+		ptr = strchr(ptr, '-');
+		if (ptr == NULL) {
+			printk(KERN_ERR "myri10ge: %s: Invalid product "
+			       "code %s\n", netdev->name,
+			       mgp->product_code_string);
+			return 0;
+		}
+	}
+	if (*ptr == 'R' || *ptr == 'Q') {
+		/* We've found either an XFP or quad ribbon fiber */
+		cmd->port = PORT_FIBRE;
+	}
 	return 0;
 }
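
For reference, the dash-walking check above can be exercised on its own; a minimal userspace sketch (the helper name and the sample product code below are made up for illustration, only the "character after the third dash" rule comes from the driver):

#include <stdio.h>
#include <string.h>

/* Sketch: classify a product code the same way the hunk above does.
 * 'R' or 'Q' after the third dash means XFP or quad ribbon fiber. */
static int product_code_is_fibre(const char *pc)
{
	const char *ptr = pc;
	int i;

	for (i = 0; i < 3; i++, ptr++) {
		ptr = strchr(ptr, '-');
		if (ptr == NULL)
			return -1;	/* malformed product code */
	}
	return (*ptr == 'R' || *ptr == 'Q');
}

int main(void)
{
	/* hypothetical product code string */
	printf("%d\n", product_code_is_fibre("10G-PCIE-8A-R"));	/* prints 1 */
	return 0;
}
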
 
@@ -1324,6 +1399,7 @@
 myri10ge_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal)
 {
 	struct myri10ge_priv *mgp = netdev_priv(netdev);
+
 	coal->rx_coalesce_usecs = mgp->intr_coal_delay;
 	return 0;
 }
@@ -1370,10 +1446,10 @@
 {
 	struct myri10ge_priv *mgp = netdev_priv(netdev);
 
-	ring->rx_mini_max_pending = mgp->rx_small.mask + 1;
-	ring->rx_max_pending = mgp->rx_big.mask + 1;
+	ring->rx_mini_max_pending = mgp->ss.rx_small.mask + 1;
+	ring->rx_max_pending = mgp->ss.rx_big.mask + 1;
 	ring->rx_jumbo_max_pending = 0;
-	ring->tx_max_pending = mgp->rx_small.mask + 1;
+	ring->tx_max_pending = mgp->ss.rx_small.mask + 1;
 	ring->rx_mini_pending = ring->rx_mini_max_pending;
 	ring->rx_pending = ring->rx_max_pending;
 	ring->rx_jumbo_pending = ring->rx_jumbo_max_pending;
@@ -1383,6 +1459,7 @@
 static u32 myri10ge_get_rx_csum(struct net_device *netdev)
 {
 	struct myri10ge_priv *mgp = netdev_priv(netdev);
+
 	if (mgp->csum_flag)
 		return 1;
 	else
@@ -1392,6 +1469,7 @@
 static int myri10ge_set_rx_csum(struct net_device *netdev, u32 csum_enabled)
 {
 	struct myri10ge_priv *mgp = netdev_priv(netdev);
+
 	if (csum_enabled)
 		mgp->csum_flag = MXGEFW_FLAGS_CKSUM;
 	else
@@ -1411,7 +1489,7 @@
 	return 0;
 }
 
-static const char myri10ge_gstrings_stats[][ETH_GSTRING_LEN] = {
+static const char myri10ge_gstrings_main_stats[][ETH_GSTRING_LEN] = {
 	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
 	"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
 	"rx_length_errors", "rx_over_errors", "rx_crc_errors",
@@ -1421,28 +1499,39 @@
 	/* device-specific stats */
 	"tx_boundary", "WC", "irq", "MSI",
 	"read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs",
-	"serial_number", "tx_pkt_start", "tx_pkt_done",
-	"tx_req", "tx_done", "rx_small_cnt", "rx_big_cnt",
-	"wake_queue", "stop_queue", "watchdog_resets", "tx_linearized",
+	"serial_number", "watchdog_resets",
 	"link_changes", "link_up", "dropped_link_overflow",
 	"dropped_link_error_or_filtered",
 	"dropped_pause", "dropped_bad_phy", "dropped_bad_crc32",
 	"dropped_unicast_filtered", "dropped_multicast_filtered",
 	"dropped_runt", "dropped_overrun", "dropped_no_small_buffer",
-	"dropped_no_big_buffer", "LRO aggregated", "LRO flushed",
+	"dropped_no_big_buffer"
+};
+
+static const char myri10ge_gstrings_slice_stats[][ETH_GSTRING_LEN] = {
+	"----------- slice ---------",
+	"tx_pkt_start", "tx_pkt_done", "tx_req", "tx_done",
+	"rx_small_cnt", "rx_big_cnt",
+	"wake_queue", "stop_queue", "tx_linearized", "LRO aggregated",
+	    "LRO flushed",
 	"LRO avg aggr", "LRO no_desc"
 };
 
 #define MYRI10GE_NET_STATS_LEN      21
-#define MYRI10GE_STATS_LEN	ARRAY_SIZE(myri10ge_gstrings_stats)
+#define MYRI10GE_MAIN_STATS_LEN  ARRAY_SIZE(myri10ge_gstrings_main_stats)
+#define MYRI10GE_SLICE_STATS_LEN  ARRAY_SIZE(myri10ge_gstrings_slice_stats)
 
 static void
 myri10ge_get_strings(struct net_device *netdev, u32 stringset, u8 * data)
 {
 	switch (stringset) {
 	case ETH_SS_STATS:
-		memcpy(data, *myri10ge_gstrings_stats,
-		       sizeof(myri10ge_gstrings_stats));
+		memcpy(data, *myri10ge_gstrings_main_stats,
+		       sizeof(myri10ge_gstrings_main_stats));
+		data += sizeof(myri10ge_gstrings_main_stats);
+		memcpy(data, *myri10ge_gstrings_slice_stats,
+		       sizeof(myri10ge_gstrings_slice_stats));
+		data += sizeof(myri10ge_gstrings_slice_stats);
 		break;
 	}
 }
@@ -1451,7 +1540,7 @@
 {
 	switch (sset) {
 	case ETH_SS_STATS:
-		return MYRI10GE_STATS_LEN;
+		return MYRI10GE_MAIN_STATS_LEN + MYRI10GE_SLICE_STATS_LEN;
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -1462,12 +1551,13 @@
 			   struct ethtool_stats *stats, u64 * data)
 {
 	struct myri10ge_priv *mgp = netdev_priv(netdev);
+	struct myri10ge_slice_state *ss;
 	int i;
 
 	for (i = 0; i < MYRI10GE_NET_STATS_LEN; i++)
 		data[i] = ((unsigned long *)&mgp->stats)[i];
 
-	data[i++] = (unsigned int)mgp->tx.boundary;
+	data[i++] = (unsigned int)mgp->tx_boundary;
 	data[i++] = (unsigned int)mgp->wc_enabled;
 	data[i++] = (unsigned int)mgp->pdev->irq;
 	data[i++] = (unsigned int)mgp->msi_enabled;
@@ -1475,40 +1565,44 @@
 	data[i++] = (unsigned int)mgp->write_dma;
 	data[i++] = (unsigned int)mgp->read_write_dma;
 	data[i++] = (unsigned int)mgp->serial_number;
-	data[i++] = (unsigned int)mgp->tx.pkt_start;
-	data[i++] = (unsigned int)mgp->tx.pkt_done;
-	data[i++] = (unsigned int)mgp->tx.req;
-	data[i++] = (unsigned int)mgp->tx.done;
-	data[i++] = (unsigned int)mgp->rx_small.cnt;
-	data[i++] = (unsigned int)mgp->rx_big.cnt;
-	data[i++] = (unsigned int)mgp->wake_queue;
-	data[i++] = (unsigned int)mgp->stop_queue;
 	data[i++] = (unsigned int)mgp->watchdog_resets;
-	data[i++] = (unsigned int)mgp->tx_linearized;
 	data[i++] = (unsigned int)mgp->link_changes;
-	data[i++] = (unsigned int)ntohl(mgp->fw_stats->link_up);
-	data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_link_overflow);
+
+	/* firmware stats are useful only in the first slice */
+	ss = &mgp->ss;
+	data[i++] = (unsigned int)ntohl(ss->fw_stats->link_up);
+	data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_link_overflow);
 	data[i++] =
-	    (unsigned int)ntohl(mgp->fw_stats->dropped_link_error_or_filtered);
-	data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_pause);
-	data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_bad_phy);
-	data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_bad_crc32);
+	    (unsigned int)ntohl(ss->fw_stats->dropped_link_error_or_filtered);
+	data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_pause);
+	data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_bad_phy);
+	data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_bad_crc32);
+	data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_unicast_filtered);
 	data[i++] =
-	    (unsigned int)ntohl(mgp->fw_stats->dropped_unicast_filtered);
-	data[i++] =
-	    (unsigned int)ntohl(mgp->fw_stats->dropped_multicast_filtered);
-	data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_runt);
-	data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_overrun);
-	data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_no_small_buffer);
-	data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_no_big_buffer);
-	data[i++] = mgp->rx_done.lro_mgr.stats.aggregated;
-	data[i++] = mgp->rx_done.lro_mgr.stats.flushed;
-	if (mgp->rx_done.lro_mgr.stats.flushed)
-		data[i++] = mgp->rx_done.lro_mgr.stats.aggregated /
-		    mgp->rx_done.lro_mgr.stats.flushed;
+	    (unsigned int)ntohl(ss->fw_stats->dropped_multicast_filtered);
+	data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_runt);
+	data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_overrun);
+	data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_small_buffer);
+	data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_big_buffer);
+
+	data[i++] = 0;
+	data[i++] = (unsigned int)ss->tx.pkt_start;
+	data[i++] = (unsigned int)ss->tx.pkt_done;
+	data[i++] = (unsigned int)ss->tx.req;
+	data[i++] = (unsigned int)ss->tx.done;
+	data[i++] = (unsigned int)ss->rx_small.cnt;
+	data[i++] = (unsigned int)ss->rx_big.cnt;
+	data[i++] = (unsigned int)ss->tx.wake_queue;
+	data[i++] = (unsigned int)ss->tx.stop_queue;
+	data[i++] = (unsigned int)ss->tx.linearized;
+	data[i++] = ss->rx_done.lro_mgr.stats.aggregated;
+	data[i++] = ss->rx_done.lro_mgr.stats.flushed;
+	if (ss->rx_done.lro_mgr.stats.flushed)
+		data[i++] = ss->rx_done.lro_mgr.stats.aggregated /
+		    ss->rx_done.lro_mgr.stats.flushed;
 	else
 		data[i++] = 0;
-	data[i++] = mgp->rx_done.lro_mgr.stats.no_desc;
+	data[i++] = ss->rx_done.lro_mgr.stats.no_desc;
 }
 
 static void myri10ge_set_msglevel(struct net_device *netdev, u32 value)
@@ -1544,19 +1638,17 @@
 	.get_msglevel = myri10ge_get_msglevel
 };
 
-static int myri10ge_allocate_rings(struct net_device *dev)
+static int myri10ge_allocate_rings(struct myri10ge_slice_state *ss)
 {
-	struct myri10ge_priv *mgp;
+	struct myri10ge_priv *mgp = ss->mgp;
 	struct myri10ge_cmd cmd;
+	struct net_device *dev = mgp->dev;
 	int tx_ring_size, rx_ring_size;
 	int tx_ring_entries, rx_ring_entries;
 	int i, status;
 	size_t bytes;
 
-	mgp = netdev_priv(dev);
-
 	/* get ring sizes */
-
 	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_RING_SIZE, &cmd, 0);
 	tx_ring_size = cmd.data0;
 	status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0);
@@ -1566,144 +1658,142 @@
 
 	tx_ring_entries = tx_ring_size / sizeof(struct mcp_kreq_ether_send);
 	rx_ring_entries = rx_ring_size / sizeof(struct mcp_dma_addr);
-	mgp->tx.mask = tx_ring_entries - 1;
-	mgp->rx_small.mask = mgp->rx_big.mask = rx_ring_entries - 1;
+	ss->tx.mask = tx_ring_entries - 1;
+	ss->rx_small.mask = ss->rx_big.mask = rx_ring_entries - 1;
 
 	status = -ENOMEM;
 
 	/* allocate the host shadow rings */
 
 	bytes = 8 + (MYRI10GE_MAX_SEND_DESC_TSO + 4)
-	    * sizeof(*mgp->tx.req_list);
-	mgp->tx.req_bytes = kzalloc(bytes, GFP_KERNEL);
-	if (mgp->tx.req_bytes == NULL)
+	    * sizeof(*ss->tx.req_list);
+	ss->tx.req_bytes = kzalloc(bytes, GFP_KERNEL);
+	if (ss->tx.req_bytes == NULL)
 		goto abort_with_nothing;
 
 	/* ensure req_list entries are aligned to 8 bytes */
-	mgp->tx.req_list = (struct mcp_kreq_ether_send *)
-	    ALIGN((unsigned long)mgp->tx.req_bytes, 8);
+	ss->tx.req_list = (struct mcp_kreq_ether_send *)
+	    ALIGN((unsigned long)ss->tx.req_bytes, 8);
 
-	bytes = rx_ring_entries * sizeof(*mgp->rx_small.shadow);
-	mgp->rx_small.shadow = kzalloc(bytes, GFP_KERNEL);
-	if (mgp->rx_small.shadow == NULL)
+	bytes = rx_ring_entries * sizeof(*ss->rx_small.shadow);
+	ss->rx_small.shadow = kzalloc(bytes, GFP_KERNEL);
+	if (ss->rx_small.shadow == NULL)
 		goto abort_with_tx_req_bytes;
 
-	bytes = rx_ring_entries * sizeof(*mgp->rx_big.shadow);
-	mgp->rx_big.shadow = kzalloc(bytes, GFP_KERNEL);
-	if (mgp->rx_big.shadow == NULL)
+	bytes = rx_ring_entries * sizeof(*ss->rx_big.shadow);
+	ss->rx_big.shadow = kzalloc(bytes, GFP_KERNEL);
+	if (ss->rx_big.shadow == NULL)
 		goto abort_with_rx_small_shadow;
 
 	/* allocate the host info rings */
 
-	bytes = tx_ring_entries * sizeof(*mgp->tx.info);
-	mgp->tx.info = kzalloc(bytes, GFP_KERNEL);
-	if (mgp->tx.info == NULL)
+	bytes = tx_ring_entries * sizeof(*ss->tx.info);
+	ss->tx.info = kzalloc(bytes, GFP_KERNEL);
+	if (ss->tx.info == NULL)
 		goto abort_with_rx_big_shadow;
 
-	bytes = rx_ring_entries * sizeof(*mgp->rx_small.info);
-	mgp->rx_small.info = kzalloc(bytes, GFP_KERNEL);
-	if (mgp->rx_small.info == NULL)
+	bytes = rx_ring_entries * sizeof(*ss->rx_small.info);
+	ss->rx_small.info = kzalloc(bytes, GFP_KERNEL);
+	if (ss->rx_small.info == NULL)
 		goto abort_with_tx_info;
 
-	bytes = rx_ring_entries * sizeof(*mgp->rx_big.info);
-	mgp->rx_big.info = kzalloc(bytes, GFP_KERNEL);
-	if (mgp->rx_big.info == NULL)
+	bytes = rx_ring_entries * sizeof(*ss->rx_big.info);
+	ss->rx_big.info = kzalloc(bytes, GFP_KERNEL);
+	if (ss->rx_big.info == NULL)
 		goto abort_with_rx_small_info;
 
 	/* Fill the receive rings */
-	mgp->rx_big.cnt = 0;
-	mgp->rx_small.cnt = 0;
-	mgp->rx_big.fill_cnt = 0;
-	mgp->rx_small.fill_cnt = 0;
-	mgp->rx_small.page_offset = MYRI10GE_ALLOC_SIZE;
-	mgp->rx_big.page_offset = MYRI10GE_ALLOC_SIZE;
-	mgp->rx_small.watchdog_needed = 0;
-	mgp->rx_big.watchdog_needed = 0;
-	myri10ge_alloc_rx_pages(mgp, &mgp->rx_small,
+	ss->rx_big.cnt = 0;
+	ss->rx_small.cnt = 0;
+	ss->rx_big.fill_cnt = 0;
+	ss->rx_small.fill_cnt = 0;
+	ss->rx_small.page_offset = MYRI10GE_ALLOC_SIZE;
+	ss->rx_big.page_offset = MYRI10GE_ALLOC_SIZE;
+	ss->rx_small.watchdog_needed = 0;
+	ss->rx_big.watchdog_needed = 0;
+	myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
 				mgp->small_bytes + MXGEFW_PAD, 0);
 
-	if (mgp->rx_small.fill_cnt < mgp->rx_small.mask + 1) {
+	if (ss->rx_small.fill_cnt < ss->rx_small.mask + 1) {
 		printk(KERN_ERR "myri10ge: %s: alloced only %d small bufs\n",
-		       dev->name, mgp->rx_small.fill_cnt);
+		       dev->name, ss->rx_small.fill_cnt);
 		goto abort_with_rx_small_ring;
 	}
 
-	myri10ge_alloc_rx_pages(mgp, &mgp->rx_big, mgp->big_bytes, 0);
-	if (mgp->rx_big.fill_cnt < mgp->rx_big.mask + 1) {
+	myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0);
+	if (ss->rx_big.fill_cnt < ss->rx_big.mask + 1) {
 		printk(KERN_ERR "myri10ge: %s: alloced only %d big bufs\n",
-		       dev->name, mgp->rx_big.fill_cnt);
+		       dev->name, ss->rx_big.fill_cnt);
 		goto abort_with_rx_big_ring;
 	}
 
 	return 0;
 
 abort_with_rx_big_ring:
-	for (i = mgp->rx_big.cnt; i < mgp->rx_big.fill_cnt; i++) {
-		int idx = i & mgp->rx_big.mask;
-		myri10ge_unmap_rx_page(mgp->pdev, &mgp->rx_big.info[idx],
+	for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) {
+		int idx = i & ss->rx_big.mask;
+		myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_big.info[idx],
 				       mgp->big_bytes);
-		put_page(mgp->rx_big.info[idx].page);
+		put_page(ss->rx_big.info[idx].page);
 	}
 
 abort_with_rx_small_ring:
-	for (i = mgp->rx_small.cnt; i < mgp->rx_small.fill_cnt; i++) {
-		int idx = i & mgp->rx_small.mask;
-		myri10ge_unmap_rx_page(mgp->pdev, &mgp->rx_small.info[idx],
+	for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) {
+		int idx = i & ss->rx_small.mask;
+		myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx],
 				       mgp->small_bytes + MXGEFW_PAD);
-		put_page(mgp->rx_small.info[idx].page);
+		put_page(ss->rx_small.info[idx].page);
 	}
 
-	kfree(mgp->rx_big.info);
+	kfree(ss->rx_big.info);
 
 abort_with_rx_small_info:
-	kfree(mgp->rx_small.info);
+	kfree(ss->rx_small.info);
 
 abort_with_tx_info:
-	kfree(mgp->tx.info);
+	kfree(ss->tx.info);
 
 abort_with_rx_big_shadow:
-	kfree(mgp->rx_big.shadow);
+	kfree(ss->rx_big.shadow);
 
 abort_with_rx_small_shadow:
-	kfree(mgp->rx_small.shadow);
+	kfree(ss->rx_small.shadow);
 
 abort_with_tx_req_bytes:
-	kfree(mgp->tx.req_bytes);
-	mgp->tx.req_bytes = NULL;
-	mgp->tx.req_list = NULL;
+	kfree(ss->tx.req_bytes);
+	ss->tx.req_bytes = NULL;
+	ss->tx.req_list = NULL;
 
 abort_with_nothing:
 	return status;
 }
 
-static void myri10ge_free_rings(struct net_device *dev)
+static void myri10ge_free_rings(struct myri10ge_slice_state *ss)
 {
-	struct myri10ge_priv *mgp;
+	struct myri10ge_priv *mgp = ss->mgp;
 	struct sk_buff *skb;
 	struct myri10ge_tx_buf *tx;
 	int i, len, idx;
 
-	mgp = netdev_priv(dev);
-
-	for (i = mgp->rx_big.cnt; i < mgp->rx_big.fill_cnt; i++) {
-		idx = i & mgp->rx_big.mask;
-		if (i == mgp->rx_big.fill_cnt - 1)
-			mgp->rx_big.info[idx].page_offset = MYRI10GE_ALLOC_SIZE;
-		myri10ge_unmap_rx_page(mgp->pdev, &mgp->rx_big.info[idx],
+	for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) {
+		idx = i & ss->rx_big.mask;
+		if (i == ss->rx_big.fill_cnt - 1)
+			ss->rx_big.info[idx].page_offset = MYRI10GE_ALLOC_SIZE;
+		myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_big.info[idx],
 				       mgp->big_bytes);
-		put_page(mgp->rx_big.info[idx].page);
+		put_page(ss->rx_big.info[idx].page);
 	}
 
-	for (i = mgp->rx_small.cnt; i < mgp->rx_small.fill_cnt; i++) {
-		idx = i & mgp->rx_small.mask;
-		if (i == mgp->rx_small.fill_cnt - 1)
-			mgp->rx_small.info[idx].page_offset =
+	for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) {
+		idx = i & ss->rx_small.mask;
+		if (i == ss->rx_small.fill_cnt - 1)
+			ss->rx_small.info[idx].page_offset =
 			    MYRI10GE_ALLOC_SIZE;
-		myri10ge_unmap_rx_page(mgp->pdev, &mgp->rx_small.info[idx],
+		myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx],
 				       mgp->small_bytes + MXGEFW_PAD);
-		put_page(mgp->rx_small.info[idx].page);
+		put_page(ss->rx_small.info[idx].page);
 	}
-	tx = &mgp->tx;
+	tx = &ss->tx;
 	while (tx->done != tx->req) {
 		idx = tx->done & tx->mask;
 		skb = tx->info[idx].skb;
@@ -1714,7 +1804,7 @@
 		len = pci_unmap_len(&tx->info[idx], len);
 		pci_unmap_len_set(&tx->info[idx], len, 0);
 		if (skb) {
-			mgp->stats.tx_dropped++;
+			ss->stats.tx_dropped++;
 			dev_kfree_skb_any(skb);
 			if (len)
 				pci_unmap_single(mgp->pdev,
@@ -1729,19 +1819,19 @@
 					       PCI_DMA_TODEVICE);
 		}
 	}
-	kfree(mgp->rx_big.info);
+	kfree(ss->rx_big.info);
 
-	kfree(mgp->rx_small.info);
+	kfree(ss->rx_small.info);
 
-	kfree(mgp->tx.info);
+	kfree(ss->tx.info);
 
-	kfree(mgp->rx_big.shadow);
+	kfree(ss->rx_big.shadow);
 
-	kfree(mgp->rx_small.shadow);
+	kfree(ss->rx_small.shadow);
 
-	kfree(mgp->tx.req_bytes);
-	mgp->tx.req_bytes = NULL;
-	mgp->tx.req_list = NULL;
+	kfree(ss->tx.req_bytes);
+	ss->tx.req_bytes = NULL;
+	ss->tx.req_list = NULL;
 }
 
 static int myri10ge_request_irq(struct myri10ge_priv *mgp)
@@ -1840,13 +1930,11 @@
 
 static int myri10ge_open(struct net_device *dev)
 {
-	struct myri10ge_priv *mgp;
+	struct myri10ge_priv *mgp = netdev_priv(dev);
 	struct myri10ge_cmd cmd;
 	struct net_lro_mgr *lro_mgr;
 	int status, big_pow2;
 
-	mgp = netdev_priv(dev);
-
 	if (mgp->running != MYRI10GE_ETH_STOPPED)
 		return -EBUSY;
 
@@ -1883,16 +1971,16 @@
 	/* get the lanai pointers to the send and receive rings */
 
 	status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_OFFSET, &cmd, 0);
-	mgp->tx.lanai =
+	mgp->ss.tx.lanai =
 	    (struct mcp_kreq_ether_send __iomem *)(mgp->sram + cmd.data0);
 
 	status |=
 	    myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SMALL_RX_OFFSET, &cmd, 0);
-	mgp->rx_small.lanai =
+	mgp->ss.rx_small.lanai =
 	    (struct mcp_kreq_ether_recv __iomem *)(mgp->sram + cmd.data0);
 
 	status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_BIG_RX_OFFSET, &cmd, 0);
-	mgp->rx_big.lanai =
+	mgp->ss.rx_big.lanai =
 	    (struct mcp_kreq_ether_recv __iomem *)(mgp->sram + cmd.data0);
 
 	if (status != 0) {
@@ -1904,15 +1992,15 @@
 	}
 
 	if (myri10ge_wcfifo && mgp->wc_enabled) {
-		mgp->tx.wc_fifo = (u8 __iomem *) mgp->sram + MXGEFW_ETH_SEND_4;
-		mgp->rx_small.wc_fifo =
+		mgp->ss.tx.wc_fifo = (u8 __iomem *) mgp->sram + MXGEFW_ETH_SEND_4;
+		mgp->ss.rx_small.wc_fifo =
 		    (u8 __iomem *) mgp->sram + MXGEFW_ETH_RECV_SMALL;
-		mgp->rx_big.wc_fifo =
+		mgp->ss.rx_big.wc_fifo =
 		    (u8 __iomem *) mgp->sram + MXGEFW_ETH_RECV_BIG;
 	} else {
-		mgp->tx.wc_fifo = NULL;
-		mgp->rx_small.wc_fifo = NULL;
-		mgp->rx_big.wc_fifo = NULL;
+		mgp->ss.tx.wc_fifo = NULL;
+		mgp->ss.rx_small.wc_fifo = NULL;
+		mgp->ss.rx_big.wc_fifo = NULL;
 	}
 
 	/* Firmware needs the big buff size as a power of 2.  Lie and
@@ -1929,7 +2017,7 @@
 		mgp->big_bytes = big_pow2;
 	}
 
-	status = myri10ge_allocate_rings(dev);
+	status = myri10ge_allocate_rings(&mgp->ss);
 	if (status != 0)
 		goto abort_with_irq;
 
@@ -1948,12 +2036,12 @@
 		goto abort_with_rings;
 	}
 
-	cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->fw_stats_bus);
-	cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->fw_stats_bus);
+	cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->ss.fw_stats_bus);
+	cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->ss.fw_stats_bus);
 	cmd.data2 = sizeof(struct mcp_irq_data);
 	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_STATS_DMA_V2, &cmd, 0);
 	if (status == -ENOSYS) {
-		dma_addr_t bus = mgp->fw_stats_bus;
+		dma_addr_t bus = mgp->ss.fw_stats_bus;
 		bus += offsetof(struct mcp_irq_data, send_done_count);
 		cmd.data0 = MYRI10GE_LOWPART_TO_U32(bus);
 		cmd.data1 = MYRI10GE_HIGHPART_TO_U32(bus);
@@ -1974,20 +2062,20 @@
 	mgp->link_state = ~0U;
 	mgp->rdma_tags_available = 15;
 
-	lro_mgr = &mgp->rx_done.lro_mgr;
+	lro_mgr = &mgp->ss.rx_done.lro_mgr;
 	lro_mgr->dev = dev;
 	lro_mgr->features = LRO_F_NAPI;
 	lro_mgr->ip_summed = CHECKSUM_COMPLETE;
 	lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
 	lro_mgr->max_desc = MYRI10GE_MAX_LRO_DESCRIPTORS;
-	lro_mgr->lro_arr = mgp->rx_done.lro_desc;
+	lro_mgr->lro_arr = mgp->ss.rx_done.lro_desc;
 	lro_mgr->get_frag_header = myri10ge_get_frag_header;
 	lro_mgr->max_aggr = myri10ge_lro_max_pkts;
 	lro_mgr->frag_align_pad = 2;
 	if (lro_mgr->max_aggr > MAX_SKB_FRAGS)
 		lro_mgr->max_aggr = MAX_SKB_FRAGS;
 
-	napi_enable(&mgp->napi);	/* must happen prior to any irq */
+	napi_enable(&mgp->ss.napi);	/* must happen prior to any irq */
 
 	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_UP, &cmd, 0);
 	if (status) {
@@ -1996,8 +2084,8 @@
 		goto abort_with_rings;
 	}
 
-	mgp->wake_queue = 0;
-	mgp->stop_queue = 0;
+	mgp->ss.tx.wake_queue = 0;
+	mgp->ss.tx.stop_queue = 0;
 	mgp->running = MYRI10GE_ETH_RUNNING;
 	mgp->watchdog_timer.expires = jiffies + myri10ge_watchdog_timeout * HZ;
 	add_timer(&mgp->watchdog_timer);
@@ -2005,7 +2093,7 @@
 	return 0;
 
 abort_with_rings:
-	myri10ge_free_rings(dev);
+	myri10ge_free_rings(&mgp->ss);
 
 abort_with_irq:
 	myri10ge_free_irq(mgp);
@@ -2017,21 +2105,19 @@
 
 static int myri10ge_close(struct net_device *dev)
 {
-	struct myri10ge_priv *mgp;
+	struct myri10ge_priv *mgp = netdev_priv(dev);
 	struct myri10ge_cmd cmd;
 	int status, old_down_cnt;
 
-	mgp = netdev_priv(dev);
-
 	if (mgp->running != MYRI10GE_ETH_RUNNING)
 		return 0;
 
-	if (mgp->tx.req_bytes == NULL)
+	if (mgp->ss.tx.req_bytes == NULL)
 		return 0;
 
 	del_timer_sync(&mgp->watchdog_timer);
 	mgp->running = MYRI10GE_ETH_STOPPING;
-	napi_disable(&mgp->napi);
+	napi_disable(&mgp->ss.napi);
 	netif_carrier_off(dev);
 	netif_stop_queue(dev);
 	old_down_cnt = mgp->down_cnt;
@@ -2047,7 +2133,7 @@
 
 	netif_tx_disable(dev);
 	myri10ge_free_irq(mgp);
-	myri10ge_free_rings(dev);
+	myri10ge_free_rings(&mgp->ss);
 
 	mgp->running = MYRI10GE_ETH_STOPPED;
 	return 0;
@@ -2143,7 +2229,7 @@
 
 /*
  * Transmit a packet.  We need to split the packet so that a single
- * segment does not cross myri10ge->tx.boundary, so this makes segment
+ * segment does not cross myri10ge->tx_boundary, so this makes segment
  * counting tricky.  So rather than try to count segments up front, we
  * just give up if there are too few segments to hold a reasonably
  * fragmented packet currently available.  If we run
@@ -2154,8 +2240,9 @@
 static int myri10ge_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct myri10ge_priv *mgp = netdev_priv(dev);
+	struct myri10ge_slice_state *ss;
 	struct mcp_kreq_ether_send *req;
-	struct myri10ge_tx_buf *tx = &mgp->tx;
+	struct myri10ge_tx_buf *tx;
 	struct skb_frag_struct *frag;
 	dma_addr_t bus;
 	u32 low;
@@ -2166,6 +2253,9 @@
 	int cum_len, seglen, boundary, rdma_count;
 	u8 flags, odd_flag;
 
+	/* always transmit through slice 0 */
+	ss = &mgp->ss;
+	tx = &ss->tx;
 again:
 	req = tx->req_list;
 	avail = tx->mask - 1 - (tx->req - tx->done);
@@ -2180,7 +2270,7 @@
 
 	if ((unlikely(avail < max_segments))) {
 		/* we are out of transmit resources */
-		mgp->stop_queue++;
+		tx->stop_queue++;
 		netif_stop_queue(dev);
 		return 1;
 	}
@@ -2242,7 +2332,7 @@
 			if (skb_padto(skb, ETH_ZLEN)) {
 				/* The packet is gone, so we must
 				 * return 0 */
-				mgp->stats.tx_dropped += 1;
+				ss->stats.tx_dropped += 1;
 				return 0;
 			}
 			/* adjust the len to account for the zero pad
@@ -2284,7 +2374,7 @@
 
 	while (1) {
 		/* Break the SKB or Fragment up into pieces which
-		 * do not cross mgp->tx.boundary */
+		 * do not cross mgp->tx_boundary */
 		low = MYRI10GE_LOWPART_TO_U32(bus);
 		high_swapped = htonl(MYRI10GE_HIGHPART_TO_U32(bus));
 		while (len) {
@@ -2294,7 +2384,8 @@
 			if (unlikely(count == max_segments))
 				goto abort_linearize;
 
-			boundary = (low + tx->boundary) & ~(tx->boundary - 1);
+			boundary =
+			    (low + mgp->tx_boundary) & ~(mgp->tx_boundary - 1);
 			seglen = boundary - low;
 			if (seglen > len)
 				seglen = len;
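
The mask arithmetic above rounds the current bus address up to the next tx_boundary multiple and clips the segment there, so no send descriptor spans a 2KB/4KB boundary. A standalone sketch with made-up values:

#include <stdio.h>

int main(void)
{
	unsigned int tx_boundary = 4096;	/* 2048 for the unaligned firmware */
	unsigned int low = 0x12345f00;		/* low 32 bits of the bus address */
	unsigned int len = 1000;		/* bytes left in this fragment */

	while (len) {
		/* next boundary-aligned address above low */
		unsigned int boundary = (low + tx_boundary) & ~(tx_boundary - 1);
		unsigned int seglen = boundary - low;

		if (seglen > len)
			seglen = len;
		printf("segment at 0x%08x, %u bytes\n", low, seglen);
		low += seglen;
		len -= seglen;
	}
	return 0;
}
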
@@ -2378,7 +2469,7 @@
 		myri10ge_submit_req_wc(tx, tx->req_list, count);
 	tx->pkt_start++;
 	if ((avail - count) < MXGEFW_MAX_SEND_DESC) {
-		mgp->stop_queue++;
+		tx->stop_queue++;
 		netif_stop_queue(dev);
 	}
 	dev->trans_start = jiffies;
@@ -2420,12 +2511,12 @@
 	if (skb_linearize(skb))
 		goto drop;
 
-	mgp->tx_linearized++;
+	tx->linearized++;
 	goto again;
 
 drop:
 	dev_kfree_skb_any(skb);
-	mgp->stats.tx_dropped += 1;
+	ss->stats.tx_dropped += 1;
 	return 0;
 
 }
@@ -2433,7 +2524,7 @@
 static int myri10ge_sw_tso(struct sk_buff *skb, struct net_device *dev)
 {
 	struct sk_buff *segs, *curr;
-	struct myri10ge_priv *mgp = dev->priv;
+	struct myri10ge_priv *mgp = netdev_priv(dev);
 	int status;
 
 	segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO6);
@@ -2473,14 +2564,13 @@
 
 static void myri10ge_set_multicast_list(struct net_device *dev)
 {
+	struct myri10ge_priv *mgp = netdev_priv(dev);
 	struct myri10ge_cmd cmd;
-	struct myri10ge_priv *mgp;
 	struct dev_mc_list *mc_list;
 	__be32 data[2] = { 0, 0 };
 	int err;
 	DECLARE_MAC_BUF(mac);
 
-	mgp = netdev_priv(dev);
 	/* can be called from atomic contexts,
 	 * pass 1 to force atomicity in myri10ge_send_cmd() */
 	myri10ge_change_promisc(mgp, dev->flags & IFF_PROMISC, 1);
@@ -2616,13 +2706,14 @@
 	ext_type = (val & PCI_EXP_FLAGS_TYPE) >> 4;
 	if (ext_type != PCI_EXP_TYPE_ROOT_PORT) {
 		if (myri10ge_ecrc_enable > 1) {
-			struct pci_dev *old_bridge = bridge;
+			struct pci_dev *prev_bridge, *old_bridge = bridge;
 
 			/* Walk the hierarchy up to the root port
 			 * where ECRC has to be enabled */
 			do {
+				prev_bridge = bridge;
 				bridge = bridge->bus->self;
-				if (!bridge) {
+				if (!bridge || prev_bridge == bridge) {
 					dev_err(dev,
 						"Failed to find root port"
 						" to force ECRC\n");
@@ -2681,9 +2772,9 @@
  * already been enabled, then it must use a firmware image which works
  * around unaligned completion packets (myri10ge_ethp_z8e.dat), and it
  * should also ensure that it never gives the device a Read-DMA which is
- * larger than 2KB by setting the tx.boundary to 2KB.  If ECRC is
+ * larger than 2KB by setting the tx_boundary to 2KB.  If ECRC is
  * enabled, then the driver should use the aligned (myri10ge_eth_z8e.dat)
- * firmware image, and set tx.boundary to 4KB.
+ * firmware image, and set tx_boundary to 4KB.
  */
 
 static void myri10ge_firmware_probe(struct myri10ge_priv *mgp)
@@ -2692,7 +2783,7 @@
 	struct device *dev = &pdev->dev;
 	int status;
 
-	mgp->tx.boundary = 4096;
+	mgp->tx_boundary = 4096;
 	/*
 	 * Verify the max read request size was set to 4KB
 	 * before trying the test with 4KB.
@@ -2704,7 +2795,7 @@
 	}
 	if (status != 4096) {
 		dev_warn(dev, "Max Read Request size != 4096 (%d)\n", status);
-		mgp->tx.boundary = 2048;
+		mgp->tx_boundary = 2048;
 	}
 	/*
 	 * load the optimized firmware (which assumes aligned PCIe
@@ -2737,7 +2828,7 @@
 			 "Please install up to date fw\n");
 abort:
 	/* fall back to using the unaligned firmware */
-	mgp->tx.boundary = 2048;
+	mgp->tx_boundary = 2048;
 	mgp->fw_name = myri10ge_fw_unaligned;
 
 }
@@ -2758,7 +2849,7 @@
 		if (link_width < 8) {
 			dev_info(&mgp->pdev->dev, "PCIE x%d Link\n",
 				 link_width);
-			mgp->tx.boundary = 4096;
+			mgp->tx_boundary = 4096;
 			mgp->fw_name = myri10ge_fw_aligned;
 		} else {
 			myri10ge_firmware_probe(mgp);
@@ -2767,12 +2858,12 @@
 		if (myri10ge_force_firmware == 1) {
 			dev_info(&mgp->pdev->dev,
 				 "Assuming aligned completions (forced)\n");
-			mgp->tx.boundary = 4096;
+			mgp->tx_boundary = 4096;
 			mgp->fw_name = myri10ge_fw_aligned;
 		} else {
 			dev_info(&mgp->pdev->dev,
 				 "Assuming unaligned completions (forced)\n");
-			mgp->tx.boundary = 2048;
+			mgp->tx_boundary = 2048;
 			mgp->fw_name = myri10ge_fw_unaligned;
 		}
 	}
@@ -2889,6 +2980,7 @@
 {
 	struct myri10ge_priv *mgp =
 	    container_of(work, struct myri10ge_priv, watchdog_work);
+	struct myri10ge_tx_buf *tx;
 	u32 reboot;
 	int status;
 	u16 cmd, vendor;
@@ -2938,15 +3030,16 @@
 
 		printk(KERN_ERR "myri10ge: %s: device timeout, resetting\n",
 		       mgp->dev->name);
+		tx = &mgp->ss.tx;
 		printk(KERN_INFO "myri10ge: %s: %d %d %d %d %d\n",
-		       mgp->dev->name, mgp->tx.req, mgp->tx.done,
-		       mgp->tx.pkt_start, mgp->tx.pkt_done,
-		       (int)ntohl(mgp->fw_stats->send_done_count));
+		       mgp->dev->name, tx->req, tx->done,
+		       tx->pkt_start, tx->pkt_done,
+		       (int)ntohl(mgp->ss.fw_stats->send_done_count));
 		msleep(2000);
 		printk(KERN_INFO "myri10ge: %s: %d %d %d %d %d\n",
-		       mgp->dev->name, mgp->tx.req, mgp->tx.done,
-		       mgp->tx.pkt_start, mgp->tx.pkt_done,
-		       (int)ntohl(mgp->fw_stats->send_done_count));
+		       mgp->dev->name, tx->req, tx->done,
+		       tx->pkt_start, tx->pkt_done,
+		       (int)ntohl(mgp->ss.fw_stats->send_done_count));
 	}
 	rtnl_lock();
 	myri10ge_close(mgp->dev);
@@ -2969,28 +3062,31 @@
 static void myri10ge_watchdog_timer(unsigned long arg)
 {
 	struct myri10ge_priv *mgp;
+	struct myri10ge_slice_state *ss;
 	u32 rx_pause_cnt;
 
 	mgp = (struct myri10ge_priv *)arg;
 
-	if (mgp->rx_small.watchdog_needed) {
-		myri10ge_alloc_rx_pages(mgp, &mgp->rx_small,
-					mgp->small_bytes + MXGEFW_PAD, 1);
-		if (mgp->rx_small.fill_cnt - mgp->rx_small.cnt >=
-		    myri10ge_fill_thresh)
-			mgp->rx_small.watchdog_needed = 0;
-	}
-	if (mgp->rx_big.watchdog_needed) {
-		myri10ge_alloc_rx_pages(mgp, &mgp->rx_big, mgp->big_bytes, 1);
-		if (mgp->rx_big.fill_cnt - mgp->rx_big.cnt >=
-		    myri10ge_fill_thresh)
-			mgp->rx_big.watchdog_needed = 0;
-	}
-	rx_pause_cnt = ntohl(mgp->fw_stats->dropped_pause);
+	rx_pause_cnt = ntohl(mgp->ss.fw_stats->dropped_pause);
 
-	if (mgp->tx.req != mgp->tx.done &&
-	    mgp->tx.done == mgp->watchdog_tx_done &&
-	    mgp->watchdog_tx_req != mgp->watchdog_tx_done) {
+	ss = &mgp->ss;
+	if (ss->rx_small.watchdog_needed) {
+		myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
+					mgp->small_bytes + MXGEFW_PAD, 1);
+		if (ss->rx_small.fill_cnt - ss->rx_small.cnt >=
+		    myri10ge_fill_thresh)
+			ss->rx_small.watchdog_needed = 0;
+	}
+	if (ss->rx_big.watchdog_needed) {
+		myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 1);
+		if (ss->rx_big.fill_cnt - ss->rx_big.cnt >=
+		    myri10ge_fill_thresh)
+			ss->rx_big.watchdog_needed = 0;
+	}
+
+	if (ss->tx.req != ss->tx.done &&
+	    ss->tx.done == ss->watchdog_tx_done &&
+	    ss->watchdog_tx_req != ss->watchdog_tx_done) {
 		/* nic seems like it might be stuck.. */
 		if (rx_pause_cnt != mgp->watchdog_pause) {
 			if (net_ratelimit())
@@ -3005,8 +3101,8 @@
 	/* rearm timer */
 	mod_timer(&mgp->watchdog_timer,
 		  jiffies + myri10ge_watchdog_timeout * HZ);
-	mgp->watchdog_tx_done = mgp->tx.done;
-	mgp->watchdog_tx_req = mgp->tx.req;
+	ss->watchdog_tx_done = ss->tx.done;
+	ss->watchdog_tx_req = ss->tx.req;
 	mgp->watchdog_pause = rx_pause_cnt;
 }
 
@@ -3030,7 +3126,7 @@
 
 	mgp = netdev_priv(netdev);
 	mgp->dev = netdev;
-	netif_napi_add(netdev, &mgp->napi, myri10ge_poll, myri10ge_napi_weight);
+	netif_napi_add(netdev, &mgp->ss.napi, myri10ge_poll, myri10ge_napi_weight);
 	mgp->pdev = pdev;
 	mgp->csum_flag = MXGEFW_FLAGS_CKSUM;
 	mgp->pause = myri10ge_flow_control;
@@ -3076,9 +3172,9 @@
 	if (mgp->cmd == NULL)
 		goto abort_with_netdev;
 
-	mgp->fw_stats = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->fw_stats),
-					   &mgp->fw_stats_bus, GFP_KERNEL);
-	if (mgp->fw_stats == NULL)
+	mgp->ss.fw_stats = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->ss.fw_stats),
+					   &mgp->ss.fw_stats_bus, GFP_KERNEL);
+	if (mgp->ss.fw_stats == NULL)
 		goto abort_with_cmd;
 
 	mgp->board_span = pci_resource_len(pdev, 0);
@@ -3118,12 +3214,12 @@
 		netdev->dev_addr[i] = mgp->mac_addr[i];
 
 	/* allocate rx done ring */
-	bytes = myri10ge_max_intr_slots * sizeof(*mgp->rx_done.entry);
-	mgp->rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes,
-						&mgp->rx_done.bus, GFP_KERNEL);
-	if (mgp->rx_done.entry == NULL)
+	bytes = mgp->max_intr_slots * sizeof(*mgp->ss.rx_done.entry);
+	mgp->ss.rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes,
+						&mgp->ss.rx_done.bus, GFP_KERNEL);
+	if (mgp->ss.rx_done.entry == NULL)
 		goto abort_with_ioremap;
-	memset(mgp->rx_done.entry, 0, bytes);
+	memset(mgp->ss.rx_done.entry, 0, bytes);
 
 	myri10ge_select_firmware(mgp);
 
@@ -3183,7 +3279,7 @@
 	}
 	dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, WC %s\n",
 		 (mgp->msi_enabled ? "MSI" : "xPIC"),
-		 netdev->irq, mgp->tx.boundary, mgp->fw_name,
+		 netdev->irq, mgp->tx_boundary, mgp->fw_name,
 		 (mgp->wc_enabled ? "Enabled" : "Disabled"));
 
 	return 0;
@@ -3195,9 +3291,9 @@
 	myri10ge_dummy_rdma(mgp, 0);
 
 abort_with_rx_done:
-	bytes = myri10ge_max_intr_slots * sizeof(*mgp->rx_done.entry);
+	bytes = mgp->max_intr_slots * sizeof(*mgp->ss.rx_done.entry);
 	dma_free_coherent(&pdev->dev, bytes,
-			  mgp->rx_done.entry, mgp->rx_done.bus);
+			  mgp->ss.rx_done.entry, mgp->ss.rx_done.bus);
 
 abort_with_ioremap:
 	iounmap(mgp->sram);
@@ -3207,8 +3303,8 @@
 	if (mgp->mtrr >= 0)
 		mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span);
 #endif
-	dma_free_coherent(&pdev->dev, sizeof(*mgp->fw_stats),
-			  mgp->fw_stats, mgp->fw_stats_bus);
+	dma_free_coherent(&pdev->dev, sizeof(*mgp->ss.fw_stats),
+			  mgp->ss.fw_stats, mgp->ss.fw_stats_bus);
 
 abort_with_cmd:
 	dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
@@ -3246,9 +3342,9 @@
 	/* avoid a memory leak */
 	pci_restore_state(pdev);
 
-	bytes = myri10ge_max_intr_slots * sizeof(*mgp->rx_done.entry);
+	bytes = mgp->max_intr_slots * sizeof(*mgp->ss.rx_done.entry);
 	dma_free_coherent(&pdev->dev, bytes,
-			  mgp->rx_done.entry, mgp->rx_done.bus);
+			  mgp->ss.rx_done.entry, mgp->ss.rx_done.bus);
 
 	iounmap(mgp->sram);
 
@@ -3256,8 +3352,8 @@
 	if (mgp->mtrr >= 0)
 		mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span);
 #endif
-	dma_free_coherent(&pdev->dev, sizeof(*mgp->fw_stats),
-			  mgp->fw_stats, mgp->fw_stats_bus);
+	dma_free_coherent(&pdev->dev, sizeof(*mgp->ss.fw_stats),
+			  mgp->ss.fw_stats, mgp->ss.fw_stats_bus);
 
 	dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
 			  mgp->cmd, mgp->cmd_bus);
diff --git a/drivers/net/myri10ge/myri10ge_mcp.h b/drivers/net/myri10ge/myri10ge_mcp.h
index 58e5717..fdbeeee 100644
--- a/drivers/net/myri10ge/myri10ge_mcp.h
+++ b/drivers/net/myri10ge/myri10ge_mcp.h
@@ -10,7 +10,7 @@
 	__be32 low;
 };
 
-/* 4 Bytes.  8 Bytes for NDIS drivers. */
+/* 4 Bytes */
 struct mcp_slot {
 	__sum16 checksum;
 	__be16 length;
@@ -144,6 +144,7 @@
 	 * a power of 2 number of entries.  */
 
 	MXGEFW_CMD_SET_INTRQ_SIZE,	/* in bytes */
+#define MXGEFW_CMD_SET_INTRQ_SIZE_FLAG_NO_STRICT_SIZE_CHECK  (1 << 31)
 
 	/* command to bring ethernet interface up.  Above parameters
 	 * (plus mtu & mac address) must have been exchanged prior
@@ -221,10 +222,14 @@
 	MXGEFW_CMD_GET_MAX_RSS_QUEUES,
 	MXGEFW_CMD_ENABLE_RSS_QUEUES,
 	/* data0 = number of slices n (0, 1, ..., n-1) to enable
-	 * data1 = interrupt mode. 0=share one INTx/MSI, 1=use one MSI-X per queue.
+	 * data1 = interrupt mode.
+	 * 0=share one INTx/MSI, 1=use one MSI-X per queue.
 	 * If all queues share one interrupt, the driver must have set
 	 * RSS_SHARED_INTERRUPT_DMA before enabling queues.
 	 */
+#define MXGEFW_SLICE_INTR_MODE_SHARED 0
+#define MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE 1
+
 	MXGEFW_CMD_GET_RSS_SHARED_INTERRUPT_MASK_OFFSET,
 	MXGEFW_CMD_SET_RSS_SHARED_INTERRUPT_DMA,
 	/* data0, data1 = bus address lsw, msw */
@@ -241,10 +246,14 @@
 	 * 0: disable rss.  nic does not distribute receive packets.
 	 * 1: enable rss.  nic distributes receive packets among queues.
 	 * data1 = hash type
-	 * 1: IPV4
-	 * 2: TCP_IPV4
-	 * 3: IPV4 | TCP_IPV4
+	 * 1: IPV4            (required by RSS)
+	 * 2: TCP_IPV4        (required by RSS)
+	 * 3: IPV4 | TCP_IPV4 (required by RSS)
+	 * 4: source port
 	 */
+#define MXGEFW_RSS_HASH_TYPE_IPV4      0x1
+#define MXGEFW_RSS_HASH_TYPE_TCP_IPV4  0x2
+#define MXGEFW_RSS_HASH_TYPE_SRC_PORT  0x4
 
 	MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE,
 	/* Return data = the max. size of the entire headers of a IPv6 TSO packet.
@@ -260,6 +269,8 @@
 	 * 0: Linux/FreeBSD style (NIC default)
 	 * 1: NDIS/NetBSD style
 	 */
+#define MXGEFW_TSO_MODE_LINUX  0
+#define MXGEFW_TSO_MODE_NDIS   1
 
 	MXGEFW_CMD_MDIO_READ,
 	/* data0 = dev_addr (PMA/PMD or PCS ...), data1 = register/addr */
@@ -286,6 +297,38 @@
 	/* Return data = NIC memory offset of mcp_vpump_public_global */
 	MXGEFW_CMD_RESET_VPUMP,
 	/* Resets the VPUMP state */
+
+	MXGEFW_CMD_SET_RSS_MCP_SLOT_TYPE,
+	/* data0 = mcp_slot type to use.
+	 * 0 = the default 4B mcp_slot
+	 * 1 = 8B mcp_slot_8
+	 */
+#define MXGEFW_RSS_MCP_SLOT_TYPE_MIN        0
+#define MXGEFW_RSS_MCP_SLOT_TYPE_WITH_HASH  1
+
+	MXGEFW_CMD_SET_THROTTLE_FACTOR,
+	/* set the throttle factor for ethp_z8e
+	 * data0 = throttle_factor
+	 * throttle_factor = 256 * pcie-raw-speed / tx_speed
+	 * tx_speed = 256 * pcie-raw-speed / throttle_factor
+	 *
+	 * For PCI-E x8: pcie-raw-speed == 16Gb/s
+	 * For PCI-E x4: pcie-raw-speed == 8Gb/s
+	 *
+	 * ex1: throttle_factor == 0x1a0 (416), tx_speed == 1.23GB/s == 9.846 Gb/s
+	 * ex2: throttle_factor == 0x200 (512), tx_speed == 1.0GB/s == 8 Gb/s
+	 *
+	 * with tx_boundary == 2048, max-throttle-factor == 8191 => min-speed == 500Mb/s
+	 * with tx_boundary == 4096, max-throttle-factor == 4095 => min-speed == 1Gb/s
+	 */
+
+	MXGEFW_CMD_VPUMP_UP,
+	/* Allocates VPump Connection, Send Request and Zero copy buffer address tables */
+	MXGEFW_CMD_GET_VPUMP_CLK,
+	/* Get the lanai clock */
+
+	MXGEFW_CMD_GET_DCA_OFFSET,
+	/* offset of dca control for WDMAs */
 };
 
 enum myri10ge_mcp_cmd_status {
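
The throttle-factor comment above boils down to tx_speed = 256 * pcie-raw-speed / throttle_factor. A standalone sketch reproducing its two examples for a PCI-E x8 link:

#include <stdio.h>

int main(void)
{
	double pcie_raw_gbps = 16.0;	/* PCI-E x8; use 8.0 for x4 */
	unsigned int factors[] = { 0x1a0, 0x200 };
	int i;

	for (i = 0; i < 2; i++) {
		/* tx_speed = 256 * pcie-raw-speed / throttle_factor */
		double tx_gbps = 256.0 * pcie_raw_gbps / factors[i];
		printf("throttle_factor 0x%x -> %.3f Gb/s\n", factors[i], tx_gbps);
	}
	return 0;	/* prints 9.846 Gb/s and 8.000 Gb/s */
}
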
@@ -302,7 +345,8 @@
 	MXGEFW_CMD_ERROR_UNALIGNED,
 	MXGEFW_CMD_ERROR_NO_MDIO,
 	MXGEFW_CMD_ERROR_XFP_FAILURE,
-	MXGEFW_CMD_ERROR_XFP_ABSENT
+	MXGEFW_CMD_ERROR_XFP_ABSENT,
+	MXGEFW_CMD_ERROR_BAD_PCIE_LINK
 };
 
 #define MXGEFW_OLD_IRQ_DATA_LEN 40
diff --git a/drivers/net/myri10ge/myri10ge_mcp_gen_header.h b/drivers/net/myri10ge/myri10ge_mcp_gen_header.h
index 16a810d..07d65c2 100644
--- a/drivers/net/myri10ge/myri10ge_mcp_gen_header.h
+++ b/drivers/net/myri10ge/myri10ge_mcp_gen_header.h
@@ -1,30 +1,6 @@
 #ifndef __MYRI10GE_MCP_GEN_HEADER_H__
 #define __MYRI10GE_MCP_GEN_HEADER_H__
 
-/* this file define a standard header used as a first entry point to
- * exchange information between firmware/driver and driver.  The
- * header structure can be anywhere in the mcp. It will usually be in
- * the .data section, because some fields needs to be initialized at
- * compile time.
- * The 32bit word at offset MX_HEADER_PTR_OFFSET in the mcp must
- * contains the location of the header.
- *
- * Typically a MCP will start with the following:
- * .text
- * .space 52    ! to help catch MEMORY_INT errors
- * bt start     ! jump to real code
- * nop
- * .long _gen_mcp_header
- *
- * The source will have a definition like:
- *
- * mcp_gen_header_t gen_mcp_header = {
- * .header_length = sizeof(mcp_gen_header_t),
- * .mcp_type = MCP_TYPE_XXX,
- * .version = "something $Id: mcp_gen_header.h,v 1.2 2006/05/13 10:04:35 bgoglin Exp $",
- * .mcp_globals = (unsigned)&Globals
- * };
- */
 
 #define MCP_HEADER_PTR_OFFSET  0x3c
 
@@ -32,13 +8,14 @@
 #define MCP_TYPE_PCIE 0x70636965	/* "PCIE" pcie-only MCP */
 #define MCP_TYPE_ETH 0x45544820	/* "ETH " */
 #define MCP_TYPE_MCP0 0x4d435030	/* "MCP0" */
+#define MCP_TYPE_DFLT 0x20202020	/* "    " */
 
 struct mcp_gen_header {
 	/* the first 4 fields are filled at compile time */
 	unsigned header_length;
 	__be32 mcp_type;
 	char version[128];
-	unsigned mcp_globals;	/* pointer to mcp-type specific structure */
+	unsigned mcp_private;	/* pointer to mcp-type specific structure */
 
 	/* filled by the MCP at run-time */
 	unsigned sram_size;
@@ -53,6 +30,18 @@
 	 *
 	 * Never remove any field.  Keep everything naturally align.
 	 */
+
+	/* Specifies if the running mcp is mcp0, 1, or 2. */
+	unsigned char mcp_index;
+	unsigned char disable_rabbit;
+	unsigned char unaligned_tlp;
+	unsigned char pad1;
+	unsigned counters_addr;
+	unsigned copy_block_info;	/* for small mcps loaded with "lload -d" */
+	unsigned short handoff_id_major;	/* must be equal */
+	unsigned short handoff_id_caps;	/* bitfield: new mcp must have superset */
+	unsigned msix_table_addr;	/* start address of msix table in firmware */
+	/* 8 */
 };
 
 #endif				/* __MYRI10GE_MCP_GEN_HEADER_H__ */
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 57cfd72..918f802 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -865,7 +865,6 @@
 	return 0;
 }
 
-
 static int link_status_10g_serdes(struct niu *np, int *link_up_p)
 {
 	unsigned long flags;
@@ -900,7 +899,6 @@
 	return 0;
 }
 
-
 static int link_status_1g_rgmii(struct niu *np, int *link_up_p)
 {
 	struct niu_link_config *lp = &np->link_config;
@@ -957,7 +955,6 @@
 	return err;
 }
 
-
 static int bcm8704_reset(struct niu *np)
 {
 	int err, limit;
@@ -1357,8 +1354,6 @@
 	return 0;
 }
 
-
-
 static int xcvr_init_1g_rgmii(struct niu *np)
 {
 	int err;
@@ -1419,7 +1414,6 @@
 	return 0;
 }
 
-
 static int mii_init_common(struct niu *np)
 {
 	struct niu_link_config *lp = &np->link_config;
@@ -7008,31 +7002,20 @@
 	return 0;
 }
 
-/* niu board models have a trailing dash version incremented
- * with HW rev change. Need to ingnore the  dash version while
- * checking for match
- *
- * for example, for the 10G card the current vpd.board_model
- * is 501-5283-04, of which -04 is the  dash version and have
- * to be ignored
- */
-static int niu_board_model_match(struct niu *np, const char *model)
-{
-	return !strncmp(np->vpd.board_model, model, strlen(model));
-}
-
 static int niu_pci_vpd_get_nports(struct niu *np)
 {
 	int ports = 0;
 
-	if ((niu_board_model_match(np, NIU_QGC_LP_BM_STR)) ||
-	    (niu_board_model_match(np, NIU_QGC_PEM_BM_STR)) ||
-	    (niu_board_model_match(np, NIU_ALONSO_BM_STR))) {
+	if ((!strcmp(np->vpd.model, NIU_QGC_LP_MDL_STR)) ||
+	    (!strcmp(np->vpd.model, NIU_QGC_PEM_MDL_STR)) ||
+	    (!strcmp(np->vpd.model, NIU_MARAMBA_MDL_STR)) ||
+	    (!strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) ||
+	    (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR))) {
 		ports = 4;
-	} else if ((niu_board_model_match(np, NIU_2XGF_LP_BM_STR)) ||
-		   (niu_board_model_match(np, NIU_2XGF_PEM_BM_STR)) ||
-		   (niu_board_model_match(np, NIU_FOXXY_BM_STR)) ||
-		   (niu_board_model_match(np, NIU_2XGF_MRVL_BM_STR))) {
+	} else if ((!strcmp(np->vpd.model, NIU_2XGF_LP_MDL_STR)) ||
+		   (!strcmp(np->vpd.model, NIU_2XGF_PEM_MDL_STR)) ||
+		   (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) ||
+		   (!strcmp(np->vpd.model, NIU_2XGF_MRVL_MDL_STR))) {
 		ports = 2;
 	}
 
@@ -7053,8 +7036,8 @@
 		return;
 	}
 
-	if (!strcmp(np->vpd.model, "SUNW,CP3220") ||
-	    !strcmp(np->vpd.model, "SUNW,CP3260")) {
+	if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) ||
+	    !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) {
 		np->flags |= NIU_FLAGS_10G;
 		np->flags &= ~NIU_FLAGS_FIBER;
 		np->flags |= NIU_FLAGS_XCVR_SERDES;
@@ -7065,7 +7048,7 @@
 		}
 		if (np->flags & NIU_FLAGS_10G)
 			 np->mac_xcvr = MAC_XCVR_XPCS;
-	} else if (niu_board_model_match(np, NIU_FOXXY_BM_STR)) {
+	} else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) {
 		np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER |
 			      NIU_FLAGS_HOTPLUG_PHY);
 	} else if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
@@ -7541,8 +7524,8 @@
 	u32 val;
 	int err;
 
-	if (!strcmp(np->vpd.model, "SUNW,CP3220") ||
-	    !strcmp(np->vpd.model, "SUNW,CP3260")) {
+	if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) ||
+	    !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) {
 		num_10g = 0;
 		num_1g = 2;
 		parent->plat_type = PLAT_TYPE_ATCA_CP3220;
@@ -7551,7 +7534,7 @@
 		       phy_encode(PORT_TYPE_1G, 1) |
 		       phy_encode(PORT_TYPE_1G, 2) |
 		       phy_encode(PORT_TYPE_1G, 3));
-	} else if (niu_board_model_match(np, NIU_FOXXY_BM_STR)) {
+	} else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) {
 		num_10g = 2;
 		num_1g = 0;
 		parent->num_ports = 2;
@@ -7946,6 +7929,7 @@
 	struct device_node *dp;
 	const char *phy_type;
 	const u8 *mac_addr;
+	const char *model;
 	int prop_len;
 
 	if (np->parent->plat_type == PLAT_TYPE_NIU)
@@ -8000,6 +7984,11 @@
 
 	memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len);
 
+	model = of_get_property(dp, "model", &prop_len);
+
+	if (model)
+		strcpy(np->vpd.model, model);
+
 	return 0;
 #else
 	return -EINVAL;
diff --git a/drivers/net/niu.h b/drivers/net/niu.h
index 97ffbe1..12fd570 100644
--- a/drivers/net/niu.h
+++ b/drivers/net/niu.h
@@ -2946,6 +2946,15 @@
 #define	NIU_ALONSO_BM_STR	"373-0202"
 #define	NIU_FOXXY_BM_STR	"501-7961"
 #define	NIU_2XGF_MRVL_BM_STR	"SK-6E82"
+#define	NIU_QGC_LP_MDL_STR	"SUNW,pcie-qgc"
+#define	NIU_2XGF_LP_MDL_STR	"SUNW,pcie-2xgf"
+#define	NIU_QGC_PEM_MDL_STR	"SUNW,pcie-qgc-pem"
+#define	NIU_2XGF_PEM_MDL_STR	"SUNW,pcie-2xgf-pem"
+#define	NIU_ALONSO_MDL_STR	"SUNW,CP3220"
+#define	NIU_KIMI_MDL_STR	"SUNW,CP3260"
+#define	NIU_MARAMBA_MDL_STR	"SUNW,pcie-neptune"
+#define	NIU_FOXXY_MDL_STR	"SUNW,pcie-rfem"
+#define	NIU_2XGF_MRVL_MDL_STR	"SysKonnect,pcie-2xgf"
 
 #define NIU_VPD_MIN_MAJOR	3
 #define NIU_VPD_MIN_MINOR	4
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index d3207c0..1f4ca2b 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -2458,6 +2458,7 @@
 
 out3:
 	atomic_dec(&ppp_unit_count);
+	unregister_netdev(dev);
 out2:
 	mutex_unlock(&all_ppp_mutex);
 	free_netdev(dev);
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
index 244d783..79359919 100644
--- a/drivers/net/pppol2tp.c
+++ b/drivers/net/pppol2tp.c
@@ -1621,9 +1621,16 @@
 end:
 	release_sock(sk);
 
-	if (error != 0)
-		PRINTK(session ? session->debug : -1, PPPOL2TP_MSG_CONTROL, KERN_WARNING,
-		       "%s: connect failed: %d\n", session->name, error);
+	if (error != 0) {
+		if (session)
+			PRINTK(session->debug,
+				PPPOL2TP_MSG_CONTROL, KERN_WARNING,
+				"%s: connect failed: %d\n",
+				session->name, error);
+		else
+			PRINTK(-1, PPPOL2TP_MSG_CONTROL, KERN_WARNING,
+				"connect failed: %d\n", error);
+	}
 
 	return error;
 }
diff --git a/drivers/net/ps3_gelic_wireless.c b/drivers/net/ps3_gelic_wireless.c
index 0d32123..1dae1f2 100644
--- a/drivers/net/ps3_gelic_wireless.c
+++ b/drivers/net/ps3_gelic_wireless.c
@@ -2474,6 +2474,8 @@
 
 	pr_debug("%s: <-\n", __func__);
 
+	free_page((unsigned long)wl->buf);
+
 	pr_debug("%s: destroy queues\n", __func__);
 	destroy_workqueue(wl->eurus_cmd_queue);
 	destroy_workqueue(wl->event_queue);
diff --git a/drivers/net/sfc/Makefile b/drivers/net/sfc/Makefile
index 0f02344..1d2daee 100644
--- a/drivers/net/sfc/Makefile
+++ b/drivers/net/sfc/Makefile
@@ -1,5 +1,5 @@
 sfc-y			+= efx.o falcon.o tx.o rx.o falcon_xmac.o \
-			   i2c-direct.o ethtool.o xfp_phy.o mdio_10g.o \
-			   tenxpress.o boards.o sfe4001.o
+			   i2c-direct.o selftest.o ethtool.o xfp_phy.o \
+			   mdio_10g.o tenxpress.o boards.o sfe4001.o
 
 obj-$(CONFIG_SFC)	+= sfc.o
diff --git a/drivers/net/sfc/boards.h b/drivers/net/sfc/boards.h
index f56341d..695764d 100644
--- a/drivers/net/sfc/boards.h
+++ b/drivers/net/sfc/boards.h
@@ -22,5 +22,7 @@
 extern int efx_set_board_info(struct efx_nic *efx, u16 revision_info);
 extern int sfe4001_poweron(struct efx_nic *efx);
 extern void sfe4001_poweroff(struct efx_nic *efx);
+/* Are we putting the PHY into flash config mode? */
+extern unsigned int sfe4001_phy_flash_cfg;
 
 #endif
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 59edcf7..418f2e5 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -1873,6 +1873,7 @@
 		tx_queue->queue = i;
 		tx_queue->buffer = NULL;
 		tx_queue->channel = &efx->channel[0]; /* for safety */
+		tx_queue->tso_headers_free = NULL;
 	}
 	for (i = 0; i < EFX_MAX_RX_QUEUES; i++) {
 		rx_queue = &efx->rx_queue[i];
@@ -2071,7 +2072,8 @@
 	net_dev = alloc_etherdev(sizeof(*efx));
 	if (!net_dev)
 		return -ENOMEM;
-	net_dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA;
+	net_dev->features |= (NETIF_F_IP_CSUM | NETIF_F_SG |
+			      NETIF_F_HIGHDMA | NETIF_F_TSO);
 	if (lro)
 		net_dev->features |= NETIF_F_LRO;
 	efx = net_dev->priv;
diff --git a/drivers/net/sfc/enum.h b/drivers/net/sfc/enum.h
index 43663a4..c53290d 100644
--- a/drivers/net/sfc/enum.h
+++ b/drivers/net/sfc/enum.h
@@ -10,6 +10,55 @@
 #ifndef EFX_ENUM_H
 #define EFX_ENUM_H
 
+/**
+ * enum efx_loopback_mode - loopback modes
+ * @LOOPBACK_NONE: no loopback
+ * @LOOPBACK_MAC: loopback within MAC
+ * @LOOPBACK_XGMII: loopback within MAC at XGMII level
+ * @LOOPBACK_XGXS: loopback within MAC at XGXS level
+ * @LOOPBACK_XAUI: loopback within MAC at XAUI level
+ * @LOOPBACK_PHY: loopback within PHY
+ * @LOOPBACK_PHYXS: loopback within PHY at PHYXS level
+ * @LOOPBACK_PCS: loopback within PHY at PCS level
+ * @LOOPBACK_PMAPMD: loopback within PHY at PMAPMD level
+ * @LOOPBACK_NETWORK: reflecting loopback (even further than furthest!)
+ * @LOOPBACK_MAX: limit; one greater than the highest loopback mode
+ */
+/* Please keep in order and up-to-date w.r.t the following two #defines */
+enum efx_loopback_mode {
+	LOOPBACK_NONE = 0,
+	LOOPBACK_MAC = 1,
+	LOOPBACK_XGMII = 2,
+	LOOPBACK_XGXS = 3,
+	LOOPBACK_XAUI = 4,
+	LOOPBACK_PHY = 5,
+	LOOPBACK_PHYXS = 6,
+	LOOPBACK_PCS = 7,
+	LOOPBACK_PMAPMD = 8,
+	LOOPBACK_NETWORK = 9,
+	LOOPBACK_MAX
+};
+
+#define LOOPBACK_TEST_MAX LOOPBACK_PMAPMD
+
+extern const char *efx_loopback_mode_names[];
+#define LOOPBACK_MODE_NAME(mode)			\
+	STRING_TABLE_LOOKUP(mode, efx_loopback_mode)
+#define LOOPBACK_MODE(efx)				\
+	LOOPBACK_MODE_NAME(efx->loopback_mode)
+
+/* These loopbacks occur within the controller */
+#define LOOPBACKS_10G_INTERNAL ((1 << LOOPBACK_XGMII)| \
+				(1 << LOOPBACK_XGXS) | \
+				(1 << LOOPBACK_XAUI))
+
+#define LOOPBACK_MASK(_efx)			\
+	(1 << (_efx)->loopback_mode)
+
+#define LOOPBACK_INTERNAL(_efx)						\
+	((LOOPBACKS_10G_INTERNAL & LOOPBACK_MASK(_efx)) ? 1 : 0)
+
+#define LOOPBACK_OUT_OF(_from, _to, _mask)		\
+	(((LOOPBACK_MASK(_from) & (_mask)) &&		\
+	  ((LOOPBACK_MASK(_to) & (_mask)) == 0)) ? 1 : 0)
+
 /*****************************************************************************/
 
 /**
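
The loopback helpers added above reduce "is this mode internal to the controller?" to a single bit test against a mask. A standalone sketch with a stripped-down efx_nic stand-in (only loopback_mode is kept):

#include <stdio.h>

/* Minimal stand-ins: only the loopback_mode field matters here. */
enum efx_loopback_mode { LOOPBACK_NONE, LOOPBACK_MAC, LOOPBACK_XGMII,
			 LOOPBACK_XGXS, LOOPBACK_XAUI, LOOPBACK_PHY };
struct efx_nic { enum efx_loopback_mode loopback_mode; };

#define LOOPBACKS_10G_INTERNAL ((1 << LOOPBACK_XGMII) |	\
				(1 << LOOPBACK_XGXS) |	\
				(1 << LOOPBACK_XAUI))
#define LOOPBACK_MASK(_efx)	(1 << (_efx)->loopback_mode)
#define LOOPBACK_INTERNAL(_efx)	\
	((LOOPBACKS_10G_INTERNAL & LOOPBACK_MASK(_efx)) ? 1 : 0)

int main(void)
{
	struct efx_nic efx = { .loopback_mode = LOOPBACK_XAUI };

	printf("XAUI internal? %d\n", LOOPBACK_INTERNAL(&efx));	/* 1 */
	efx.loopback_mode = LOOPBACK_NONE;
	printf("NONE internal? %d\n", LOOPBACK_INTERNAL(&efx));	/* 0 */
	return 0;
}
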
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index ad541ba..e2c75d1 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -12,12 +12,26 @@
 #include <linux/ethtool.h>
 #include <linux/rtnetlink.h>
 #include "net_driver.h"
+#include "selftest.h"
 #include "efx.h"
 #include "ethtool.h"
 #include "falcon.h"
 #include "gmii.h"
 #include "mac.h"
 
+const char *efx_loopback_mode_names[] = {
+	[LOOPBACK_NONE]		= "NONE",
+	[LOOPBACK_MAC]		= "MAC",
+	[LOOPBACK_XGMII]	= "XGMII",
+	[LOOPBACK_XGXS]		= "XGXS",
+	[LOOPBACK_XAUI] 	= "XAUI",
+	[LOOPBACK_PHY]		= "PHY",
+	[LOOPBACK_PHYXS]	= "PHY(XS)",
+	[LOOPBACK_PCS]	 	= "PHY(PCS)",
+	[LOOPBACK_PMAPMD]	= "PHY(PMAPMD)",
+	[LOOPBACK_NETWORK]	= "NETWORK",
+};
+
 static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable);
 
 struct ethtool_string {
@@ -217,23 +231,179 @@
 	strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
 }
 
+/**
+ * efx_fill_test - fill in an individual self-test entry
+ * @test_index:		Index of the test
+ * @strings:		Ethtool strings, or %NULL
+ * @data:		Ethtool test results, or %NULL
+ * @test:		Pointer to test result (used only if data != %NULL)
+ * @unit_format:	Unit name format (e.g. "channel\%d")
+ * @unit_id:		Unit id (e.g. 0 for "channel0")
+ * @test_format:	Test name format (e.g. "loopback.\%s.tx_sent")
+ * @test_id:		Test id (e.g. "PHY" for "loopback.PHY.tx_sent")
+ *
+ * Fill in an individual self-test entry.
+ */
+static void efx_fill_test(unsigned int test_index,
+			  struct ethtool_string *strings, u64 *data,
+			  int *test, const char *unit_format, int unit_id,
+			  const char *test_format, const char *test_id)
+{
+	struct ethtool_string unit_str, test_str;
+
+	/* Fill data value, if applicable */
+	if (data)
+		data[test_index] = *test;
+
+	/* Fill string, if applicable */
+	if (strings) {
+		snprintf(unit_str.name, sizeof(unit_str.name),
+			 unit_format, unit_id);
+		snprintf(test_str.name, sizeof(test_str.name),
+			 test_format, test_id);
+		snprintf(strings[test_index].name,
+			 sizeof(strings[test_index].name),
+			 "%-9s%-17s", unit_str.name, test_str.name);
+	}
+}
+
+#define EFX_PORT_NAME "port%d", 0
+#define EFX_CHANNEL_NAME(_channel) "channel%d", _channel->channel
+#define EFX_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->queue
+#define EFX_RX_QUEUE_NAME(_rx_queue) "rxq%d", _rx_queue->queue
+#define EFX_LOOPBACK_NAME(_mode, _counter)			\
+	"loopback.%s." _counter, LOOPBACK_MODE_NAME(mode)
+
+/**
+ * efx_fill_loopback_test - fill in a block of loopback self-test entries
+ * @efx:		Efx NIC
+ * @lb_tests:		Efx loopback self-test results structure
+ * @mode:		Loopback test mode
+ * @test_index:		Starting index of the test
+ * @strings:		Ethtool strings, or %NULL
+ * @data:		Ethtool test results, or %NULL
+ */
+static int efx_fill_loopback_test(struct efx_nic *efx,
+				  struct efx_loopback_self_tests *lb_tests,
+				  enum efx_loopback_mode mode,
+				  unsigned int test_index,
+				  struct ethtool_string *strings, u64 *data)
+{
+	struct efx_tx_queue *tx_queue;
+
+	efx_for_each_tx_queue(tx_queue, efx) {
+		efx_fill_test(test_index++, strings, data,
+			      &lb_tests->tx_sent[tx_queue->queue],
+			      EFX_TX_QUEUE_NAME(tx_queue),
+			      EFX_LOOPBACK_NAME(mode, "tx_sent"));
+		efx_fill_test(test_index++, strings, data,
+			      &lb_tests->tx_done[tx_queue->queue],
+			      EFX_TX_QUEUE_NAME(tx_queue),
+			      EFX_LOOPBACK_NAME(mode, "tx_done"));
+	}
+	efx_fill_test(test_index++, strings, data,
+		      &lb_tests->rx_good,
+		      EFX_PORT_NAME,
+		      EFX_LOOPBACK_NAME(mode, "rx_good"));
+	efx_fill_test(test_index++, strings, data,
+		      &lb_tests->rx_bad,
+		      EFX_PORT_NAME,
+		      EFX_LOOPBACK_NAME(mode, "rx_bad"));
+
+	return test_index;
+}
+
+/**
+ * efx_ethtool_fill_self_tests - get self-test details
+ * @efx:		Efx NIC
+ * @tests:		Efx self-test results structure, or %NULL
+ * @strings:		Ethtool strings, or %NULL
+ * @data:		Ethtool test results, or %NULL
+ */
+static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
+				       struct efx_self_tests *tests,
+				       struct ethtool_string *strings,
+				       u64 *data)
+{
+	struct efx_channel *channel;
+	unsigned int n = 0;
+	enum efx_loopback_mode mode;
+
+	/* Interrupt */
+	efx_fill_test(n++, strings, data, &tests->interrupt,
+		      "core", 0, "interrupt", NULL);
+
+	/* Event queues */
+	efx_for_each_channel(channel, efx) {
+		efx_fill_test(n++, strings, data,
+			      &tests->eventq_dma[channel->channel],
+			      EFX_CHANNEL_NAME(channel),
+			      "eventq.dma", NULL);
+		efx_fill_test(n++, strings, data,
+			      &tests->eventq_int[channel->channel],
+			      EFX_CHANNEL_NAME(channel),
+			      "eventq.int", NULL);
+		efx_fill_test(n++, strings, data,
+			      &tests->eventq_poll[channel->channel],
+			      EFX_CHANNEL_NAME(channel),
+			      "eventq.poll", NULL);
+	}
+
+	/* PHY presence */
+	efx_fill_test(n++, strings, data, &tests->phy_ok,
+		      EFX_PORT_NAME, "phy_ok", NULL);
+
+	/* Loopback tests */
+	efx_fill_test(n++, strings, data, &tests->loopback_speed,
+		      EFX_PORT_NAME, "loopback.speed", NULL);
+	efx_fill_test(n++, strings, data, &tests->loopback_full_duplex,
+		      EFX_PORT_NAME, "loopback.full_duplex", NULL);
+	for (mode = LOOPBACK_NONE; mode < LOOPBACK_TEST_MAX; mode++) {
+		if (!(efx->loopback_modes & (1 << mode)))
+			continue;
+		n = efx_fill_loopback_test(efx,
+					   &tests->loopback[mode], mode, n,
+					   strings, data);
+	}
+
+	return n;
+}
+
 static int efx_ethtool_get_stats_count(struct net_device *net_dev)
 {
 	return EFX_ETHTOOL_NUM_STATS;
 }
 
+static int efx_ethtool_self_test_count(struct net_device *net_dev)
+{
+	struct efx_nic *efx = net_dev->priv;
+
+	return efx_ethtool_fill_self_tests(efx, NULL, NULL, NULL);
+}
+
 static void efx_ethtool_get_strings(struct net_device *net_dev,
 				    u32 string_set, u8 *strings)
 {
+	struct efx_nic *efx = net_dev->priv;
 	struct ethtool_string *ethtool_strings =
 		(struct ethtool_string *)strings;
 	int i;
 
-	if (string_set == ETH_SS_STATS)
+	switch (string_set) {
+	case ETH_SS_STATS:
 		for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++)
 			strncpy(ethtool_strings[i].name,
 				efx_ethtool_stats[i].name,
 				sizeof(ethtool_strings[i].name));
+		break;
+	case ETH_SS_TEST:
+		efx_ethtool_fill_self_tests(efx, NULL,
+					    ethtool_strings, NULL);
+		break;
+	default:
+		/* No other string sets */
+		break;
+	}
 }
 
 static void efx_ethtool_get_stats(struct net_device *net_dev,
@@ -272,6 +442,22 @@
 	}
 }
 
+static int efx_ethtool_set_tso(struct net_device *net_dev, u32 enable)
+{
+	int rc;
+
+	/* Our TSO requires TX checksumming, so force TX checksumming
+	 * on when TSO is enabled.
+	 */
+	if (enable) {
+		rc = efx_ethtool_set_tx_csum(net_dev, 1);
+		if (rc)
+			return rc;
+	}
+
+	return ethtool_op_set_tso(net_dev, enable);
+}
+
 static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable)
 {
 	struct efx_nic *efx = net_dev->priv;
@@ -283,6 +469,15 @@
 
 	efx_flush_queues(efx);
 
+	/* Our TSO requires TX checksumming, so disable TSO when
+	 * checksumming is disabled
+	 */
+	if (!enable) {
+		rc = efx_ethtool_set_tso(net_dev, 0);
+		if (rc)
+			return rc;
+	}
+
 	return 0;
 }
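
/* Summary of the coupling between the two hooks above (a sketch; the flag
 * names are the standard ethtool tso/tx offload flags):
 *   - enabling TSO (ethtool -K ethX tso on) first forces TX checksumming on;
 *   - disabling TX checksumming (ethtool -K ethX tx off) first forces TSO off;
 * so TSO is never left enabled without TX checksum offload.
 */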
 
@@ -305,6 +500,64 @@
 	return efx->rx_checksum_enabled;
 }
 
+static void efx_ethtool_self_test(struct net_device *net_dev,
+				  struct ethtool_test *test, u64 *data)
+{
+	struct efx_nic *efx = net_dev->priv;
+	struct efx_self_tests efx_tests;
+	int offline, already_up;
+	int rc;
+
+	ASSERT_RTNL();
+
+	/* Zero the results now so that the failure paths below report
+	 * well-defined values rather than uninitialised stack contents */
+	memset(&efx_tests, 0, sizeof(efx_tests));
+
+	if (efx->state != STATE_RUNNING) {
+		rc = -EIO;
+		goto fail1;
+	}
+
+	/* We need rx buffers and interrupts. */
+	already_up = (efx->net_dev->flags & IFF_UP);
+	if (!already_up) {
+		rc = dev_open(efx->net_dev);
+		if (rc) {
+			EFX_ERR(efx, "failed opening device.\n");
+			goto fail2;
+		}
+	}
+
+	offline = (test->flags & ETH_TEST_FL_OFFLINE);
+
+	/* Perform online self tests first */
+	rc = efx_online_test(efx, &efx_tests);
+	if (rc)
+		goto out;
+
+	/* Perform offline tests only if online tests passed */
+	if (offline) {
+		/* Stop the kernel from sending packets during the test. */
+		efx_stop_queue(efx);
+		rc = efx_flush_queues(efx);
+		if (!rc)
+			rc = efx_offline_test(efx, &efx_tests,
+					      efx->loopback_modes);
+		efx_wake_queue(efx);
+	}
+
+ out:
+	if (!already_up)
+		dev_close(efx->net_dev);
+
+	EFX_LOG(efx, "%s all %sline self-tests\n",
+		rc == 0 ? "passed" : "failed", offline ? "off" : "on");
+
+ fail2:
+ fail1:
+	/* Fill ethtool results structures */
+	efx_ethtool_fill_self_tests(efx, &efx_tests, NULL, data);
+	if (rc)
+		test->flags |= ETH_TEST_FL_FAILED;
+}
+
 /* Restart autonegotiation */
 static int efx_ethtool_nway_reset(struct net_device *net_dev)
 {
@@ -451,8 +704,12 @@
 	.set_tx_csum		= efx_ethtool_set_tx_csum,
 	.get_sg			= ethtool_op_get_sg,
 	.set_sg			= ethtool_op_set_sg,
+	.get_tso		= ethtool_op_get_tso,
+	.set_tso		= efx_ethtool_set_tso,
 	.get_flags		= ethtool_op_get_flags,
 	.set_flags		= ethtool_op_set_flags,
+	.self_test_count	= efx_ethtool_self_test_count,
+	.self_test		= efx_ethtool_self_test,
 	.get_strings		= efx_ethtool_get_strings,
 	.phys_id		= efx_ethtool_phys_id,
 	.get_stats_count	= efx_ethtool_get_stats_count,
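
One property worth noting: .self_test_count, .get_strings and .self_test must all agree on the number of test entries, which is why they all funnel through efx_ethtool_fill_self_tests().  A minimal sketch of a consistency check (hypothetical helper, not in the driver):

static void example_check_self_test_count(struct efx_nic *efx,
					  struct efx_self_tests *tests,
					  u64 *data)
{
	/* Counting pass: no strings, no results filled in */
	int n_count = efx_ethtool_fill_self_tests(efx, NULL, NULL, NULL);
	/* Filling pass: results only */
	int n_fill = efx_ethtool_fill_self_tests(efx, tests, NULL, data);

	WARN_ON(n_count != n_fill);	/* ethtool relies on these matching */
}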
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 46db549..b57cc68 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -1129,6 +1129,7 @@
 	case RX_RECOVERY_EV_DECODE:
 		EFX_ERR(efx, "channel %d seen DRIVER RX_RESET event. "
 			"Resetting.\n", channel->channel);
+		atomic_inc(&efx->rx_reset);
 		efx_schedule_reset(efx,
 				   EFX_WORKAROUND_6555(efx) ?
 				   RESET_TYPE_RX_RECOVERY :
@@ -1731,7 +1732,8 @@
 	efx_oword_t temp;
 	int count;
 
-	if (FALCON_REV(efx) < FALCON_REV_B0)
+	if ((FALCON_REV(efx) < FALCON_REV_B0) ||
+	    (efx->loopback_mode != LOOPBACK_NONE))
 		return;
 
 	falcon_read(efx, &temp, MAC0_CTRL_REG_KER);
@@ -2091,6 +2093,8 @@
 			efx->phy_type);
 		return -1;
 	}
+
+	efx->loopback_modes = LOOPBACKS_10G_INTERNAL | efx->phy_op->loopbacks;
 	return 0;
 }
 
@@ -2468,14 +2472,12 @@
  fail5:
 	falcon_free_buffer(efx, &efx->irq_status);
  fail4:
-	/* fall-thru */
  fail3:
 	if (nic_data->pci_dev2) {
 		pci_dev_put(nic_data->pci_dev2);
 		nic_data->pci_dev2 = NULL;
 	}
  fail2:
-	/* fall-thru */
  fail1:
 	kfree(efx->nic_data);
 	return rc;
diff --git a/drivers/net/sfc/falcon_hwdefs.h b/drivers/net/sfc/falcon_hwdefs.h
index 0485a63..06e2d68 100644
--- a/drivers/net/sfc/falcon_hwdefs.h
+++ b/drivers/net/sfc/falcon_hwdefs.h
@@ -636,6 +636,14 @@
 #define XX_HIDRVA_WIDTH 1
 #define XX_LODRVA_LBN 8
 #define XX_LODRVA_WIDTH 1
+#define XX_LPBKD_LBN 3
+#define XX_LPBKD_WIDTH 1
+#define XX_LPBKC_LBN 2
+#define XX_LPBKC_WIDTH 1
+#define XX_LPBKB_LBN 1
+#define XX_LPBKB_WIDTH 1
+#define XX_LPBKA_LBN 0
+#define XX_LPBKA_WIDTH 1
 
 #define XX_TXDRV_CTL_REG_MAC 0x12
 #define XX_DEQD_LBN 28
@@ -656,8 +664,14 @@
 #define XX_DTXA_WIDTH 4
 
 /* XAUI XGXS core status register */
-#define XX_FORCE_SIG_DECODE_FORCED 0xff
 #define XX_CORE_STAT_REG_MAC 0x16
+#define XX_FORCE_SIG_LBN 24
+#define XX_FORCE_SIG_WIDTH 8
+#define XX_FORCE_SIG_DECODE_FORCED 0xff
+#define XX_XGXS_LB_EN_LBN 23
+#define XX_XGXS_LB_EN_WIDTH 1
+#define XX_XGMII_LB_EN_LBN 22
+#define XX_XGMII_LB_EN_WIDTH 1
 #define XX_ALIGN_DONE_LBN 20
 #define XX_ALIGN_DONE_WIDTH 1
 #define XX_SYNC_STAT_LBN 16
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c
index aa7521b..a74b793 100644
--- a/drivers/net/sfc/falcon_xmac.c
+++ b/drivers/net/sfc/falcon_xmac.c
@@ -32,7 +32,7 @@
 	(FALCON_XMAC_REGBANK + ((mac_reg) * FALCON_XMAC_REG_SIZE))
 
 void falcon_xmac_writel(struct efx_nic *efx,
-			efx_dword_t *value, unsigned int mac_reg)
+			 efx_dword_t *value, unsigned int mac_reg)
 {
 	efx_oword_t temp;
 
@@ -69,6 +69,10 @@
 		udelay(10);
 	}
 
+	/* This often fails when the DSP is disabled; ignore it */
+	if (sfe4001_phy_flash_cfg != 0)
+		return 0;
+
 	EFX_ERR(efx, "timed out waiting for XMAC core reset\n");
 	return -ETIMEDOUT;
 }
@@ -223,7 +227,7 @@
 	/* The ISR latches, so clear it and re-read */
 	falcon_xmac_readl(efx, &reg, XM_MGT_INT_REG_MAC_B0);
 	falcon_xmac_readl(efx, &reg, XM_MGT_INT_REG_MAC_B0);
-	
+
 	if (EFX_DWORD_FIELD(reg, XM_LCLFLT) ||
 	    EFX_DWORD_FIELD(reg, XM_RMTFLT)) {
 		EFX_INFO(efx, "MGT_INT: "EFX_DWORD_FMT"\n", EFX_DWORD_VAL(reg));
@@ -237,7 +241,7 @@
 {
 	efx_dword_t reg;
 
-	if (FALCON_REV(efx) < FALCON_REV_B0)
+	if ((FALCON_REV(efx) < FALCON_REV_B0) || LOOPBACK_INTERNAL(efx))
 		return;
 
 	/* Flush the ISR */
@@ -284,6 +288,9 @@
 	efx_dword_t reg;
 	int align_done, sync_status, link_ok = 0;
 
+	if (LOOPBACK_INTERNAL(efx))
+		return 1;
+
 	/* Read link status */
 	falcon_xmac_readl(efx, &reg, XX_CORE_STAT_REG_MAC);
 
@@ -374,6 +381,61 @@
 	falcon_xmac_writel(efx, &reg, XM_ADR_HI_REG_MAC);
 }
 
+static void falcon_reconfigure_xgxs_core(struct efx_nic *efx)
+{
+	efx_dword_t reg;
+	int xgxs_loopback = (efx->loopback_mode == LOOPBACK_XGXS) ? 1 : 0;
+	int xaui_loopback = (efx->loopback_mode == LOOPBACK_XAUI) ? 1 : 0;
+	int xgmii_loopback =
+		(efx->loopback_mode == LOOPBACK_XGMII) ? 1 : 0;
+
+	/* XGXS block is flaky and will need to be reset if moving
+	 * into or out of XGMII, XGXS or XAUI loopbacks. */
+	if (EFX_WORKAROUND_5147(efx)) {
+		int old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback;
+		int reset_xgxs;
+
+		falcon_xmac_readl(efx, &reg, XX_CORE_STAT_REG_MAC);
+		old_xgxs_loopback = EFX_DWORD_FIELD(reg, XX_XGXS_LB_EN);
+		old_xgmii_loopback = EFX_DWORD_FIELD(reg, XX_XGMII_LB_EN);
+
+		falcon_xmac_readl(efx, &reg, XX_SD_CTL_REG_MAC);
+		old_xaui_loopback = EFX_DWORD_FIELD(reg, XX_LPBKA);
+
+		/* The PHY driver may have turned XAUI off */
+		reset_xgxs = ((xgxs_loopback != old_xgxs_loopback) ||
+			      (xaui_loopback != old_xaui_loopback) ||
+			      (xgmii_loopback != old_xgmii_loopback));
+		if (reset_xgxs) {
+			falcon_xmac_readl(efx, &reg, XX_PWR_RST_REG_MAC);
+			EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSTX_EN, 1);
+			EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSRX_EN, 1);
+			falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
+			udelay(1);
+			EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSTX_EN, 0);
+			EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSRX_EN, 0);
+			falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
+			udelay(1);
+		}
+	}
+
+	falcon_xmac_readl(efx, &reg, XX_CORE_STAT_REG_MAC);
+	EFX_SET_DWORD_FIELD(reg, XX_FORCE_SIG,
+			    (xgxs_loopback || xaui_loopback) ?
+			    XX_FORCE_SIG_DECODE_FORCED : 0);
+	EFX_SET_DWORD_FIELD(reg, XX_XGXS_LB_EN, xgxs_loopback);
+	EFX_SET_DWORD_FIELD(reg, XX_XGMII_LB_EN, xgmii_loopback);
+	falcon_xmac_writel(efx, &reg, XX_CORE_STAT_REG_MAC);
+
+	falcon_xmac_readl(efx, &reg, XX_SD_CTL_REG_MAC);
+	EFX_SET_DWORD_FIELD(reg, XX_LPBKD, xaui_loopback);
+	EFX_SET_DWORD_FIELD(reg, XX_LPBKC, xaui_loopback);
+	EFX_SET_DWORD_FIELD(reg, XX_LPBKB, xaui_loopback);
+	EFX_SET_DWORD_FIELD(reg, XX_LPBKA, xaui_loopback);
+	falcon_xmac_writel(efx, &reg, XX_SD_CTL_REG_MAC);
+}
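
/* Reading the code above, the loopback-related fields it drives are
 * (interpretation from this function, not taken from a datasheet):
 *   XX_XGMII_LB_EN       - XGMII loopback in the XGXS core
 *   XX_XGXS_LB_EN        - XGXS loopback in the XGXS core
 *   XX_LPBKA..XX_LPBKD   - per-lane XAUI SerDes loopback
 *   XX_FORCE_SIG         - signal detect forced while in XGXS/XAUI loopback
 */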
+
+
 /* Try to bring the Falcon side of the Falcon-Phy XAUI link back up if it
  * fails to come up. Bash it until it comes back up */
 static int falcon_check_xaui_link_up(struct efx_nic *efx)
@@ -382,7 +444,8 @@
 	tries = EFX_WORKAROUND_5147(efx) ? 5 : 1;
 	max_tries = tries;
 
-	if (efx->phy_type == PHY_TYPE_NONE)
+	if ((efx->loopback_mode == LOOPBACK_NETWORK) ||
+	    (efx->phy_type == PHY_TYPE_NONE))
 		return 0;
 
 	while (tries) {
@@ -408,8 +471,13 @@
 	falcon_mask_status_intr(efx, 0);
 
 	falcon_deconfigure_mac_wrapper(efx);
+
+	efx->tx_disabled = LOOPBACK_INTERNAL(efx);
 	efx->phy_op->reconfigure(efx);
+
+	falcon_reconfigure_xgxs_core(efx);
 	falcon_reconfigure_xmac_core(efx);
+
 	falcon_reconfigure_mac_wrapper(efx);
 
 	/* Ensure XAUI link is up */
@@ -491,13 +559,15 @@
 		(mac_stats->rx_bytes - mac_stats->rx_good_bytes);
 }
 
-#define EFX_XAUI_RETRAIN_MAX 8
-
 int falcon_check_xmac(struct efx_nic *efx)
 {
 	unsigned xaui_link_ok;
 	int rc;
 
+	if ((efx->loopback_mode == LOOPBACK_NETWORK) ||
+	    (efx->phy_type == PHY_TYPE_NONE))
+		return 0;
+
 	falcon_mask_status_intr(efx, 0);
 	xaui_link_ok = falcon_xaui_link_ok(efx);
 
diff --git a/drivers/net/sfc/mdio_10g.c b/drivers/net/sfc/mdio_10g.c
index dc06bb0..c4f540e 100644
--- a/drivers/net/sfc/mdio_10g.c
+++ b/drivers/net/sfc/mdio_10g.c
@@ -44,6 +44,9 @@
 	int status;
 	int phy_id = efx->mii.phy_id;
 
+	if (LOOPBACK_INTERNAL(efx))
+		return 0;
+
 	/* Read MMD STATUS2 to check it is responding. */
 	status = mdio_clause45_read(efx, phy_id, mmd, MDIO_MMDREG_STAT2);
 	if (((status >> MDIO_MMDREG_STAT2_PRESENT_LBN) &
@@ -164,6 +167,22 @@
 	int mmd = 0;
 	int good;
 
+	/* If the port is in loopback, then we should only consider a subset
+	 * of MMDs */
+	if (LOOPBACK_INTERNAL(efx))
+		return 1;
+	else if (efx->loopback_mode == LOOPBACK_NETWORK)
+		return 0;
+	else if (efx->loopback_mode == LOOPBACK_PHYXS)
+		mmd_mask &= ~(MDIO_MMDREG_DEVS0_PHYXS |
+			      MDIO_MMDREG_DEVS0_PCS |
+			      MDIO_MMDREG_DEVS0_PMAPMD);
+	else if (efx->loopback_mode == LOOPBACK_PCS)
+		mmd_mask &= ~(MDIO_MMDREG_DEVS0_PCS |
+			      MDIO_MMDREG_DEVS0_PMAPMD);
+	else if (efx->loopback_mode == LOOPBACK_PMAPMD)
+		mmd_mask &= ~MDIO_MMDREG_DEVS0_PMAPMD;
+
 	while (mmd_mask) {
 		if (mmd_mask & 1) {
 			/* Double reads because link state is latched, and a
@@ -182,6 +201,65 @@
 	return ok;
 }
 
+void mdio_clause45_transmit_disable(struct efx_nic *efx)
+{
+	int phy_id = efx->mii.phy_id;
+	int ctrl1, ctrl2;
+
+	ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD,
+					   MDIO_MMDREG_TXDIS);
+	if (efx->tx_disabled)
+		ctrl2 |= (1 << MDIO_MMDREG_TXDIS_GLOBAL_LBN);
+	else
+		ctrl2 &= ~(1 << MDIO_MMDREG_TXDIS_GLOBAL_LBN);
+	if (ctrl1 != ctrl2)
+		mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD,
+				    MDIO_MMDREG_TXDIS, ctrl2);
+}
+
+void mdio_clause45_phy_reconfigure(struct efx_nic *efx)
+{
+	int phy_id = efx->mii.phy_id;
+	int ctrl1, ctrl2;
+
+	/* Handle (with debouncing) PMA/PMD loopback */
+	ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD,
+					   MDIO_MMDREG_CTRL1);
+
+	if (efx->loopback_mode == LOOPBACK_PMAPMD)
+		ctrl2 |= (1 << MDIO_PMAPMD_CTRL1_LBACK_LBN);
+	else
+		ctrl2 &= ~(1 << MDIO_PMAPMD_CTRL1_LBACK_LBN);
+
+	if (ctrl1 != ctrl2)
+		mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD,
+				    MDIO_MMDREG_CTRL1, ctrl2);
+
+	/* Handle (with debouncing) PCS loopback */
+	ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PCS,
+					   MDIO_MMDREG_CTRL1);
+	if (efx->loopback_mode == LOOPBACK_PCS)
+		ctrl2 |= (1 << MDIO_MMDREG_CTRL1_LBACK_LBN);
+	else
+		ctrl2 &= ~(1 << MDIO_MMDREG_CTRL1_LBACK_LBN);
+
+	if (ctrl1 != ctrl2)
+		mdio_clause45_write(efx, phy_id, MDIO_MMD_PCS,
+				    MDIO_MMDREG_CTRL1, ctrl2);
+
+	/* Handle (with debouncing) PHYXS network loopback */
+	ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PHYXS,
+					   MDIO_MMDREG_CTRL1);
+	if (efx->loopback_mode == LOOPBACK_NETWORK)
+		ctrl2 |= (1 << MDIO_MMDREG_CTRL1_LBACK_LBN);
+	else
+		ctrl2 &= ~(1 << MDIO_MMDREG_CTRL1_LBACK_LBN);
+
+	if (ctrl1 != ctrl2)
+		mdio_clause45_write(efx, phy_id, MDIO_MMD_PHYXS,
+				    MDIO_MMDREG_CTRL1, ctrl2);
+}
+
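
/* The three blocks above share a "read, flip one bit, write back only if it
 * changed" pattern.  A sketch of how that could be factored (illustrative
 * helper only, using the read/write accessors already in this file):
 */
static void example_mdio_set_flag(struct efx_nic *efx, int mmd, int reg,
				  int lbn, int enable)
{
	int old, new;

	old = new = mdio_clause45_read(efx, efx->mii.phy_id, mmd, reg);
	if (enable)
		new |= (1 << lbn);
	else
		new &= ~(1 << lbn);
	if (new != old)
		mdio_clause45_write(efx, efx->mii.phy_id, mmd, reg, new);
}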
 /**
  * mdio_clause45_get_settings - Read (some of) the PHY settings over MDIO.
  * @efx:		Efx NIC
diff --git a/drivers/net/sfc/mdio_10g.h b/drivers/net/sfc/mdio_10g.h
index 2214b6d..cb99f3f 100644
--- a/drivers/net/sfc/mdio_10g.h
+++ b/drivers/net/sfc/mdio_10g.h
@@ -44,11 +44,16 @@
 #define MDIO_MMDREG_DEVS1	(6)
 #define MDIO_MMDREG_CTRL2	(7)
 #define MDIO_MMDREG_STAT2	(8)
+#define MDIO_MMDREG_TXDIS	(9)
 
 /* Bits in MMDREG_CTRL1 */
 /* Reset */
 #define MDIO_MMDREG_CTRL1_RESET_LBN	(15)
 #define MDIO_MMDREG_CTRL1_RESET_WIDTH	(1)
+/* Loopback */
+/* Loopback bit for WIS, PCS, PHYSX and DTEXS */
+#define MDIO_MMDREG_CTRL1_LBACK_LBN	(14)
+#define MDIO_MMDREG_CTRL1_LBACK_WIDTH	(1)
 
 /* Bits in MMDREG_STAT1 */
 #define MDIO_MMDREG_STAT1_FAULT_LBN	(7)
@@ -56,6 +61,9 @@
 /* Link state */
 #define MDIO_MMDREG_STAT1_LINK_LBN	(2)
 #define MDIO_MMDREG_STAT1_LINK_WIDTH	(1)
+/* Low power ability */
+#define MDIO_MMDREG_STAT1_LPABLE_LBN	(1)
+#define MDIO_MMDREG_STAT1_LPABLE_WIDTH	(1)
 
 /* Bits in ID reg */
 #define MDIO_ID_REV(_id32)	(_id32 & 0xf)
@@ -76,6 +84,14 @@
 #define MDIO_MMDREG_STAT2_PRESENT_LBN	(14)
 #define MDIO_MMDREG_STAT2_PRESENT_WIDTH (2)
 
+/* Bits in MMDREG_TXDIS */
+#define MDIO_MMDREG_TXDIS_GLOBAL_LBN    (0)
+#define MDIO_MMDREG_TXDIS_GLOBAL_WIDTH  (1)
+
+/* MMD-specific bits, ordered by MMD, then register */
+#define MDIO_PMAPMD_CTRL1_LBACK_LBN	(0)
+#define MDIO_PMAPMD_CTRL1_LBACK_WIDTH	(1)
+
 /* PMA type (4 bits) */
 #define MDIO_PMAPMD_CTRL2_10G_CX4	(0x0)
 #define MDIO_PMAPMD_CTRL2_10G_EW	(0x1)
@@ -95,7 +111,7 @@
 #define MDIO_PMAPMD_CTRL2_10_BT		(0xf)
 #define MDIO_PMAPMD_CTRL2_TYPE_MASK	(0xf)
 
-/* /\* PHY XGXS lane state *\/ */
+/* PHY XGXS lane state */
 #define MDIO_PHYXS_LANE_STATE		(0x18)
 #define MDIO_PHYXS_LANE_ALIGNED_LBN	(12)
 
@@ -217,6 +233,12 @@
 extern int mdio_clause45_links_ok(struct efx_nic *efx,
 				  unsigned int mmd_mask);
 
+/* Generic transmit disable support through PMAPMD */
+extern void mdio_clause45_transmit_disable(struct efx_nic *efx);
+
+/* Generic part of reconfigure: set/clear loopback bits */
+extern void mdio_clause45_phy_reconfigure(struct efx_nic *efx);
+
 /* Read (some of) the PHY settings over MDIO */
 extern void mdio_clause45_get_settings(struct efx_nic *efx,
 				       struct ethtool_cmd *ecmd);
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index c505482..59f261b 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -134,6 +134,8 @@
  *	Set only on the final fragment of a packet; %NULL for all other
  *	fragments.  When this fragment completes, then we can free this
  *	skb.
+ * @tsoh: The associated TSO header structure, or %NULL if this
+ *	buffer is not a TSO header.
  * @dma_addr: DMA address of the fragment.
  * @len: Length of this fragment.
  *	This field is zero when the queue slot is empty.
@@ -144,6 +146,7 @@
  */
 struct efx_tx_buffer {
 	const struct sk_buff *skb;
+	struct efx_tso_header *tsoh;
 	dma_addr_t dma_addr;
 	unsigned short len;
 	unsigned char continuation;
@@ -187,6 +190,13 @@
  *	variable indicates that the queue is full.  This is to
  *	avoid cache-line ping-pong between the xmit path and the
  *	completion path.
+ * @tso_headers_free: A list of TSO headers allocated for this TX queue
+ *	that are not in use, and so available for new TSO sends. The list
+ *	is protected by the TX queue lock.
+ * @tso_bursts: Number of times TSO xmit invoked by kernel
+ * @tso_long_headers: Number of packets with headers too long for standard
+ *	blocks
+ * @tso_packets: Number of packets via the TSO xmit path
  */
 struct efx_tx_queue {
 	/* Members which don't change on the fast path */
@@ -206,6 +216,10 @@
 	unsigned int insert_count ____cacheline_aligned_in_smp;
 	unsigned int write_count;
 	unsigned int old_read_count;
+	struct efx_tso_header *tso_headers_free;
+	unsigned int tso_bursts;
+	unsigned int tso_long_headers;
+	unsigned int tso_packets;
 };
 
 /**
@@ -434,6 +448,9 @@
 	struct efx_blinker blinker;
 };
 
+#define STRING_TABLE_LOOKUP(val, member)	\
+	member ## _names[val]
+
 enum efx_int_mode {
 	/* Be careful if altering to correct macro below */
 	EFX_INT_MODE_MSIX = 0,
@@ -506,6 +523,7 @@
  * @check_hw: Check hardware
  * @reset_xaui: Reset XAUI side of PHY for (software sequenced reset)
  * @mmds: MMD presence mask
+ * @loopbacks: Supported loopback modes mask
  */
 struct efx_phy_operations {
 	int (*init) (struct efx_nic *efx);
@@ -515,6 +533,7 @@
 	int (*check_hw) (struct efx_nic *efx);
 	void (*reset_xaui) (struct efx_nic *efx);
 	int mmds;
+	unsigned loopbacks;
 };
 
 /*
@@ -653,7 +672,6 @@
  * @phy_op: PHY interface
  * @phy_data: PHY private data (including PHY-specific stats)
  * @mii: PHY interface
- * @phy_powered: PHY power state
  * @tx_disabled: PHY transmitter turned off
  * @link_up: Link status
  * @link_options: Link options (MII/GMII format)
@@ -662,6 +680,9 @@
  * @multicast_hash: Multicast hash table
  * @flow_control: Flow control flags - separate RX/TX so can't use link_options
  * @reconfigure_work: work item for dealing with PHY events
+ * @loopback_mode: Loopback status
+ * @loopback_modes: Supported loopback mode bitmask
+ * @loopback_selftest: Offline self-test private state
  *
  * The @priv field of the corresponding &struct net_device points to
  * this.
@@ -721,6 +742,7 @@
 	struct efx_phy_operations *phy_op;
 	void *phy_data;
 	struct mii_if_info mii;
+	unsigned tx_disabled;
 
 	int link_up;
 	unsigned int link_options;
@@ -732,6 +754,10 @@
 	struct work_struct reconfigure_work;
 
 	atomic_t rx_reset;
+	enum efx_loopback_mode loopback_mode;
+	unsigned int loopback_modes;
+
+	void *loopback_selftest;
 };
 
 /**
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index 551299b..6706223 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -19,6 +19,7 @@
 #include "rx.h"
 #include "efx.h"
 #include "falcon.h"
+#include "selftest.h"
 #include "workarounds.h"
 
 /* Number of RX descriptors pushed at once. */
@@ -683,6 +684,15 @@
 	struct sk_buff *skb;
 	int lro = efx->net_dev->features & NETIF_F_LRO;
 
+	/* If we're in a loopback test, then pass the packet directly to the
+	 * loopback layer, and free the rx_buf here
+	 */
+	if (unlikely(efx->loopback_selftest)) {
+		efx_loopback_rx_packet(efx, rx_buf->data, rx_buf->len);
+		efx_free_rx_buffer(efx, rx_buf);
+		goto done;
+	}
+
 	if (rx_buf->skb) {
 		prefetch(skb_shinfo(rx_buf->skb));
 
@@ -736,7 +746,6 @@
 	/* Update allocation strategy method */
 	channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
 
-	/* fall-thru */
 done:
 	efx->net_dev->last_rx = jiffies;
 }
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
new file mode 100644
index 0000000..cbda159
--- /dev/null
+++ b/drivers/net/sfc/selftest.c
@@ -0,0 +1,717 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2008 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/netdevice.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/kernel_stat.h>
+#include <linux/pci.h>
+#include <linux/ethtool.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/udp.h>
+#include <linux/rtnetlink.h>
+#include <asm/io.h>
+#include "net_driver.h"
+#include "ethtool.h"
+#include "efx.h"
+#include "falcon.h"
+#include "selftest.h"
+#include "boards.h"
+#include "workarounds.h"
+#include "mac.h"
+
+/*
+ * Loopback test packet structure
+ *
+ * The self-test should stress every RSS vector, and unfortunately
+ * Falcon only performs RSS on TCP/UDP packets.
+ */
+struct efx_loopback_payload {
+	struct ethhdr header;
+	struct iphdr ip;
+	struct udphdr udp;
+	__be16 iteration;
+	const char msg[64];
+} __attribute__ ((packed));
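
/* Size check, for illustration (assumes the no-options IP header that
 * efx_iterate_state() builds below, ihl == 5):
 *   sizeof(struct ethhdr)   14
 *   sizeof(struct iphdr)    20
 *   sizeof(struct udphdr)    8
 *   iteration (__be16)       2
 *   msg                     64
 *   total (packed)         108 bytes per test packet
 */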
+
+/* Loopback test source MAC address */
+static const unsigned char payload_source[ETH_ALEN] = {
+	0x00, 0x0f, 0x53, 0x1b, 0x1b, 0x1b,
+};
+
+static const char *payload_msg =
+	"Hello world! This is an Efx loopback test in progress!";
+
+/**
+ * efx_selftest_state - persistent state during a selftest
+ * @flush:		Drop all packets in efx_loopback_rx_packet
+ * @packet_count:	Number of packets being used in this test
+ * @skbs:		An array of skbs transmitted
+ * @rx_good:		RX good packet count
+ * @rx_bad:		RX bad packet count
+ * @payload:		Payload used in tests
+ */
+struct efx_selftest_state {
+	int flush;
+	int packet_count;
+	struct sk_buff **skbs;
+	atomic_t rx_good;
+	atomic_t rx_bad;
+	struct efx_loopback_payload payload;
+};
+
+/**************************************************************************
+ *
+ * Configurable values
+ *
+ **************************************************************************/
+
+/* Level of loopback testing
+ *
+ * The maximum packet burst length is 16**(n-1), i.e.
+ *
+ * - Level 0 : no packets
+ * - Level 1 : 1 packet
+ * - Level 2 : 17 packets (1 * 1 packet, 1 * 16 packets)
+ * - Level 3 : 273 packets (1 * 1 packet, 1 * 16 packets, 1 * 256 packets)
+ *
+ */
+static unsigned int loopback_test_level = 3;
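
/* Worked example for the table above, matching the burst computation in
 * efx_test_loopback() below (and assuming the TX ring is large enough that
 * the ring-size clamp does not apply):
 *   iteration i = 0:  1 << (0 << 2) =   1 packet
 *   iteration i = 1:  1 << (1 << 2) =  16 packets
 *   iteration i = 2:  1 << (2 << 2) = 256 packets
 * so loopback_test_level = 3 sends 1 + 16 + 256 = 273 packets per TX queue
 * per loopback mode.
 */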
+
+/**************************************************************************
+ *
+ * Interrupt and event queue testing
+ *
+ **************************************************************************/
+
+/* Test generation and receipt of interrupts */
+static int efx_test_interrupts(struct efx_nic *efx,
+			       struct efx_self_tests *tests)
+{
+	struct efx_channel *channel;
+
+	EFX_LOG(efx, "testing interrupts\n");
+	tests->interrupt = -1;
+
+	/* Reset interrupt flag */
+	efx->last_irq_cpu = -1;
+	smp_wmb();
+
+	/* ACK each interrupting event queue. Receiving an interrupt due to
+	 * traffic before a test event is raised is considered a pass */
+	efx_for_each_channel_with_interrupt(channel, efx) {
+		if (channel->work_pending)
+			efx_process_channel_now(channel);
+		if (efx->last_irq_cpu >= 0)
+			goto success;
+	}
+
+	falcon_generate_interrupt(efx);
+
+	/* Wait for arrival of test interrupt. */
+	EFX_LOG(efx, "waiting for test interrupt\n");
+	schedule_timeout_uninterruptible(HZ / 10);
+	if (efx->last_irq_cpu >= 0)
+		goto success;
+
+	EFX_ERR(efx, "timed out waiting for interrupt\n");
+	return -ETIMEDOUT;
+
+ success:
+	EFX_LOG(efx, "test interrupt (mode %d) seen on CPU%d\n",
+		efx->interrupt_mode, efx->last_irq_cpu);
+	tests->interrupt = 1;
+	return 0;
+}
+
+/* Test generation and receipt of non-interrupting events */
+static int efx_test_eventq(struct efx_channel *channel,
+			   struct efx_self_tests *tests)
+{
+	unsigned int magic;
+
+	/* Channel specific code, limited to 20 bits */
+	magic = (0x00010150 + channel->channel);
+	EFX_LOG(channel->efx, "channel %d testing event queue with code %x\n",
+		channel->channel, magic);
+
+	tests->eventq_dma[channel->channel] = -1;
+	tests->eventq_int[channel->channel] = 1;	/* fake pass */
+	tests->eventq_poll[channel->channel] = 1;	/* fake pass */
+
+	/* Reset flag and zero magic word */
+	channel->efx->last_irq_cpu = -1;
+	channel->eventq_magic = 0;
+	smp_wmb();
+
+	falcon_generate_test_event(channel, magic);
+	udelay(1);
+
+	efx_process_channel_now(channel);
+	if (channel->eventq_magic != magic) {
+		EFX_ERR(channel->efx, "channel %d  failed to see test event\n",
+			channel->channel);
+		return -ETIMEDOUT;
+	} else {
+		tests->eventq_dma[channel->channel] = 1;
+	}
+
+	return 0;
+}
+
+/* Test generation and receipt of interrupting events */
+static int efx_test_eventq_irq(struct efx_channel *channel,
+			       struct efx_self_tests *tests)
+{
+	unsigned int magic, count;
+
+	/* Channel specific code, limited to 20 bits */
+	magic = (0x00010150 + channel->channel);
+	EFX_LOG(channel->efx, "channel %d testing event queue with code %x\n",
+		channel->channel, magic);
+
+	tests->eventq_dma[channel->channel] = -1;
+	tests->eventq_int[channel->channel] = -1;
+	tests->eventq_poll[channel->channel] = -1;
+
+	/* Reset flag and zero magic word */
+	channel->efx->last_irq_cpu = -1;
+	channel->eventq_magic = 0;
+	smp_wmb();
+
+	falcon_generate_test_event(channel, magic);
+
+	/* Wait for arrival of interrupt */
+	count = 0;
+	do {
+		schedule_timeout_uninterruptible(HZ / 100);
+
+		if (channel->work_pending)
+			efx_process_channel_now(channel);
+
+		if (channel->eventq_magic == magic)
+			goto eventq_ok;
+	} while (++count < 2);
+
+	EFX_ERR(channel->efx, "channel %d timed out waiting for event queue\n",
+		channel->channel);
+
+	/* See if interrupt arrived */
+	if (channel->efx->last_irq_cpu >= 0) {
+		EFX_ERR(channel->efx, "channel %d saw interrupt on CPU%d "
+			"during event queue test\n", channel->channel,
+			raw_smp_processor_id());
+		tests->eventq_int[channel->channel] = 1;
+	}
+
+	/* Check to see if event was received even if interrupt wasn't */
+	efx_process_channel_now(channel);
+	if (channel->eventq_magic == magic) {
+		EFX_ERR(channel->efx, "channel %d event was generated, but "
+			"failed to trigger an interrupt\n", channel->channel);
+		tests->eventq_dma[channel->channel] = 1;
+	}
+
+	return -ETIMEDOUT;
+ eventq_ok:
+	EFX_LOG(channel->efx, "channel %d event queue passed\n",
+		channel->channel);
+	tests->eventq_dma[channel->channel] = 1;
+	tests->eventq_int[channel->channel] = 1;
+	tests->eventq_poll[channel->channel] = 1;
+	return 0;
+}
+
+/**************************************************************************
+ *
+ * PHY testing
+ *
+ **************************************************************************/
+
+/* Check PHY presence by reading the PHY ID registers */
+static int efx_test_phy(struct efx_nic *efx,
+			struct efx_self_tests *tests)
+{
+	u16 physid1, physid2;
+	struct mii_if_info *mii = &efx->mii;
+	struct net_device *net_dev = efx->net_dev;
+
+	if (efx->phy_type == PHY_TYPE_NONE)
+		return 0;
+
+	EFX_LOG(efx, "testing PHY presence\n");
+	tests->phy_ok = -1;
+
+	physid1 = mii->mdio_read(net_dev, mii->phy_id, MII_PHYSID1);
+	physid2 = mii->mdio_read(net_dev, mii->phy_id, MII_PHYSID2);
+
+	if ((physid1 != 0x0000) && (physid1 != 0xffff) &&
+	    (physid2 != 0x0000) && (physid2 != 0xffff)) {
+		EFX_LOG(efx, "found MII PHY %d ID 0x%x:%x\n",
+			mii->phy_id, physid1, physid2);
+		tests->phy_ok = 1;
+		return 0;
+	}
+
+	EFX_ERR(efx, "no MII PHY present with ID %d\n", mii->phy_id);
+	return -ENODEV;
+}
+
+/**************************************************************************
+ *
+ * Loopback testing
+ * NB Only one loopback test can be executing concurrently.
+ *
+ **************************************************************************/
+
+/* Loopback test RX callback
+ * This is called for each received packet during loopback testing.
+ */
+void efx_loopback_rx_packet(struct efx_nic *efx,
+			    const char *buf_ptr, int pkt_len)
+{
+	struct efx_selftest_state *state = efx->loopback_selftest;
+	struct efx_loopback_payload *received;
+	struct efx_loopback_payload *payload;
+
+	BUG_ON(!buf_ptr);
+
+	/* If we are just flushing, then drop the packet */
+	if ((state == NULL) || state->flush)
+		return;
+
+	payload = &state->payload;
+
+	received = (struct efx_loopback_payload *)(char *) buf_ptr;
+	received->ip.saddr = payload->ip.saddr;
+	received->ip.check = payload->ip.check;
+
+	/* Check that header exists */
+	if (pkt_len < sizeof(received->header)) {
+		EFX_ERR(efx, "saw runt RX packet (length %d) in %s loopback "
+			"test\n", pkt_len, LOOPBACK_MODE(efx));
+		goto err;
+	}
+
+	/* Check that the ethernet header matches */
+	if (memcmp(&received->header, &payload->header, ETH_HLEN) != 0) {
+		EFX_ERR(efx, "saw non-loopback RX packet in %s loopback test\n",
+			LOOPBACK_MODE(efx));
+		goto err;
+	}
+
+	/* Check packet length */
+	if (pkt_len != sizeof(*payload)) {
+		EFX_ERR(efx, "saw incorrect RX packet length %d (wanted %d) in "
+			"%s loopback test\n", pkt_len, (int)sizeof(*payload),
+			LOOPBACK_MODE(efx));
+		goto err;
+	}
+
+	/* Check that IP header matches */
+	if (memcmp(&received->ip, &payload->ip, sizeof(payload->ip)) != 0) {
+		EFX_ERR(efx, "saw corrupted IP header in %s loopback test\n",
+			LOOPBACK_MODE(efx));
+		goto err;
+	}
+
+	/* Check that msg and padding matches */
+	if (memcmp(&received->msg, &payload->msg, sizeof(received->msg)) != 0) {
+		EFX_ERR(efx, "saw corrupted RX packet in %s loopback test\n",
+			LOOPBACK_MODE(efx));
+		goto err;
+	}
+
+	/* Check that iteration matches */
+	if (received->iteration != payload->iteration) {
+		EFX_ERR(efx, "saw RX packet from iteration %d (wanted %d) in "
+			"%s loopback test\n", ntohs(received->iteration),
+			ntohs(payload->iteration), LOOPBACK_MODE(efx));
+		goto err;
+	}
+
+	/* Increase correct RX count */
+	EFX_TRACE(efx, "got loopback RX in %s loopback test\n",
+		  LOOPBACK_MODE(efx));
+
+	atomic_inc(&state->rx_good);
+	return;
+
+ err:
+#ifdef EFX_ENABLE_DEBUG
+	if (atomic_read(&state->rx_bad) == 0) {
+		EFX_ERR(efx, "received packet:\n");
+		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
+			       buf_ptr, pkt_len, 0);
+		EFX_ERR(efx, "expected packet:\n");
+		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
+			       &state->payload, sizeof(state->payload), 0);
+	}
+#endif
+	atomic_inc(&state->rx_bad);
+}
+
+/* Initialise an efx_selftest_state for a new iteration */
+static void efx_iterate_state(struct efx_nic *efx)
+{
+	struct efx_selftest_state *state = efx->loopback_selftest;
+	struct net_device *net_dev = efx->net_dev;
+	struct efx_loopback_payload *payload = &state->payload;
+
+	/* Initialise the layerII header */
+	memcpy(&payload->header.h_dest, net_dev->dev_addr, ETH_ALEN);
+	memcpy(&payload->header.h_source, &payload_source, ETH_ALEN);
+	payload->header.h_proto = htons(ETH_P_IP);
+
+	/* saddr set later and used as incrementing count */
+	payload->ip.daddr = htonl(INADDR_LOOPBACK);
+	payload->ip.ihl = 5;
+	payload->ip.check = htons(0xdead);
+	payload->ip.tot_len = htons(sizeof(*payload) - sizeof(struct ethhdr));
+	payload->ip.version = IPVERSION;
+	payload->ip.protocol = IPPROTO_UDP;
+
+	/* Initialise udp header */
+	payload->udp.source = 0;
+	payload->udp.len = htons(sizeof(*payload) - sizeof(struct ethhdr) -
+				 sizeof(struct iphdr));
+	payload->udp.check = 0;	/* checksum ignored */
+
+	/* Fill out payload */
+	payload->iteration = htons(ntohs(payload->iteration) + 1);
+	memcpy(&payload->msg, payload_msg, sizeof(payload_msg));
+
+	/* Fill out remaining state members */
+	atomic_set(&state->rx_good, 0);
+	atomic_set(&state->rx_bad, 0);
+	smp_wmb();
+}
+
+static int efx_tx_loopback(struct efx_tx_queue *tx_queue)
+{
+	struct efx_nic *efx = tx_queue->efx;
+	struct efx_selftest_state *state = efx->loopback_selftest;
+	struct efx_loopback_payload *payload;
+	struct sk_buff *skb;
+	int i, rc;
+
+	/* Transmit N copies of buffer */
+	for (i = 0; i < state->packet_count; i++) {
+		/* Allocate an skb, holding an extra reference for 
+		 * transmit completion counting */
+		skb = alloc_skb(sizeof(state->payload), GFP_KERNEL);
+		if (!skb)
+			return -ENOMEM;
+		state->skbs[i] = skb;
+		skb_get(skb);
+
+		/* Copy the payload in, incrementing the source address to
+		 * exercise the rss vectors */
+		payload = ((struct efx_loopback_payload *)
+			   skb_put(skb, sizeof(state->payload)));
+		memcpy(payload, &state->payload, sizeof(state->payload));
+		payload->ip.saddr = htonl(INADDR_LOOPBACK | (i << 2));
+
+		/* Ensure everything we've written is visible to the
+		 * interrupt handler. */
+		smp_wmb();
+
+		if (NET_DEV_REGISTERED(efx))
+			netif_tx_lock_bh(efx->net_dev);
+		rc = efx_xmit(efx, tx_queue, skb);
+		if (NET_DEV_REGISTERED(efx))
+			netif_tx_unlock_bh(efx->net_dev);
+
+		if (rc != NETDEV_TX_OK) {
+			EFX_ERR(efx, "TX queue %d could not transmit packet %d "
+				"of %d in %s loopback test\n", tx_queue->queue,
+				i + 1, state->packet_count, LOOPBACK_MODE(efx));
+
+			/* Defer cleaning up the other skbs for the caller */
+			kfree_skb(skb);
+			return -EPIPE;
+		}
+	}
+
+	return 0;
+}
+
+static int efx_rx_loopback(struct efx_tx_queue *tx_queue,
+			   struct efx_loopback_self_tests *lb_tests)
+{
+	struct efx_nic *efx = tx_queue->efx;
+	struct efx_selftest_state *state = efx->loopback_selftest;
+	struct sk_buff *skb;
+	int tx_done = 0, rx_good, rx_bad;
+	int i, rc = 0;
+
+	if (NET_DEV_REGISTERED(efx))
+		netif_tx_lock_bh(efx->net_dev);
+
+	/* Count the number of TX completions, and decrement the refcnt. Any
+	 * skbs not already completed will be freed when the queue is
+	 * flushed */
+	for (i = 0; i < state->packet_count; i++) {
+		skb = state->skbs[i];
+		if (skb && !skb_shared(skb))
+			++tx_done;
+		dev_kfree_skb_any(skb);
+	}
+
+	if (NET_DEV_REGISTERED(efx))
+		netif_tx_unlock_bh(efx->net_dev);
+
+	/* Check TX completion and received packet counts */
+	rx_good = atomic_read(&state->rx_good);
+	rx_bad = atomic_read(&state->rx_bad);
+	if (tx_done != state->packet_count) {
+		/* Don't free the skbs; they will be picked up on TX
+		 * overflow or channel teardown.
+		 */
+		EFX_ERR(efx, "TX queue %d saw only %d out of an expected %d "
+			"TX completion events in %s loopback test\n",
+			tx_queue->queue, tx_done, state->packet_count,
+			LOOPBACK_MODE(efx));
+		rc = -ETIMEDOUT;
+		/* Carry on so that we see the RX errors as well */
+	}
+
+	/* We may always be up to a flush away from our desired packet total */
+	if (rx_good != state->packet_count) {
+		EFX_LOG(efx, "TX queue %d saw only %d out of an expected %d "
+			"received packets in %s loopback test\n",
+			tx_queue->queue, rx_good, state->packet_count,
+			LOOPBACK_MODE(efx));
+		rc = -ETIMEDOUT;
+		/* Fall through */
+	}
+
+	/* Update loopback test structure */
+	lb_tests->tx_sent[tx_queue->queue] += state->packet_count;
+	lb_tests->tx_done[tx_queue->queue] += tx_done;
+	lb_tests->rx_good += rx_good;
+	lb_tests->rx_bad += rx_bad;
+
+	return rc;
+}
+
+static int
+efx_test_loopback(struct efx_tx_queue *tx_queue,
+		  struct efx_loopback_self_tests *lb_tests)
+{
+	struct efx_nic *efx = tx_queue->efx;
+	struct efx_selftest_state *state = efx->loopback_selftest;
+	struct efx_channel *channel;
+	int i, rc = 0;
+
+	for (i = 0; i < loopback_test_level; i++) {
+		/* Determine how many packets to send */
+		state->packet_count = (efx->type->txd_ring_mask + 1) / 3;
+		state->packet_count = min(1 << (i << 2), state->packet_count);
+		state->skbs = kzalloc(sizeof(state->skbs[0]) *
+				      state->packet_count, GFP_KERNEL);
+		if (!state->skbs)
+			return -ENOMEM;
+		state->flush = 0;
+
+		EFX_LOG(efx, "TX queue %d testing %s loopback with %d "
+			"packets\n", tx_queue->queue, LOOPBACK_MODE(efx),
+			state->packet_count);
+
+		efx_iterate_state(efx);
+		rc = efx_tx_loopback(tx_queue);
+
+		/* NAPI polling is not enabled, so process the channels
+		 * synchronously */
+		schedule_timeout_uninterruptible(HZ / 50);
+		efx_for_each_channel_with_interrupt(channel, efx) {
+			if (channel->work_pending)
+				efx_process_channel_now(channel);
+		}
+
+		rc |= efx_rx_loopback(tx_queue, lb_tests);
+		kfree(state->skbs);
+
+		if (rc) {
+			/* Wait a while to ensure there are no packets
+			 * floating around after a failure. */
+			schedule_timeout_uninterruptible(HZ / 10);
+			return rc;
+		}
+	}
+
+	EFX_LOG(efx, "TX queue %d passed %s loopback test with a burst length "
+		"of %d packets\n", tx_queue->queue, LOOPBACK_MODE(efx),
+		state->packet_count);
+
+	return rc;
+}
+
+static int efx_test_loopbacks(struct efx_nic *efx,
+			      struct efx_self_tests *tests,
+			      unsigned int loopback_modes)
+{
+	struct efx_selftest_state *state = efx->loopback_selftest;
+	struct ethtool_cmd ecmd, ecmd_loopback;
+	struct efx_tx_queue *tx_queue;
+	enum efx_loopback_mode old_mode, mode;
+	int count, rc = 0, link_up;
+
+	rc = efx_ethtool_get_settings(efx->net_dev, &ecmd);
+	if (rc) {
+		EFX_ERR(efx, "could not get GMII settings\n");
+		return rc;
+	}
+	old_mode = efx->loopback_mode;
+
+	/* Disable autonegotiation for the purposes of loopback */
+	memcpy(&ecmd_loopback, &ecmd, sizeof(ecmd_loopback));
+	if (ecmd_loopback.autoneg == AUTONEG_ENABLE) {
+		ecmd_loopback.autoneg = AUTONEG_DISABLE;
+		ecmd_loopback.duplex = DUPLEX_FULL;
+		ecmd_loopback.speed = SPEED_10000;
+	}
+
+	rc = efx_ethtool_set_settings(efx->net_dev, &ecmd_loopback);
+	if (rc) {
+		EFX_ERR(efx, "could not disable autonegotiation\n");
+		goto out;
+	}
+	tests->loopback_speed = ecmd_loopback.speed;
+	tests->loopback_full_duplex = ecmd_loopback.duplex;
+
+	/* Test all supported loopback modes */
+	for (mode = LOOPBACK_NONE; mode < LOOPBACK_TEST_MAX; mode++) {
+		if (!(loopback_modes & (1 << mode)))
+			continue;
+
+		/* Move the port into the specified loopback mode. */
+		state->flush = 1;
+		efx->loopback_mode = mode;
+		efx_reconfigure_port(efx);
+
+		/* Wait for the PHY to signal the link is up */
+		count = 0;
+		do {
+			struct efx_channel *channel = &efx->channel[0];
+
+			falcon_check_xmac(efx);
+			schedule_timeout_uninterruptible(HZ / 10);
+			if (channel->work_pending)
+				efx_process_channel_now(channel);
+			/* Wait for PHY events to be processed */
+			flush_workqueue(efx->workqueue);
+			rmb();
+
+			/* efx->link_up can be 1 even if the XAUI link is down
+			 * (bug5762). Usually, it's not worth bothering with the
+			 * difference, but for selftests, we need that extra
+			 * guarantee that the link is really, really, up.
+			 */
+			link_up = efx->link_up;
+			if (!falcon_xaui_link_ok(efx))
+				link_up = 0;
+
+		} while ((++count < 20) && !link_up);
+
+		/* The link should now be up. If it isn't, there is no point
+		 * in attempting a loopback test */
+		if (!link_up) {
+			EFX_ERR(efx, "loopback %s never came up\n",
+				LOOPBACK_MODE(efx));
+			rc = -EIO;
+			goto out;
+		}
+
+		EFX_LOG(efx, "link came up in %s loopback in %d iterations\n",
+			LOOPBACK_MODE(efx), count);
+
+		/* Test every TX queue */
+		efx_for_each_tx_queue(tx_queue, efx) {
+			rc |= efx_test_loopback(tx_queue,
+						&tests->loopback[mode]);
+			if (rc)
+				goto out;
+		}
+	}
+
+ out:
+	/* Take out of loopback and restore PHY settings */
+	state->flush = 1;
+	efx->loopback_mode = old_mode;
+	efx_ethtool_set_settings(efx->net_dev, &ecmd);
+
+	return rc;
+}
+
+/**************************************************************************
+ *
+ * Entry points
+ *
+ *************************************************************************/
+
+/* Online (i.e. non-disruptive) testing
+ * This checks interrupt generation, event delivery and PHY presence. */
+int efx_online_test(struct efx_nic *efx, struct efx_self_tests *tests)
+{
+	struct efx_channel *channel;
+	int rc = 0;
+
+	EFX_LOG(efx, "performing online self-tests\n");
+
+	rc |= efx_test_interrupts(efx, tests);
+	efx_for_each_channel(channel, efx) {
+		if (channel->has_interrupt)
+			rc |= efx_test_eventq_irq(channel, tests);
+		else
+			rc |= efx_test_eventq(channel, tests);
+	}
+	rc |= efx_test_phy(efx, tests);
+
+	if (rc)
+		EFX_ERR(efx, "failed online self-tests\n");
+
+	return rc;
+}
+
+/* Offline (i.e. disruptive) testing
+ * This checks MAC and PHY loopback on the specified port. */
+int efx_offline_test(struct efx_nic *efx,
+		     struct efx_self_tests *tests, unsigned int loopback_modes)
+{
+	struct efx_selftest_state *state;
+	int rc = 0;
+
+	EFX_LOG(efx, "performing offline self-tests\n");
+
+	/* Create a selftest_state structure to hold state for the test */
+	state = kzalloc(sizeof(*state), GFP_KERNEL);
+	if (state == NULL) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	/* Set the port loopback_selftest member. From this point on
+	 * all received packets will be dropped. Mark the state as
+	 * "flushing" so all inflight packets are dropped */
+	BUG_ON(efx->loopback_selftest);
+	state->flush = 1;
+	efx->loopback_selftest = (void *)state;
+
+	rc = efx_test_loopbacks(efx, tests, loopback_modes);
+
+	efx->loopback_selftest = NULL;
+	wmb();
+	kfree(state);
+
+ out:
+	if (rc)
+		EFX_ERR(efx, "failed offline self-tests\n");
+
+	return rc;
+}
+
diff --git a/drivers/net/sfc/selftest.h b/drivers/net/sfc/selftest.h
new file mode 100644
index 0000000..f6999c2
--- /dev/null
+++ b/drivers/net/sfc/selftest.h
@@ -0,0 +1,50 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2008 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_SELFTEST_H
+#define EFX_SELFTEST_H
+
+#include "net_driver.h"
+
+/*
+ * Self tests
+ */
+
+struct efx_loopback_self_tests {
+	int tx_sent[EFX_MAX_TX_QUEUES];
+	int tx_done[EFX_MAX_TX_QUEUES];
+	int rx_good;
+	int rx_bad;
+};
+
+/* Efx self test results
+ * For fields which are not counters, 1 indicates success and -1
+ * indicates failure.
+ */
+struct efx_self_tests {
+	int interrupt;
+	int eventq_dma[EFX_MAX_CHANNELS];
+	int eventq_int[EFX_MAX_CHANNELS];
+	int eventq_poll[EFX_MAX_CHANNELS];
+	int phy_ok;
+	int loopback_speed;
+	int loopback_full_duplex;
+	struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX];
+};
+
+extern void efx_loopback_rx_packet(struct efx_nic *efx,
+				   const char *buf_ptr, int pkt_len);
+extern int efx_online_test(struct efx_nic *efx,
+			   struct efx_self_tests *tests);
+extern int efx_offline_test(struct efx_nic *efx,
+			    struct efx_self_tests *tests,
+			    unsigned int loopback_modes);
+
+#endif /* EFX_SELFTEST_H */
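
A minimal sketch of how a caller might summarise these results (hypothetical helper; it assumes the convention stated above, with 0 additionally meaning a test was not run, and omits the loopback counters for brevity):

static int example_count_failures(const struct efx_self_tests *tests,
				  int n_channels)
{
	int i, failures = 0;

	if (tests->interrupt < 0)
		failures++;
	if (tests->phy_ok < 0)
		failures++;
	for (i = 0; i < n_channels; i++) {
		if (tests->eventq_dma[i] < 0)
			failures++;
		if (tests->eventq_int[i] < 0)
			failures++;
		if (tests->eventq_poll[i] < 0)
			failures++;
	}
	return failures;
}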
diff --git a/drivers/net/sfc/sfe4001.c b/drivers/net/sfc/sfe4001.c
index 11fa9fb..725d1a5 100644
--- a/drivers/net/sfc/sfe4001.c
+++ b/drivers/net/sfc/sfe4001.c
@@ -130,6 +130,15 @@
 	(void) efx_i2c_read(i2c, MAX6647, RSL, &in, 1);
 }
 
+/* The P0_EN_3V3X line on SFE4001 boards (from A2 onward) is connected
+ * to the FLASH_CFG_1 input on the DSP.  We must keep it high at power-
+ * up to allow writing the flash (done through MDIO from userland).
+ */
+unsigned int sfe4001_phy_flash_cfg;
+module_param_named(phy_flash_cfg, sfe4001_phy_flash_cfg, uint, 0444);
+MODULE_PARM_DESC(phy_flash_cfg,
+		 "Force PHY to enter flash configuration mode");
+
 /* This board uses an I2C expander to provide power to the PHY, which needs to
  * be turned on before the PHY can be used.
  * Context: Process context, rtnl lock held
@@ -203,6 +212,8 @@
 		out = 0xff & ~((1 << P0_EN_1V2_LBN) | (1 << P0_EN_2V5_LBN) |
 			       (1 << P0_EN_3V3X_LBN) | (1 << P0_EN_5V_LBN) |
 			       (1 << P0_X_TRST_LBN));
+		if (sfe4001_phy_flash_cfg)
+			out |= 1 << P0_EN_3V3X_LBN;
 
 		rc = efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1);
 		if (rc)
@@ -226,6 +237,9 @@
 		if (in & (1 << P1_AFE_PWD_LBN))
 			goto done;
 
+		/* DSP doesn't look powered in flash config mode */
+		if (sfe4001_phy_flash_cfg)
+			goto done;
 	} while (++count < 20);
 
 	EFX_INFO(efx, "timed out waiting for power\n");
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
index a2e9f79..b1cd6de 100644
--- a/drivers/net/sfc/tenxpress.c
+++ b/drivers/net/sfc/tenxpress.c
@@ -24,6 +24,11 @@
 				 MDIO_MMDREG_DEVS0_PCS    | \
 				 MDIO_MMDREG_DEVS0_PHYXS)
 
+#define TENXPRESS_LOOPBACKS ((1 << LOOPBACK_PHYXS) |	\
+			     (1 << LOOPBACK_PCS) |	\
+			     (1 << LOOPBACK_PMAPMD) |	\
+			     (1 << LOOPBACK_NETWORK))
+
 /* We complain if we fail to see the link partner as 10G capable this many
  * times in a row (must be > 1 as sampling the autoneg. registers is racy)
  */
@@ -72,6 +77,10 @@
 #define PMA_PMD_BIST_RXD_LBN	(1)
 #define PMA_PMD_BIST_AFE_LBN	(0)
 
+/* Special Software reset register */
+#define PMA_PMD_EXT_CTRL_REG 49152
+#define PMA_PMD_EXT_SSR_LBN 15
+
 #define BIST_MAX_DELAY	(1000)
 #define BIST_POLL_DELAY	(10)
 
@@ -86,6 +95,11 @@
 #define	PCS_TEST_SELECT_REG 0xd807	/* PRM 10.5.8 */
 #define	CLK312_EN_LBN 3
 
+/* PHYXS registers */
+#define PHYXS_TEST1         (49162)
+#define LOOPBACK_NEAR_LBN   (8)
+#define LOOPBACK_NEAR_WIDTH (1)
+
 /* Boot status register */
 #define PCS_BOOT_STATUS_REG	(0xd000)
 #define PCS_BOOT_FATAL_ERR_LBN	(0)
@@ -106,7 +120,9 @@
 
 struct tenxpress_phy_data {
 	enum tenxpress_state state;
+	enum efx_loopback_mode loopback_mode;
 	atomic_t bad_crc_count;
+	int tx_disabled;
 	int bad_lp_tries;
 };
 
@@ -199,10 +215,12 @@
 
 	tenxpress_set_state(efx, TENXPRESS_STATUS_NORMAL);
 
-	rc = mdio_clause45_wait_reset_mmds(efx,
-					   TENXPRESS_REQUIRED_DEVS);
-	if (rc < 0)
-		goto fail;
+	if (!sfe4001_phy_flash_cfg) {
+		rc = mdio_clause45_wait_reset_mmds(efx,
+						   TENXPRESS_REQUIRED_DEVS);
+		if (rc < 0)
+			goto fail;
+	}
 
 	rc = mdio_clause45_check_mmds(efx, TENXPRESS_REQUIRED_DEVS, 0);
 	if (rc < 0)
@@ -225,6 +243,35 @@
 	return rc;
 }
 
+static int tenxpress_special_reset(struct efx_nic *efx)
+{
+	int rc, reg;
+
+	EFX_TRACE(efx, "%s\n", __func__);
+
+	/* Initiate reset */
+	reg = mdio_clause45_read(efx, efx->mii.phy_id,
+				 MDIO_MMD_PMAPMD, PMA_PMD_EXT_CTRL_REG);
+	reg |= (1 << PMA_PMD_EXT_SSR_LBN);
+	mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
+			    PMA_PMD_EXT_CTRL_REG, reg);
+
+	msleep(200);
+
+	/* Wait for the blocks to come out of reset */
+	rc = mdio_clause45_wait_reset_mmds(efx,
+					   TENXPRESS_REQUIRED_DEVS);
+	if (rc < 0)
+		return rc;
+
+	/* Try and reconfigure the device */
+	rc = tenxpress_init(efx);
+	if (rc < 0)
+		return rc;
+
+	return 0;
+}
+
 static void tenxpress_set_bad_lp(struct efx_nic *efx, int bad_lp)
 {
 	struct tenxpress_phy_data *pd = efx->phy_data;
@@ -299,11 +346,46 @@
 	return ok;
 }
 
+static void tenxpress_phyxs_loopback(struct efx_nic *efx)
+{
+	int phy_id = efx->mii.phy_id;
+	int ctrl1, ctrl2;
+
+	ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PHYXS,
+					   PHYXS_TEST1);
+	if (efx->loopback_mode == LOOPBACK_PHYXS)
+		ctrl2 |= (1 << LOOPBACK_NEAR_LBN);
+	else
+		ctrl2 &= ~(1 << LOOPBACK_NEAR_LBN);
+	if (ctrl1 != ctrl2)
+		mdio_clause45_write(efx, phy_id, MDIO_MMD_PHYXS,
+				    PHYXS_TEST1, ctrl2);
+}
+
 static void tenxpress_phy_reconfigure(struct efx_nic *efx)
 {
+	struct tenxpress_phy_data *phy_data = efx->phy_data;
+	int loop_change = LOOPBACK_OUT_OF(phy_data, efx,
+					  TENXPRESS_LOOPBACKS);
+
 	if (!tenxpress_state_is(efx, TENXPRESS_STATUS_NORMAL))
 		return;
 
+	/* When coming out of transmit disable, coming out of low power
+	 * mode, or moving out of any PHY internal loopback mode,
+	 * perform a special software reset */
+	if ((phy_data->tx_disabled && !efx->tx_disabled) ||
+	    loop_change) {
+		(void) tenxpress_special_reset(efx);
+		falcon_reset_xaui(efx);
+	}
+
+	mdio_clause45_transmit_disable(efx);
+	mdio_clause45_phy_reconfigure(efx);
+	tenxpress_phyxs_loopback(efx);
+
+	phy_data->tx_disabled = efx->tx_disabled;
+	phy_data->loopback_mode = efx->loopback_mode;
 	efx->link_up = tenxpress_link_ok(efx, 0);
 	efx->link_options = GM_LPA_10000FULL;
 }
@@ -431,4 +513,5 @@
 	.clear_interrupt  = tenxpress_phy_clear_interrupt,
 	.reset_xaui       = tenxpress_reset_xaui,
 	.mmds             = TENXPRESS_REQUIRED_DEVS,
+	.loopbacks        = TENXPRESS_LOOPBACKS,
 };
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index fbb866b..9b436f5 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -82,6 +82,46 @@
 	}
 }
 
+/**
+ * struct efx_tso_header - a DMA mapped buffer for packet headers
+ * @next: Linked list of free ones.
+ *	The list is protected by the TX queue lock.
+ * @unmap_len: Length to unmap for an oversize buffer, or 0.
+ * @dma_addr: The DMA address of the header below.
+ *
+ * This controls the memory used for a TSO header.  Use TSOH_DATA()
+ * to find the packet header data.  Use TSOH_SIZE() to calculate the
+ * total size required for a given packet header length.  TSO headers
+ * in the free list are exactly %TSOH_STD_SIZE bytes in size.
+ */
+struct efx_tso_header {
+	union {
+		struct efx_tso_header *next;
+		size_t unmap_len;
+	};
+	dma_addr_t dma_addr;
+};
+
+static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
+			       const struct sk_buff *skb);
+static void efx_fini_tso(struct efx_tx_queue *tx_queue);
+static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue,
+			       struct efx_tso_header *tsoh);
+
+static inline void efx_tsoh_free(struct efx_tx_queue *tx_queue,
+				 struct efx_tx_buffer *buffer)
+{
+	if (buffer->tsoh) {
+		if (likely(!buffer->tsoh->unmap_len)) {
+			buffer->tsoh->next = tx_queue->tso_headers_free;
+			tx_queue->tso_headers_free = buffer->tsoh;
+		} else {
+			efx_tsoh_heap_free(tx_queue, buffer->tsoh);
+		}
+		buffer->tsoh = NULL;
+	}
+}
+
 
 /*
  * Add a socket buffer to a TX queue
@@ -114,6 +154,9 @@
 
 	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);
 
+	if (skb_shinfo((struct sk_buff *)skb)->gso_size)
+		return efx_enqueue_skb_tso(tx_queue, skb);
+
 	/* Get size of the initial fragment */
 	len = skb_headlen(skb);
 
@@ -166,6 +209,8 @@
 			insert_ptr = (tx_queue->insert_count &
 				      efx->type->txd_ring_mask);
 			buffer = &tx_queue->buffer[insert_ptr];
+			efx_tsoh_free(tx_queue, buffer);
+			EFX_BUG_ON_PARANOID(buffer->tsoh);
 			EFX_BUG_ON_PARANOID(buffer->skb);
 			EFX_BUG_ON_PARANOID(buffer->len);
 			EFX_BUG_ON_PARANOID(buffer->continuation != 1);
@@ -432,6 +477,9 @@
 
 	efx_release_tx_buffers(tx_queue);
 
+	/* Free up TSO header cache */
+	efx_fini_tso(tx_queue);
+
 	/* Release queue's stop on port, if any */
 	if (tx_queue->stopped) {
 		tx_queue->stopped = 0;
@@ -450,3 +498,619 @@
 }
 
 
+/* Efx TCP segmentation acceleration.
+ *
+ * Why?  Because by doing it here in the driver we can go significantly
+ * faster than GSO.
+ *
+ * Requires TX checksum offload support.
+ */
+
+/* Number of bytes inserted at the start of a TSO header buffer,
+ * similar to NET_IP_ALIGN.
+ */
+#if defined(__i386__) || defined(__x86_64__)
+#define TSOH_OFFSET	0
+#else
+#define TSOH_OFFSET	NET_IP_ALIGN
+#endif
+
+#define TSOH_BUFFER(tsoh)	((u8 *)(tsoh + 1) + TSOH_OFFSET)
+
+/* Total size of struct efx_tso_header, buffer and padding */
+#define TSOH_SIZE(hdr_len)					\
+	(sizeof(struct efx_tso_header) + TSOH_OFFSET + hdr_len)
+
+/* Size of blocks on free list.  Larger blocks must be allocated from
+ * the heap.
+ */
+#define TSOH_STD_SIZE		128
+
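
/* Worked example (typical TCP/IPv4 over Ethernet with no options assumed):
 * header length = 14 (MAC) + 20 (IP) + 20 (TCP) = 54 bytes, so
 *   TSOH_SIZE(54) = sizeof(struct efx_tso_header) + TSOH_OFFSET + 54
 * which is well under TSOH_STD_SIZE (128), so such headers are served from
 * the per-queue free list rather than the heap path.
 */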
+#define PTR_DIFF(p1, p2)  ((u8 *)(p1) - (u8 *)(p2))
+#define ETH_HDR_LEN(skb)  (skb_network_header(skb) - (skb)->data)
+#define SKB_TCP_OFF(skb)  PTR_DIFF(tcp_hdr(skb), (skb)->data)
+#define SKB_IPV4_OFF(skb) PTR_DIFF(ip_hdr(skb), (skb)->data)
+
+/**
+ * struct tso_state - TSO state for an SKB
+ * @remaining_len: Bytes of data we've yet to segment
+ * @seqnum: Current sequence number
+ * @packet_space: Remaining space in current packet
+ * @ifc: Input fragment cursor.
+ *	Where we are in the current fragment of the incoming SKB.  These
+ *	values get updated in place when we split a fragment over
+ *	multiple packets.
+ * @p: Parameters.
+ *	These values are set once at the start of the TSO send and do
+ *	not get changed as the routine progresses.
+ *
+ * The state used during segmentation.  It is put into this data structure
+ * just to make it easy to pass into inline functions.
+ */
+struct tso_state {
+	unsigned remaining_len;
+	unsigned seqnum;
+	unsigned packet_space;
+
+	struct {
+		/* DMA address of current position */
+		dma_addr_t dma_addr;
+		/* Remaining length */
+		unsigned int len;
+		/* DMA address and length of the whole fragment */
+		unsigned int unmap_len;
+		dma_addr_t unmap_addr;
+		struct page *page;
+		unsigned page_off;
+	} ifc;
+
+	struct {
+		/* The number of bytes of header */
+		unsigned int header_length;
+
+		/* The number of bytes to put in each outgoing segment. */
+		int full_packet_size;
+
+		/* Current IPv4 ID, host endian. */
+		unsigned ipv4_id;
+	} p;
+};
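
/* Rough walk-through of the state above (values assumed): for an skb with
 * 4344 bytes of TCP payload and an MSS (gso_size) of 1448, remaining_len
 * steps 4344 -> 2896 -> 1448 -> 0, producing three segments, each of which
 * gets its own copy of the headers from a TSO header buffer while the
 * input fragment cursor (ifc) advances through the original payload.
 */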
+
+
+/*
+ * Verify that our various assumptions about sk_buffs and the conditions
+ * under which TSO will be attempted hold true.
+ */
+static inline void efx_tso_check_safe(const struct sk_buff *skb)
+{
+	EFX_BUG_ON_PARANOID(skb->protocol != htons(ETH_P_IP));
+	EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
+			    skb->protocol);
+	EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
+	EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data)
+			     + (tcp_hdr(skb)->doff << 2u)) >
+			    skb_headlen(skb));
+}
+
+
+/*
+ * Allocate a page worth of efx_tso_header structures, and string them
+ * into the tx_queue->tso_headers_free linked list. Return 0 or -ENOMEM.
+ */
+static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
+{
+
+	struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
+	struct efx_tso_header *tsoh;
+	dma_addr_t dma_addr;
+	u8 *base_kva, *kva;
+
+	base_kva = pci_alloc_consistent(pci_dev, PAGE_SIZE, &dma_addr);
+	if (base_kva == NULL) {
+		EFX_ERR(tx_queue->efx, "Unable to allocate page for TSO"
+			" headers\n");
+		return -ENOMEM;
+	}
+
+	/* pci_alloc_consistent() allocates pages. */
+	EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u));
+
+	for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) {
+		tsoh = (struct efx_tso_header *)kva;
+		tsoh->dma_addr = dma_addr + (TSOH_BUFFER(tsoh) - base_kva);
+		tsoh->next = tx_queue->tso_headers_free;
+		tx_queue->tso_headers_free = tsoh;
+	}
+
+	return 0;
+}
+
+
+/* Free up a TSO header, and all others in the same page. */
+static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
+				struct efx_tso_header *tsoh,
+				struct pci_dev *pci_dev)
+{
+	struct efx_tso_header **p;
+	unsigned long base_kva;
+	dma_addr_t base_dma;
+
+	base_kva = (unsigned long)tsoh & PAGE_MASK;
+	base_dma = tsoh->dma_addr & PAGE_MASK;
+
+	p = &tx_queue->tso_headers_free;
+	while (*p != NULL)
+		if (((unsigned long)*p & PAGE_MASK) == base_kva)
+			*p = (*p)->next;
+		else
+			p = &(*p)->next;
+
+	pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma);
+}
+
+static struct efx_tso_header *
+efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len)
+{
+	struct efx_tso_header *tsoh;
+
+	tsoh = kmalloc(TSOH_SIZE(header_len), GFP_ATOMIC | GFP_DMA);
+	if (unlikely(!tsoh))
+		return NULL;
+
+	tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev,
+					TSOH_BUFFER(tsoh), header_len,
+					PCI_DMA_TODEVICE);
+	if (unlikely(pci_dma_mapping_error(tsoh->dma_addr))) {
+		kfree(tsoh);
+		return NULL;
+	}
+
+	tsoh->unmap_len = header_len;
+	return tsoh;
+}
+
+static void
+efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
+{
+	pci_unmap_single(tx_queue->efx->pci_dev,
+			 tsoh->dma_addr, tsoh->unmap_len,
+			 PCI_DMA_TODEVICE);
+	kfree(tsoh);
+}
+
+/**
+ * efx_tx_queue_insert - push descriptors onto the TX queue
+ * @tx_queue:		Efx TX queue
+ * @dma_addr:		DMA address of fragment
+ * @len:		Length of fragment
+ * @skb:		Only non-null for end of last segment
+ * @end_of_packet:	True if last fragment in a packet
+ * @unmap_addr:		DMA address of fragment for unmapping
+ * @unmap_len:		Only set this in last segment of a fragment
+ *
+ * Push descriptors onto the TX queue.  Return 0 on success, or 1 if
+ * @tx_queue is full.
+ */
+static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
+			       dma_addr_t dma_addr, unsigned len,
+			       const struct sk_buff *skb, int end_of_packet,
+			       dma_addr_t unmap_addr, unsigned unmap_len)
+{
+	struct efx_tx_buffer *buffer;
+	struct efx_nic *efx = tx_queue->efx;
+	unsigned dma_len, fill_level, insert_ptr, misalign;
+	int q_space;
+
+	EFX_BUG_ON_PARANOID(len <= 0);
+
+	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
+	/* -1 as there is no way to represent all descriptors used */
+	q_space = efx->type->txd_ring_mask - 1 - fill_level;
+
+	while (1) {
+		if (unlikely(q_space-- <= 0)) {
+			/* It might be that completions have happened
+			 * since the xmit path last checked.  Update
+			 * the xmit path's copy of read_count.
+			 */
+			++tx_queue->stopped;
+			/* This memory barrier protects the change of
+			 * stopped from the access of read_count. */
+			smp_mb();
+			tx_queue->old_read_count =
+				*(volatile unsigned *)&tx_queue->read_count;
+			fill_level = (tx_queue->insert_count
+				      - tx_queue->old_read_count);
+			q_space = efx->type->txd_ring_mask - 1 - fill_level;
+			if (unlikely(q_space-- <= 0))
+				return 1;
+			smp_mb();
+			--tx_queue->stopped;
+		}
+
+		insert_ptr = tx_queue->insert_count & efx->type->txd_ring_mask;
+		buffer = &tx_queue->buffer[insert_ptr];
+		++tx_queue->insert_count;
+
+		EFX_BUG_ON_PARANOID(tx_queue->insert_count -
+				    tx_queue->read_count >
+				    efx->type->txd_ring_mask);
+
+		efx_tsoh_free(tx_queue, buffer);
+		EFX_BUG_ON_PARANOID(buffer->len);
+		EFX_BUG_ON_PARANOID(buffer->unmap_len);
+		EFX_BUG_ON_PARANOID(buffer->skb);
+		EFX_BUG_ON_PARANOID(buffer->continuation != 1);
+		EFX_BUG_ON_PARANOID(buffer->tsoh);
+
+		buffer->dma_addr = dma_addr;
+
+		/* Ensure we do not cross a boundary unsupported by H/W */
+		dma_len = (~dma_addr & efx->type->tx_dma_mask) + 1;
+
+		misalign = (unsigned)dma_addr & efx->type->bug5391_mask;
+		if (misalign && dma_len + misalign > 512)
+			dma_len = 512 - misalign;
+
+		/* If there is enough space to send then do so */
+		if (dma_len >= len)
+			break;
+
+		buffer->len = dma_len; /* Don't set the other members */
+		dma_addr += dma_len;
+		len -= dma_len;
+	}
+
+	EFX_BUG_ON_PARANOID(!len);
+	buffer->len = len;
+	buffer->skb = skb;
+	buffer->continuation = !end_of_packet;
+	buffer->unmap_addr = unmap_addr;
+	buffer->unmap_len = unmap_len;
+	return 0;
+}
+
+
+/*
+ * Put a TSO header into the TX queue.
+ *
+ * This is special-cased because we know that it is small enough to fit in
+ * a single fragment, and we know it doesn't cross a page boundary.  It
+ * also allows us to not worry about end-of-packet etc.
+ */
+static inline void efx_tso_put_header(struct efx_tx_queue *tx_queue,
+				      struct efx_tso_header *tsoh, unsigned len)
+{
+	struct efx_tx_buffer *buffer;
+
+	buffer = &tx_queue->buffer[tx_queue->insert_count &
+				   tx_queue->efx->type->txd_ring_mask];
+	efx_tsoh_free(tx_queue, buffer);
+	EFX_BUG_ON_PARANOID(buffer->len);
+	EFX_BUG_ON_PARANOID(buffer->unmap_len);
+	EFX_BUG_ON_PARANOID(buffer->skb);
+	EFX_BUG_ON_PARANOID(buffer->continuation != 1);
+	EFX_BUG_ON_PARANOID(buffer->tsoh);
+	buffer->len = len;
+	buffer->dma_addr = tsoh->dma_addr;
+	buffer->tsoh = tsoh;
+
+	++tx_queue->insert_count;
+}
+
+
+/* Remove descriptors put into a tx_queue. */
+static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
+{
+	struct efx_tx_buffer *buffer;
+
+	/* Work backwards until we hit the original insert pointer value */
+	while (tx_queue->insert_count != tx_queue->write_count) {
+		--tx_queue->insert_count;
+		buffer = &tx_queue->buffer[tx_queue->insert_count &
+					   tx_queue->efx->type->txd_ring_mask];
+		efx_tsoh_free(tx_queue, buffer);
+		EFX_BUG_ON_PARANOID(buffer->skb);
+		buffer->len = 0;
+		buffer->continuation = 1;
+		if (buffer->unmap_len) {
+			pci_unmap_page(tx_queue->efx->pci_dev,
+				       buffer->unmap_addr,
+				       buffer->unmap_len, PCI_DMA_TODEVICE);
+			buffer->unmap_len = 0;
+		}
+	}
+}
+
+
+/* Parse the SKB header and initialise state. */
+static inline void tso_start(struct tso_state *st, const struct sk_buff *skb)
+{
+	/* The combined size of the Ethernet, IP and TCP headers is the
+	 * TCP header size plus the offset of the TCP header relative to
+	 * the start of the packet.
+	 */
+	st->p.header_length = ((tcp_hdr(skb)->doff << 2u)
+			       + PTR_DIFF(tcp_hdr(skb), skb->data));
+	st->p.full_packet_size = (st->p.header_length
+				  + skb_shinfo(skb)->gso_size);
+
+	st->p.ipv4_id = ntohs(ip_hdr(skb)->id);
+	st->seqnum = ntohl(tcp_hdr(skb)->seq);
+
+	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
+	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
+	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);
+
+	st->packet_space = st->p.full_packet_size;
+	st->remaining_len = skb->len - st->p.header_length;
+}
+
+
+/**
+ * tso_get_fragment - record fragment details and map for DMA
+ * @st:			TSO state
+ * @efx:		Efx NIC
+ * @len:		Length of fragment
+ * @page:		Page containing the fragment data
+ * @page_off:		Offset of the fragment within @page
+ *
+ * Record fragment details and map for DMA.  Return 0 on success, or
+ * -%ENOMEM if DMA mapping fails.
+ */
+static inline int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
+				   int len, struct page *page, int page_off)
+{
+
+	st->ifc.unmap_addr = pci_map_page(efx->pci_dev, page, page_off,
+					  len, PCI_DMA_TODEVICE);
+	if (likely(!pci_dma_mapping_error(st->ifc.unmap_addr))) {
+		st->ifc.unmap_len = len;
+		st->ifc.len = len;
+		st->ifc.dma_addr = st->ifc.unmap_addr;
+		st->ifc.page = page;
+		st->ifc.page_off = page_off;
+		return 0;
+	}
+	return -ENOMEM;
+}
+
+
+/**
+ * tso_fill_packet_with_fragment - form descriptors for the current fragment
+ * @tx_queue:		Efx TX queue
+ * @skb:		Socket buffer
+ * @st:			TSO state
+ *
+ * Form descriptors for the current fragment, until we reach the end of
+ * the fragment or end-of-packet.  Return 0 on success, or 1 if there is
+ * not enough space in @tx_queue.
+ */
+static inline int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
+						const struct sk_buff *skb,
+						struct tso_state *st)
+{
+
+	int n, end_of_packet, rc;
+
+	if (st->ifc.len == 0)
+		return 0;
+	if (st->packet_space == 0)
+		return 0;
+
+	EFX_BUG_ON_PARANOID(st->ifc.len <= 0);
+	EFX_BUG_ON_PARANOID(st->packet_space <= 0);
+
+	n = min(st->ifc.len, st->packet_space);
+
+	st->packet_space -= n;
+	st->remaining_len -= n;
+	st->ifc.len -= n;
+	st->ifc.page_off += n;
+	end_of_packet = st->remaining_len == 0 || st->packet_space == 0;
+
+	rc = efx_tx_queue_insert(tx_queue, st->ifc.dma_addr, n,
+				 st->remaining_len ? NULL : skb,
+				 end_of_packet, st->ifc.unmap_addr,
+				 st->ifc.len ? 0 : st->ifc.unmap_len);
+
+	st->ifc.dma_addr += n;
+
+	return rc;
+}
+
+
+/**
+ * tso_start_new_packet - generate a new header and prepare for the new packet
+ * @tx_queue:		Efx TX queue
+ * @skb:		Socket buffer
+ * @st:			TSO state
+ *
+ * Generate a new header and prepare for the new packet.  Return 0 on
+ * success, or -1 if we failed to allocate a header.
+ */
+static inline int tso_start_new_packet(struct efx_tx_queue *tx_queue,
+				       const struct sk_buff *skb,
+				       struct tso_state *st)
+{
+	struct efx_tso_header *tsoh;
+	struct iphdr *tsoh_iph;
+	struct tcphdr *tsoh_th;
+	unsigned ip_length;
+	u8 *header;
+
+	/* Allocate a DMA-mapped header buffer. */
+	if (likely(TSOH_SIZE(st->p.header_length) <= TSOH_STD_SIZE)) {
+		if (tx_queue->tso_headers_free == NULL)
+			if (efx_tsoh_block_alloc(tx_queue))
+				return -1;
+		EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free);
+		tsoh = tx_queue->tso_headers_free;
+		tx_queue->tso_headers_free = tsoh->next;
+		tsoh->unmap_len = 0;
+	} else {
+		tx_queue->tso_long_headers++;
+		tsoh = efx_tsoh_heap_alloc(tx_queue, st->p.header_length);
+		if (unlikely(!tsoh))
+			return -1;
+	}
+
+	header = TSOH_BUFFER(tsoh);
+	tsoh_th = (struct tcphdr *)(header + SKB_TCP_OFF(skb));
+	tsoh_iph = (struct iphdr *)(header + SKB_IPV4_OFF(skb));
+
+	/* Copy and update the headers. */
+	memcpy(header, skb->data, st->p.header_length);
+
+	tsoh_th->seq = htonl(st->seqnum);
+	st->seqnum += skb_shinfo(skb)->gso_size;
+	if (st->remaining_len > skb_shinfo(skb)->gso_size) {
+		/* This packet will not finish the TSO burst. */
+		ip_length = st->p.full_packet_size - ETH_HDR_LEN(skb);
+		tsoh_th->fin = 0;
+		tsoh_th->psh = 0;
+	} else {
+		/* This packet will be the last in the TSO burst. */
+		ip_length = (st->p.header_length - ETH_HDR_LEN(skb)
+			     + st->remaining_len);
+		tsoh_th->fin = tcp_hdr(skb)->fin;
+		tsoh_th->psh = tcp_hdr(skb)->psh;
+	}
+	tsoh_iph->tot_len = htons(ip_length);
+
+	/* Linux leaves suitable gaps in the IP ID space for us to fill. */
+	tsoh_iph->id = htons(st->p.ipv4_id);
+	st->p.ipv4_id++;
+
+	st->packet_space = skb_shinfo(skb)->gso_size;
+	++tx_queue->tso_packets;
+
+	/* Form a descriptor for this header. */
+	efx_tso_put_header(tx_queue, tsoh, st->p.header_length);
+
+	return 0;
+}
+
+
+/**
+ * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer
+ * @tx_queue:		Efx TX queue
+ * @skb:		Socket buffer
+ *
+ * Context: You must hold netif_tx_lock() to call this function.
+ *
+ * Add socket buffer @skb to @tx_queue, doing TSO, or return non-zero if
+ * @skb could not be enqueued.  In all cases @skb is consumed.  Return
+ * %NETDEV_TX_OK or %NETDEV_TX_BUSY.
+ */
+static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
+			       const struct sk_buff *skb)
+{
+	int frag_i, rc, rc2 = NETDEV_TX_OK;
+	struct tso_state state;
+	skb_frag_t *f;
+
+	/* Verify TSO is safe - these checks should never fail. */
+	efx_tso_check_safe(skb);
+
+	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);
+
+	tso_start(&state, skb);
+
+	/* Assume that skb header area contains exactly the headers, and
+	 * all payload is in the frag list.
+	 */
+	if (skb_headlen(skb) == state.p.header_length) {
+		/* Grab the first payload fragment. */
+		EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
+		frag_i = 0;
+		f = &skb_shinfo(skb)->frags[frag_i];
+		rc = tso_get_fragment(&state, tx_queue->efx,
+				      f->size, f->page, f->page_offset);
+		if (rc)
+			goto mem_err;
+	} else {
+		/* It may look like this code fragment assumes that the
+		 * skb->data portion does not cross a page boundary, but
+		 * that is not the case.  It is guaranteed to be direct
+		 * mapped memory, and therefore is physically contiguous,
+		 * and so DMA will work fine.  kmap_atomic() on this region
+		 * will just return the direct mapping, so that will work
+		 * too.
+		 */
+		int page_off = (unsigned long)skb->data & (PAGE_SIZE - 1);
+		int hl = state.p.header_length;
+		rc = tso_get_fragment(&state, tx_queue->efx,
+				      skb_headlen(skb) - hl,
+				      virt_to_page(skb->data), page_off + hl);
+		if (rc)
+			goto mem_err;
+		frag_i = -1;
+	}
+
+	if (tso_start_new_packet(tx_queue, skb, &state) < 0)
+		goto mem_err;
+
+	while (1) {
+		rc = tso_fill_packet_with_fragment(tx_queue, skb, &state);
+		if (unlikely(rc))
+			goto stop;
+
+		/* Move onto the next fragment? */
+		if (state.ifc.len == 0) {
+			if (++frag_i >= skb_shinfo(skb)->nr_frags)
+				/* End of payload reached. */
+				break;
+			f = &skb_shinfo(skb)->frags[frag_i];
+			rc = tso_get_fragment(&state, tx_queue->efx,
+					      f->size, f->page, f->page_offset);
+			if (rc)
+				goto mem_err;
+		}
+
+		/* Start a new packet? */
+		if (state.packet_space == 0 &&
+		    tso_start_new_packet(tx_queue, skb, &state) < 0)
+			goto mem_err;
+	}
+
+	/* Pass off to hardware */
+	falcon_push_buffers(tx_queue);
+
+	tx_queue->tso_bursts++;
+	return NETDEV_TX_OK;
+
+ mem_err:
+	EFX_ERR(tx_queue->efx, "Out of memory for TSO headers, or PCI mapping"
+		" error\n");
+	dev_kfree_skb_any((struct sk_buff *)skb);
+	goto unwind;
+
+ stop:
+	rc2 = NETDEV_TX_BUSY;
+
+	/* Stop the queue if it wasn't stopped before. */
+	if (tx_queue->stopped == 1)
+		efx_stop_queue(tx_queue->efx);
+
+ unwind:
+	efx_enqueue_unwind(tx_queue);
+	return rc2;
+}
+
+
+/*
+ * Free up all TSO data structures associated with tx_queue. This
+ * routine should be called only when the tx_queue is empty and will
+ * no longer be used.
+ */
+static void efx_fini_tso(struct efx_tx_queue *tx_queue)
+{
+	unsigned i;
+
+	if (tx_queue->buffer)
+		for (i = 0; i <= tx_queue->efx->type->txd_ring_mask; ++i)
+			efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
+
+	while (tx_queue->tso_headers_free != NULL)
+		efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,
+				    tx_queue->efx->pci_dev);
+}
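/*
 * Illustrative sketch, not part of the patch above: the TSO path added
 * to tx.c emits, per output packet, one copy of the headers plus at most
 * gso_size bytes of payload, advancing the TCP sequence number and IPv4
 * ID for every segment (see tso_start_new_packet()).  The standalone
 * demo below shows only that bookkeeping; tso_demo_segment() and its
 * arguments are hypothetical and do not appear in the driver.
 */
#include <stdio.h>

static void tso_demo_segment(unsigned payload_len, unsigned gso_size,
			     unsigned seqnum, unsigned ipv4_id)
{
	unsigned offset = 0;

	while (offset < payload_len) {
		/* All segments carry gso_size bytes except possibly the last */
		unsigned seg = payload_len - offset < gso_size ?
			       payload_len - offset : gso_size;

		printf("segment: payload bytes %u..%u seq=%u id=%u\n",
		       offset, offset + seg - 1, seqnum, ipv4_id);

		offset += seg;
		seqnum += seg;	/* next segment starts after this payload */
		ipv4_id++;	/* Linux leaves gaps in the ID space for this */
	}
}

/* Example: 4500 bytes of payload with an MSS of 1460 produces 4 segments */
int main(void)
{
	tso_demo_segment(4500, 1460, 1000, 42);
	return 0;
}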
diff --git a/drivers/net/sfc/xfp_phy.c b/drivers/net/sfc/xfp_phy.c
index 66dd5bf..3b9f9dd 100644
--- a/drivers/net/sfc/xfp_phy.c
+++ b/drivers/net/sfc/xfp_phy.c
@@ -24,6 +24,10 @@
 			   MDIO_MMDREG_DEVS0_PMAPMD |	\
 			   MDIO_MMDREG_DEVS0_PHYXS)
 
+#define XFP_LOOPBACKS ((1 << LOOPBACK_PCS) |		\
+		       (1 << LOOPBACK_PMAPMD) |		\
+		       (1 << LOOPBACK_NETWORK))
+
 /****************************************************************************/
 /* Quake-specific MDIO registers */
 #define MDIO_QUAKE_LED0_REG	(0xD006)
@@ -35,6 +39,10 @@
 			    mode);
 }
 
+struct xfp_phy_data {
+	int tx_disabled;
+};
+
 #define XFP_MAX_RESET_TIME 500
 #define XFP_RESET_WAIT 10
 
@@ -72,18 +80,31 @@
 
 static int xfp_phy_init(struct efx_nic *efx)
 {
+	struct xfp_phy_data *phy_data;
 	u32 devid = mdio_clause45_read_id(efx, MDIO_MMD_PHYXS);
 	int rc;
 
+	phy_data = kzalloc(sizeof(struct xfp_phy_data), GFP_KERNEL);
+	efx->phy_data = (void *) phy_data;
+
 	EFX_INFO(efx, "XFP: PHY ID reg %x (OUI %x model %x revision"
 		 " %x)\n", devid, MDIO_ID_OUI(devid), MDIO_ID_MODEL(devid),
 		 MDIO_ID_REV(devid));
 
+	phy_data->tx_disabled = efx->tx_disabled;
+
 	rc = xfp_reset_phy(efx);
 
 	EFX_INFO(efx, "XFP: PHY init %s.\n",
 		 rc ? "failed" : "successful");
+	if (rc < 0)
+		goto fail;
 
+	return 0;
+
+ fail:
+	kfree(efx->phy_data);
+	efx->phy_data = NULL;
 	return rc;
 }
 
@@ -110,6 +131,16 @@
 
 static void xfp_phy_reconfigure(struct efx_nic *efx)
 {
+	struct xfp_phy_data *phy_data = efx->phy_data;
+
+	/* Reset the PHY when moving from tx off to tx on */
+	if (phy_data->tx_disabled && !efx->tx_disabled)
+		xfp_reset_phy(efx);
+
+	mdio_clause45_transmit_disable(efx);
+	mdio_clause45_phy_reconfigure(efx);
+
+	phy_data->tx_disabled = efx->tx_disabled;
 	efx->link_up = xfp_link_ok(efx);
 	efx->link_options = GM_LPA_10000FULL;
 }
@@ -119,6 +150,10 @@
 {
 	/* Clobber the LED if it was blinking */
 	efx->board_info.blink(efx, 0);
+
+	/* Free the context block */
+	kfree(efx->phy_data);
+	efx->phy_data = NULL;
 }
 
 struct efx_phy_operations falcon_xfp_phy_ops = {
@@ -129,4 +164,5 @@
 	.clear_interrupt = xfp_phy_clear_interrupt,
 	.reset_xaui      = efx_port_dummy_op_void,
 	.mmds            = XFP_REQUIRED_DEVS,
+	.loopbacks       = XFP_LOOPBACKS,
 };
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index 7bb3ba9..c0a5eea 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -1966,13 +1966,13 @@
 struct tx_ring_info {
 	struct sk_buff	*skb;
 	DECLARE_PCI_UNMAP_ADDR(mapaddr);
-	DECLARE_PCI_UNMAP_ADDR(maplen);
+	DECLARE_PCI_UNMAP_LEN(maplen);
 };
 
 struct rx_ring_info {
 	struct sk_buff	*skb;
 	dma_addr_t	data_addr;
-	DECLARE_PCI_UNMAP_ADDR(data_size);
+	DECLARE_PCI_UNMAP_LEN(data_size);
 	dma_addr_t	frag_addr[ETH_JUMBO_MTU >> PAGE_SHIFT];
 };
 
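/*
 * Illustrative sketch: the sky2.h fix above matters because the
 * DECLARE_PCI_UNMAP_ADDR()/DECLARE_PCI_UNMAP_LEN() pairs only reserve
 * storage when the platform needs it, and the accessors are keyed on
 * which kind of field was declared.  Minimal sketch of the intended
 * pairing, assuming the pci_unmap_* helpers of this kernel generation
 * (demo_ring_info, demo_map() and demo_unmap() are hypothetical):
 */
#include <linux/pci.h>

struct demo_ring_info {
	DECLARE_PCI_UNMAP_ADDR(mapaddr);
	DECLARE_PCI_UNMAP_LEN(maplen);
};

static void demo_map(struct pci_dev *pdev, struct demo_ring_info *ri,
		     void *buf, size_t len)
{
	dma_addr_t mapping = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);

	pci_unmap_addr_set(ri, mapaddr, mapping);
	pci_unmap_len_set(ri, maplen, len);
}

static void demo_unmap(struct pci_dev *pdev, struct demo_ring_info *ri)
{
	pci_unmap_single(pdev, pci_unmap_addr(ri, mapaddr),
			 pci_unmap_len(ri, maplen), PCI_DMA_TODEVICE);
}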
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index 8005dd1..d5140ae 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -150,11 +150,9 @@
 
 config HDLC_PPP
 	tristate "Synchronous Point-to-Point Protocol (PPP) support"
-	depends on HDLC && BROKEN
+	depends on HDLC
 	help
 	  Generic HDLC driver supporting PPP over WAN connections.
-	  This module is currently broken and will cause a kernel panic
-	  when a device configured in PPP mode is activated.
 
 	  It will be replaced by new PPP implementation in Linux 2.6.26.
 
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 45ddfc9..b0fce13 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -629,7 +629,7 @@
 	d->base_addr = chan->cosa->datareg;
 	d->irq = chan->cosa->irq;
 	d->dma = chan->cosa->dma;
-	d->priv = chan;
+	d->ml_priv = chan;
 	sppp_attach(&chan->pppdev);
 	if (register_netdev(d)) {
 		printk(KERN_WARNING "%s: register_netdev failed.\n", d->name);
@@ -650,7 +650,7 @@
 
 static int cosa_sppp_open(struct net_device *d)
 {
-	struct channel_data *chan = d->priv;
+	struct channel_data *chan = d->ml_priv;
 	int err;
 	unsigned long flags;
 
@@ -690,7 +690,7 @@
 
 static int cosa_sppp_tx(struct sk_buff *skb, struct net_device *dev)
 {
-	struct channel_data *chan = dev->priv;
+	struct channel_data *chan = dev->ml_priv;
 
 	netif_stop_queue(dev);
 
@@ -701,7 +701,7 @@
 
 static void cosa_sppp_timeout(struct net_device *dev)
 {
-	struct channel_data *chan = dev->priv;
+	struct channel_data *chan = dev->ml_priv;
 
 	if (test_bit(RXBIT, &chan->cosa->rxtx)) {
 		chan->stats.rx_errors++;
@@ -720,7 +720,7 @@
 
 static int cosa_sppp_close(struct net_device *d)
 {
-	struct channel_data *chan = d->priv;
+	struct channel_data *chan = d->ml_priv;
 	unsigned long flags;
 
 	netif_stop_queue(d);
@@ -800,7 +800,7 @@
 
 static struct net_device_stats *cosa_net_stats(struct net_device *dev)
 {
-	struct channel_data *chan = dev->priv;
+	struct channel_data *chan = dev->ml_priv;
 	return &chan->stats;
 }
 
@@ -1217,7 +1217,7 @@
 	int cmd)
 {
 	int rv;
-	struct channel_data *chan = dev->priv;
+	struct channel_data *chan = dev->ml_priv;
 	rv = cosa_ioctl_common(chan->cosa, chan, cmd, (unsigned long)ifr->ifr_data);
 	if (rv == -ENOIOCTLCMD) {
 		return sppp_do_ioctl(dev, ifr, cmd);
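/*
 * Illustrative sketch: the dev->priv to dev->ml_priv conversions in
 * cosa.c and in the WAN drivers that follow all use the same pattern --
 * stash the per-channel state in the mid-layer private pointer at setup
 * time, before register_netdev(), and fetch it back in every handler.
 * demo_channel, demo_setup() and demo_open() below are hypothetical:
 */
#include <linux/netdevice.h>

struct demo_channel {
	int opened;
};

static void demo_setup(struct net_device *dev, struct demo_channel *chan)
{
	dev->ml_priv = chan;		/* set before register_netdev() */
}

static int demo_open(struct net_device *dev)
{
	struct demo_channel *chan = dev->ml_priv;

	chan->opened = 1;
	return 0;
}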
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
index 10396d9..0030833 100644
--- a/drivers/net/wan/hdlc_ppp.c
+++ b/drivers/net/wan/hdlc_ppp.c
@@ -45,7 +45,7 @@
 	int (*old_ioctl)(struct net_device *, struct ifreq *, int);
 	int result;
 
-	dev->priv = &state(hdlc)->syncppp_ptr;
+	dev->ml_priv = &state(hdlc)->syncppp_ptr;
 	state(hdlc)->syncppp_ptr = &state(hdlc)->pppdev;
 	state(hdlc)->pppdev.dev = dev;
 
diff --git a/drivers/net/wan/hostess_sv11.c b/drivers/net/wan/hostess_sv11.c
index 83dbc92..f3065d3 100644
--- a/drivers/net/wan/hostess_sv11.c
+++ b/drivers/net/wan/hostess_sv11.c
@@ -75,7 +75,7 @@
  
 static int hostess_open(struct net_device *d)
 {
-	struct sv11_device *sv11=d->priv;
+	struct sv11_device *sv11=d->ml_priv;
 	int err = -1;
 	
 	/*
@@ -128,7 +128,7 @@
 
 static int hostess_close(struct net_device *d)
 {
-	struct sv11_device *sv11=d->priv;
+	struct sv11_device *sv11=d->ml_priv;
 	/*
 	 *	Discard new frames
 	 */
@@ -159,14 +159,14 @@
 
 static int hostess_ioctl(struct net_device *d, struct ifreq *ifr, int cmd)
 {
-	/* struct sv11_device *sv11=d->priv;
+	/* struct sv11_device *sv11=d->ml_priv;
 	   z8530_ioctl(d,&sv11->sync.chanA,ifr,cmd) */
 	return sppp_do_ioctl(d, ifr,cmd);
 }
 
 static struct net_device_stats *hostess_get_stats(struct net_device *d)
 {
-	struct sv11_device *sv11=d->priv;
+	struct sv11_device *sv11=d->ml_priv;
 	if(sv11)
 		return z8530_get_stats(&sv11->sync.chanA);
 	else
@@ -179,7 +179,7 @@
  
 static int hostess_queue_xmit(struct sk_buff *skb, struct net_device *d)
 {
-	struct sv11_device *sv11=d->priv;
+	struct sv11_device *sv11=d->ml_priv;
 	return z8530_queue_xmit(&sv11->sync.chanA, skb);
 }
 
@@ -325,6 +325,7 @@
 		/* 
 		 *	Initialise the PPP components
 		 */
+		d->ml_priv = sv;
 		sppp_attach(&sv->netdev);
 		
 		/*
@@ -333,7 +334,6 @@
 		
 		d->base_addr = iobase;
 		d->irq = irq;
-		d->priv = sv;
 		
 		if(register_netdev(d))
 		{
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index 6635ece..62133ce 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -891,6 +891,7 @@
 
     /* Initialize the sppp layer */
     /* An ioctl can cause a subsequent detach for raw frame interface */
+    dev->ml_priv = sc;
     sc->if_type = LMC_PPP;
     sc->check = 0xBEAFCAFE;
     dev->base_addr = pci_resource_start(pdev, 0);
diff --git a/drivers/net/wan/sealevel.c b/drivers/net/wan/sealevel.c
index 11276bf..44a89df 100644
--- a/drivers/net/wan/sealevel.c
+++ b/drivers/net/wan/sealevel.c
@@ -241,6 +241,7 @@
 		return NULL;
 
 	sv = d->priv;
+	d->ml_priv = sv;
 	sv->if_ptr = &sv->pppdev;
 	sv->pppdev.dev = d;
 	d->base_addr = iobase;
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index d340683..62a3d8f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -666,7 +666,7 @@
 	rx_status.flag = 0;
 	rx_status.mactime = le64_to_cpu(rx_end->timestamp);
 	rx_status.freq =
-		ieee80211_frequency_to_channel(le16_to_cpu(rx_hdr->channel));
+		ieee80211_channel_to_frequency(le16_to_cpu(rx_hdr->channel));
 	rx_status.band = (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
 				IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
 
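/*
 * Illustrative sketch: the iwl-3945 fix above (and the matching
 * iwl-4965 fix further down) goes in one direction only --
 * rx_hdr->channel carries a channel number, while rx_status.freq wants
 * a centre frequency in MHz, so the call must be
 * ieee80211_channel_to_frequency(), not the reverse.  The demo below
 * covers only the 2.4 GHz mapping; demo_chan_to_freq() is hypothetical
 * and mac80211's helper is the real interface:
 */
static int demo_chan_to_freq(int chan)
{
	if (chan == 14)
		return 2484;		/* channel 14 is a special case */
	if (chan >= 1 && chan <= 13)
		return 2407 + chan * 5;	/* e.g. channel 6 -> 2437 MHz */
	return 0;			/* 5 GHz channels not covered here */
}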
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-rs.c b/drivers/net/wireless/iwlwifi/iwl-4965-rs.c
index b608e1c..c9847b1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965-rs.c
@@ -163,8 +163,8 @@
 	struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
 #endif
 	struct iwl4965_rate dbg_fixed;
-	struct iwl_priv *drv;
 #endif
+	struct iwl_priv *drv;
 };
 
 static void rs_rate_scale_perform(struct iwl_priv *priv,
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index 17f629f..bf19eb8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -3978,7 +3978,7 @@
 
 	rx_status.mactime = le64_to_cpu(rx_start->timestamp);
 	rx_status.freq =
-		ieee80211_frequency_to_channel(le16_to_cpu(rx_start->channel));
+		ieee80211_channel_to_frequency(le16_to_cpu(rx_start->channel));
 	rx_status.band = (rx_start->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
 				IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
 	rx_status.rate_idx =
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
index 04c2638..9196825 100644
--- a/drivers/net/wireless/prism54/islpci_dev.c
+++ b/drivers/net/wireless/prism54/islpci_dev.c
@@ -388,8 +388,15 @@
 
 	netif_start_queue(ndev);
 
-	/* Turn off carrier unless we know we have associated */
-	netif_carrier_off(ndev);
+	/* Turn off carrier if in STA or Ad-hoc mode. It will be turned on
+	 * once the firmware receives a trap indicating it has associated
+	 * (GEN_OID_LINKSTATE). In other modes (AP, WDS or monitor) we
+	 * should just leave the carrier on, as it's expected that the
+	 * firmware won't send us a trigger. */
+	if (priv->iw_mode == IW_MODE_INFRA || priv->iw_mode == IW_MODE_ADHOC)
+		netif_carrier_off(ndev);
+	else
+		netif_carrier_on(ndev);
 
 	return 0;
 }
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 8d8657f..b22c027 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -1032,8 +1032,10 @@
 	 * Initialize the device.
 	 */
 	status = rt2x00dev->ops->lib->initialize(rt2x00dev);
-	if (status)
-		goto exit;
+	if (status) {
+		rt2x00queue_uninitialize(rt2x00dev);
+		return status;
+	}
 
 	__set_bit(DEVICE_INITIALIZED, &rt2x00dev->flags);
 
@@ -1043,11 +1045,6 @@
 	rt2x00rfkill_register(rt2x00dev);
 
 	return 0;
-
-exit:
-	rt2x00lib_uninitialize(rt2x00dev);
-
-	return status;
 }
 
 int rt2x00lib_start(struct rt2x00_dev *rt2x00dev)
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index 7867ec6..971af25 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -314,13 +314,14 @@
 	if (status) {
 		ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n",
 		      pci_dev->irq, status);
-		return status;
+		goto exit;
 	}
 
 	return 0;
 
 exit:
-	rt2x00pci_uninitialize(rt2x00dev);
+	queue_for_each(rt2x00dev, queue)
+		rt2x00pci_free_queue_dma(rt2x00dev, queue);
 
 	return status;
 }
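/*
 * Illustrative sketch: both rt2x00 fixes above are about unwinding only
 * what has actually been set up on an error path -- a failure after
 * queue allocation frees the queues, but must not run the full
 * uninitialize path for state that was never initialized.  Generic
 * sketch of the idiom; every name below is hypothetical:
 */
struct demo_dev;
static int demo_alloc_queues(struct demo_dev *dev);
static void demo_free_queues(struct demo_dev *dev);
static int demo_init_hw(struct demo_dev *dev);

static int demo_initialize(struct demo_dev *dev)
{
	int status;

	status = demo_alloc_queues(dev);
	if (status)
		return status;		/* nothing to unwind yet */

	status = demo_init_hw(dev);
	if (status)
		goto err_free_queues;	/* undo only the queue allocation */

	return 0;

err_free_queues:
	demo_free_queues(dev);
	return status;
}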
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index ae12dcd..14bc7b2 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -2366,6 +2366,7 @@
 {
 	struct rt2x00_dev *rt2x00dev = hw->priv;
 	struct rt2x00_intf *intf = vif_to_intf(control->vif);
+	struct queue_entry_priv_pci_tx *priv_tx;
 	struct skb_frame_desc *skbdesc;
 	unsigned int beacon_base;
 	u32 reg;
@@ -2373,21 +2374,8 @@
 	if (unlikely(!intf->beacon))
 		return -ENOBUFS;
 
-	/*
-	 * We need to append the descriptor in front of the
-	 * beacon frame.
-	 */
-	if (skb_headroom(skb) < intf->beacon->queue->desc_size) {
-		if (pskb_expand_head(skb, intf->beacon->queue->desc_size,
-				     0, GFP_ATOMIC))
-			return -ENOMEM;
-	}
-
-	/*
-	 * Add the descriptor in front of the skb.
-	 */
-	skb_push(skb, intf->beacon->queue->desc_size);
-	memset(skb->data, 0, intf->beacon->queue->desc_size);
+	priv_tx = intf->beacon->priv_data;
+	memset(priv_tx->desc, 0, intf->beacon->queue->desc_size);
 
 	/*
 	 * Fill in skb descriptor
@@ -2395,9 +2383,9 @@
 	skbdesc = get_skb_frame_desc(skb);
 	memset(skbdesc, 0, sizeof(*skbdesc));
 	skbdesc->flags |= FRAME_DESC_DRIVER_GENERATED;
-	skbdesc->data = skb->data + intf->beacon->queue->desc_size;
-	skbdesc->data_len = skb->len - intf->beacon->queue->desc_size;
-	skbdesc->desc = skb->data;
+	skbdesc->data = skb->data;
+	skbdesc->data_len = skb->len;
+	skbdesc->desc = priv_tx->desc;
 	skbdesc->desc_len = intf->beacon->queue->desc_size;
 	skbdesc->entry = intf->beacon;
 
@@ -2425,7 +2413,10 @@
 	 */
 	beacon_base = HW_BEACON_OFFSET(intf->beacon->entry_idx);
 	rt2x00pci_register_multiwrite(rt2x00dev, beacon_base,
-				      skb->data, skb->len);
+				      skbdesc->desc, skbdesc->desc_len);
+	rt2x00pci_register_multiwrite(rt2x00dev,
+				      beacon_base + skbdesc->desc_len,
+				      skbdesc->data, skbdesc->data_len);
 	rt61pci_kick_tx_queue(rt2x00dev, control->queue);
 
 	return 0;
@@ -2490,7 +2481,7 @@
 
 static const struct data_queue_desc rt61pci_queue_bcn = {
 	.entry_num		= 4 * BEACON_ENTRIES,
-	.data_size		= MGMT_FRAME_SIZE,
+	.data_size		= 0, /* No DMA required for beacons */
 	.desc_size		= TXINFO_SIZE,
 	.priv_size		= sizeof(struct queue_entry_priv_pci_tx),
 };
diff --git a/drivers/net/wireless/wavelan.c b/drivers/net/wireless/wavelan.c
index 03384a4..49ae970 100644
--- a/drivers/net/wireless/wavelan.c
+++ b/drivers/net/wireless/wavelan.c
@@ -908,9 +908,9 @@
 	     p->psa_call_code[3], p->psa_call_code[4], p->psa_call_code[5],
 	     p->psa_call_code[6], p->psa_call_code[7]);
 #ifdef DEBUG_SHOW_UNUSED
-	printk(KERN_DEBUG "psa_reserved[]: %02X:%02X:%02X:%02X\n",
+	printk(KERN_DEBUG "psa_reserved[]: %02X:%02X\n",
 	       p->psa_reserved[0],
-	       p->psa_reserved[1], p->psa_reserved[2], p->psa_reserved[3]);
+	       p->psa_reserved[1]);
 #endif				/* DEBUG_SHOW_UNUSED */
 	printk(KERN_DEBUG "psa_conf_status: %d, ", p->psa_conf_status);
 	printk("psa_crc: 0x%02x%02x, ", p->psa_crc[0], p->psa_crc[1]);
diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c
index baf7401..b584c0e 100644
--- a/drivers/net/wireless/wavelan_cs.c
+++ b/drivers/net/wireless/wavelan_cs.c
@@ -1074,11 +1074,9 @@
 	 p->psa_call_code[6],
 	 p->psa_call_code[7]);
 #ifdef DEBUG_SHOW_UNUSED
-  printk(KERN_DEBUG "psa_reserved[]: %02X:%02X:%02X:%02X\n",
+  printk(KERN_DEBUG "psa_reserved[]: %02X:%02X\n",
 	 p->psa_reserved[0],
-	 p->psa_reserved[1],
-	 p->psa_reserved[2],
-	 p->psa_reserved[3]);
+	 p->psa_reserved[1]);
 #endif	/* DEBUG_SHOW_UNUSED */
   printk(KERN_DEBUG "psa_conf_status: %d, ", p->psa_conf_status);
   printk("psa_crc: 0x%02x%02x, ", p->psa_crc[0], p->psa_crc[1]);
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index 5316074..12e24f0 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -889,9 +889,13 @@
 	}
 free_urb:
 	skb = (struct sk_buff *)urb->context;
-	zd_mac_tx_to_dev(skb, urb->status);
+	/*
+	 * grab 'usb' pointer before handing off the skb (since
+	 * it might be freed by zd_mac_tx_to_dev or mac80211)
+	 */
 	cb = (struct zd_tx_skb_control_block *)skb->cb;
 	usb = &zd_hw_mac(cb->hw)->chip.usb;
+	zd_mac_tx_to_dev(skb, urb->status);
 	free_tx_urb(usb, urb);
 	tx_dec_submitted_urbs(usb);
 	return;
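/*
 * Illustrative sketch: the zd_usb.c reordering above is the standard
 * use-after-free fix -- read every field still needed out of an object
 * before calling the routine that may free it.  demo_ctx and
 * demo_consume() below are hypothetical:
 */
struct demo_ctx {
	void *owner;
};

static void demo_consume(struct demo_ctx *ctx);	/* may free ctx */

static void demo_complete(struct demo_ctx *ctx)
{
	void *owner = ctx->owner;	/* grab this before handing ctx off */

	demo_consume(ctx);		/* ctx may already be freed here */
	/* keep working with 'owner'; never touch ctx again */
	(void)owner;
}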
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index cefe7f2..63c3404 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1248,6 +1248,9 @@
 	{ USB_DEVICE(0x22b8, 0x7000), /* Motorola Q Phone */
 	.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
 	},
+	{ USB_DEVICE(0x0803, 0x3095), /* Zoom Telephonics Model 3095F USB MODEM */
+	.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
+	},
 
 	/* control interfaces with various AT-command sets */
 	{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
diff --git a/drivers/usb/core/endpoint.c b/drivers/usb/core/endpoint.c
index 99e5a68..fae55a3 100644
--- a/drivers/usb/core/endpoint.c
+++ b/drivers/usb/core/endpoint.c
@@ -156,6 +156,10 @@
 static struct attribute_group ep_dev_attr_grp = {
 	.attrs = ep_dev_attrs,
 };
+static struct attribute_group *ep_dev_groups[] = {
+	&ep_dev_attr_grp,
+	NULL
+};
 
 static int usb_endpoint_major_init(void)
 {
@@ -298,6 +302,7 @@
 
 	ep_dev->desc = &endpoint->desc;
 	ep_dev->udev = udev;
+	ep_dev->dev.groups = ep_dev_groups;
 	ep_dev->dev.devt = MKDEV(usb_endpoint_major, ep_dev->minor);
 	ep_dev->dev.class = ep_class->class;
 	ep_dev->dev.parent = parent;
@@ -309,9 +314,6 @@
 	retval = device_register(&ep_dev->dev);
 	if (retval)
 		goto error_chrdev;
-	retval = sysfs_create_group(&ep_dev->dev.kobj, &ep_dev_attr_grp);
-	if (retval)
-		goto error_group;
 
 	/* create the symlink to the old-style "ep_XX" directory */
 	sprintf(name, "ep_%02x", endpoint->desc.bEndpointAddress);
@@ -322,8 +324,6 @@
 	return retval;
 
 error_link:
-	sysfs_remove_group(&ep_dev->dev.kobj, &ep_dev_attr_grp);
-error_group:
 	device_unregister(&ep_dev->dev);
 	destroy_endpoint_class();
 	return retval;
@@ -348,7 +348,6 @@
 
 		sprintf(name, "ep_%02x", endpoint->desc.bEndpointAddress);
 		sysfs_remove_link(&ep_dev->dev.parent->kobj, name);
-		sysfs_remove_group(&ep_dev->dev.kobj, &ep_dev_attr_grp);
 		device_unregister(&ep_dev->dev);
 		endpoint->ep_dev = NULL;
 		destroy_endpoint_class();
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 3e69266..fe47d14 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1607,6 +1607,7 @@
 		intf->dev.driver = NULL;
 		intf->dev.bus = &usb_bus_type;
 		intf->dev.type = &usb_if_device_type;
+		intf->dev.groups = usb_interface_groups;
 		intf->dev.dma_mask = dev->dev.dma_mask;
 		device_initialize(&intf->dev);
 		mark_quiesced(intf);
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index 5b20a60..c783cb1 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -538,6 +538,46 @@
 	.attrs = dev_attrs,
 };
 
+/* When modifying this list, be sure to modify dev_string_attrs_are_visible()
+ * accordingly.
+ */
+static struct attribute *dev_string_attrs[] = {
+	&dev_attr_manufacturer.attr,
+	&dev_attr_product.attr,
+	&dev_attr_serial.attr,
+	NULL
+};
+
+static mode_t dev_string_attrs_are_visible(struct kobject *kobj,
+		struct attribute *a, int n)
+{
+	struct usb_device *udev = to_usb_device(
+			container_of(kobj, struct device, kobj));
+
+	if (a == &dev_attr_manufacturer.attr) {
+		if (udev->manufacturer == NULL)
+			return 0;
+	} else if (a == &dev_attr_product.attr) {
+		if (udev->product == NULL)
+			return 0;
+	} else if (a == &dev_attr_serial.attr) {
+		if (udev->serial == NULL)
+			return 0;
+	}
+	return a->mode;
+}
+
+static struct attribute_group dev_string_attr_grp = {
+	.attrs =	dev_string_attrs,
+	.is_visible =	dev_string_attrs_are_visible,
+};
+
+struct attribute_group *usb_device_groups[] = {
+	&dev_attr_grp,
+	&dev_string_attr_grp,
+	NULL
+};
+
 /* Binary descriptors */
 
 static ssize_t
@@ -591,10 +631,9 @@
 	struct device *dev = &udev->dev;
 	int retval;
 
-	retval = sysfs_create_group(&dev->kobj, &dev_attr_grp);
-	if (retval)
-		return retval;
-
+	/* Unfortunately these attributes cannot be created before
+	 * the uevent is broadcast.
+	 */
 	retval = device_create_bin_file(dev, &dev_bin_attr_descriptors);
 	if (retval)
 		goto error;
@@ -607,21 +646,6 @@
 	if (retval)
 		goto error;
 
-	if (udev->manufacturer) {
-		retval = device_create_file(dev, &dev_attr_manufacturer);
-		if (retval)
-			goto error;
-	}
-	if (udev->product) {
-		retval = device_create_file(dev, &dev_attr_product);
-		if (retval)
-			goto error;
-	}
-	if (udev->serial) {
-		retval = device_create_file(dev, &dev_attr_serial);
-		if (retval)
-			goto error;
-	}
 	retval = usb_create_ep_files(dev, &udev->ep0, udev);
 	if (retval)
 		goto error;
@@ -636,13 +660,9 @@
 	struct device *dev = &udev->dev;
 
 	usb_remove_ep_files(&udev->ep0);
-	device_remove_file(dev, &dev_attr_manufacturer);
-	device_remove_file(dev, &dev_attr_product);
-	device_remove_file(dev, &dev_attr_serial);
 	remove_power_attributes(dev);
 	remove_persist_attributes(dev);
 	device_remove_bin_file(dev, &dev_bin_attr_descriptors);
-	sysfs_remove_group(&dev->kobj, &dev_attr_grp);
 }
 
 /* Interface Association Descriptor fields */
@@ -688,17 +708,15 @@
 		struct device_attribute *attr, char *buf)
 {
 	struct usb_interface *intf;
-	struct usb_device *udev;
-	int len;
+	char *string;
 
 	intf = to_usb_interface(dev);
-	udev = interface_to_usbdev(intf);
-	len = snprintf(buf, 256, "%s", intf->cur_altsetting->string);
-	if (len < 0)
+	string = intf->cur_altsetting->string;
+	barrier();		/* The altsetting might change! */
+
+	if (!string)
 		return 0;
-	buf[len] = '\n';
-	buf[len+1] = 0;
-	return len+1;
+	return sprintf(buf, "%s\n", string);
 }
 static DEVICE_ATTR(interface, S_IRUGO, show_interface_string, NULL);
 
@@ -727,18 +745,6 @@
 }
 static DEVICE_ATTR(modalias, S_IRUGO, show_modalias, NULL);
 
-static struct attribute *intf_assoc_attrs[] = {
-	&dev_attr_iad_bFirstInterface.attr,
-	&dev_attr_iad_bInterfaceCount.attr,
-	&dev_attr_iad_bFunctionClass.attr,
-	&dev_attr_iad_bFunctionSubClass.attr,
-	&dev_attr_iad_bFunctionProtocol.attr,
-	NULL,
-};
-static struct attribute_group intf_assoc_attr_grp = {
-	.attrs = intf_assoc_attrs,
-};
-
 static struct attribute *intf_attrs[] = {
 	&dev_attr_bInterfaceNumber.attr,
 	&dev_attr_bAlternateSetting.attr,
@@ -753,6 +759,37 @@
 	.attrs = intf_attrs,
 };
 
+static struct attribute *intf_assoc_attrs[] = {
+	&dev_attr_iad_bFirstInterface.attr,
+	&dev_attr_iad_bInterfaceCount.attr,
+	&dev_attr_iad_bFunctionClass.attr,
+	&dev_attr_iad_bFunctionSubClass.attr,
+	&dev_attr_iad_bFunctionProtocol.attr,
+	NULL,
+};
+
+static mode_t intf_assoc_attrs_are_visible(struct kobject *kobj,
+		struct attribute *a, int n)
+{
+	struct usb_interface *intf = to_usb_interface(
+			container_of(kobj, struct device, kobj));
+
+	if (intf->intf_assoc == NULL)
+		return 0;
+	return a->mode;
+}
+
+static struct attribute_group intf_assoc_attr_grp = {
+	.attrs =	intf_assoc_attrs,
+	.is_visible =	intf_assoc_attrs_are_visible,
+};
+
+struct attribute_group *usb_interface_groups[] = {
+	&intf_attr_grp,
+	&intf_assoc_attr_grp,
+	NULL
+};
+
 static inline void usb_create_intf_ep_files(struct usb_interface *intf,
 		struct usb_device *udev)
 {
@@ -777,23 +814,21 @@
 
 int usb_create_sysfs_intf_files(struct usb_interface *intf)
 {
-	struct device *dev = &intf->dev;
 	struct usb_device *udev = interface_to_usbdev(intf);
 	struct usb_host_interface *alt = intf->cur_altsetting;
 	int retval;
 
 	if (intf->sysfs_files_created)
 		return 0;
-	retval = sysfs_create_group(&dev->kobj, &intf_attr_grp);
-	if (retval)
-		return retval;
 
+	/* The interface string may be present in some altsettings
+	 * and missing in others.  Hence its attribute cannot be created
+	 * before the uevent is broadcast.
+	 */
 	if (alt->string == NULL)
 		alt->string = usb_cache_string(udev, alt->desc.iInterface);
 	if (alt->string)
-		retval = device_create_file(dev, &dev_attr_interface);
-	if (intf->intf_assoc)
-		retval = sysfs_create_group(&dev->kobj, &intf_assoc_attr_grp);
+		retval = device_create_file(&intf->dev, &dev_attr_interface);
 	usb_create_intf_ep_files(intf, udev);
 	intf->sysfs_files_created = 1;
 	return 0;
@@ -807,7 +842,5 @@
 		return;
 	usb_remove_intf_ep_files(intf);
 	device_remove_file(dev, &dev_attr_interface);
-	sysfs_remove_group(&dev->kobj, &intf_attr_grp);
-	sysfs_remove_group(&intf->dev.kobj, &intf_assoc_attr_grp);
 	intf->sysfs_files_created = 0;
 }
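/*
 * Illustrative sketch: the USB core changes in this merge (endpoint.c
 * and message.c above, sysfs.c here, usb.c and usb.h below) share one
 * idea -- hang the attribute groups off dev->groups before
 * device_register() so the files already exist when the uevent is
 * broadcast, and let the group's .is_visible callback suppress optional
 * attributes per device instead of creating and removing them by hand.
 * The demo_* names below are hypothetical; the attribute_group and
 * is_visible interface is the one used in the patch above:
 */
#include <linux/device.h>

static int demo_has_feature(struct device *dev);	/* hypothetical test */

static struct device_attribute dev_attr_demo;	/* stands in for a DEVICE_ATTR() */

static struct attribute *demo_attrs[] = {
	&dev_attr_demo.attr,
	NULL
};

static mode_t demo_attrs_are_visible(struct kobject *kobj,
				     struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);

	if (!demo_has_feature(dev))
		return 0;		/* hide the file on this device */
	return a->mode;
}

static struct attribute_group demo_attr_grp = {
	.attrs		= demo_attrs,
	.is_visible	= demo_attrs_are_visible,
};

static struct attribute_group *demo_groups[] = {
	&demo_attr_grp,
	NULL
};

/* In the device-creation path, before device_register():
 *	dev->groups = demo_groups;
 */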
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 1f0db51..3257743 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -291,6 +291,7 @@
 	device_initialize(&dev->dev);
 	dev->dev.bus = &usb_bus_type;
 	dev->dev.type = &usb_device_type;
+	dev->dev.groups = usb_device_groups;
 	dev->dev.dma_mask = bus->controller->dma_mask;
 	set_dev_node(&dev->dev, dev_to_node(bus->controller));
 	dev->state = USB_STATE_ATTACHED;
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index 1bf8ccb..1a8bc21 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -130,6 +130,10 @@
 /* for labeling diagnostics */
 extern const char *usbcore_name;
 
+/* sysfs stuff */
+extern struct attribute_group *usb_device_groups[];
+extern struct attribute_group *usb_interface_groups[];
+
 /* usbfs stuff */
 extern struct mutex usbfs_mutex;
 extern struct usb_driver usbfs_driver;
diff --git a/drivers/usb/gadget/amd5536udc.c b/drivers/usb/gadget/amd5536udc.c
index ce337cb..f261d2a 100644
--- a/drivers/usb/gadget/amd5536udc.c
+++ b/drivers/usb/gadget/amd5536udc.c
@@ -3251,7 +3251,7 @@
 	/* pci setup */
 	if (pci_enable_device(pdev) < 0) {
 		kfree(dev);
-		dev = 0;
+		dev = NULL;
 		retval = -ENODEV;
 		goto finished;
 	}
@@ -3264,7 +3264,7 @@
 	if (!request_mem_region(resource, len, name)) {
 		dev_dbg(&pdev->dev, "pci device used already\n");
 		kfree(dev);
-		dev = 0;
+		dev = NULL;
 		retval = -EBUSY;
 		goto finished;
 	}
@@ -3274,7 +3274,7 @@
 	if (dev->virt_addr == NULL) {
 		dev_dbg(&pdev->dev, "start address cannot be mapped\n");
 		kfree(dev);
-		dev = 0;
+		dev = NULL;
 		retval = -EFAULT;
 		goto finished;
 	}
@@ -3282,7 +3282,7 @@
 	if (!pdev->irq) {
 		dev_err(&dev->pdev->dev, "irq not set\n");
 		kfree(dev);
-		dev = 0;
+		dev = NULL;
 		retval = -ENODEV;
 		goto finished;
 	}
@@ -3290,7 +3290,7 @@
 	if (request_irq(pdev->irq, udc_irq, IRQF_SHARED, name, dev) != 0) {
 		dev_dbg(&dev->pdev->dev, "request_irq(%d) fail\n", pdev->irq);
 		kfree(dev);
-		dev = 0;
+		dev = NULL;
 		retval = -EBUSY;
 		goto finished;
 	}
diff --git a/drivers/usb/gadget/atmel_usba_udc.c b/drivers/usb/gadget/atmel_usba_udc.c
index e756023..07e5a0b 100644
--- a/drivers/usb/gadget/atmel_usba_udc.c
+++ b/drivers/usb/gadget/atmel_usba_udc.c
@@ -649,7 +649,13 @@
 
 	if (!ep->desc) {
 		spin_unlock_irqrestore(&udc->lock, flags);
-		DBG(DBG_ERR, "ep_disable: %s not enabled\n", ep->ep.name);
+		/* REVISIT: because this driver disables endpoints in
+		 * reset_all_endpoints() before calling disconnect(),
+		 * most gadget drivers would otherwise trigger this
+		 * message for a non-error case ...
+		 */
+		if (udc->gadget.speed != USB_SPEED_UNKNOWN)
+			DBG(DBG_ERR, "ep_disable: %s not enabled\n",
+					ep->ep.name);
 		return -EINVAL;
 	}
 	ep->desc = NULL;
@@ -1032,8 +1038,6 @@
 			.release	= nop_release,
 		},
 	},
-
-	.lock	= SPIN_LOCK_UNLOCKED,
 };
 
 /*
@@ -1052,6 +1056,12 @@
 		request_complete(ep, req, -ECONNRESET);
 	}
 
+	/* NOTE:  normally, the next call to the gadget driver is in
+	 * charge of disabling endpoints... usually disconnect().
+	 * The exception would be entering a high speed test mode.
+	 *
+	 * FIXME remove this code ... and retest thoroughly.
+	 */
 	list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
 		if (ep->desc) {
 			spin_unlock(&udc->lock);
@@ -1219,7 +1229,7 @@
 static int handle_ep0_setup(struct usba_udc *udc, struct usba_ep *ep,
 		struct usb_ctrlrequest *crq)
 {
-	int retval = 0;;
+	int retval = 0;
 
 	switch (crq->bRequest) {
 	case USB_REQ_GET_STATUS: {
@@ -1693,6 +1703,14 @@
 		usba_writel(udc, INT_CLR, USBA_END_OF_RESET);
 		reset_all_endpoints(udc);
 
+		if (udc->gadget.speed != USB_SPEED_UNKNOWN
+				&& udc->driver->disconnect) {
+			udc->gadget.speed = USB_SPEED_UNKNOWN;
+			spin_unlock(&udc->lock);
+			udc->driver->disconnect(&udc->gadget);
+			spin_lock(&udc->lock);
+		}
+
 		if (status & USBA_HIGH_SPEED) {
 			DBG(DBG_BUS, "High-speed bus reset detected\n");
 			udc->gadget.speed = USB_SPEED_HIGH;
@@ -1716,9 +1734,13 @@
 				| USBA_DET_SUSPEND
 				| USBA_END_OF_RESUME));
 
+		/*
+		 * Unclear why we hit this irregularly, e.g. in usbtest,
+		 * but it's clearly harmless...
+		 */
 		if (!(usba_ep_readl(ep0, CFG) & USBA_EPT_MAPPED))
-			dev_warn(&udc->pdev->dev,
-				 "WARNING: EP0 configuration is invalid!\n");
+			dev_dbg(&udc->pdev->dev,
+				 "ODD: EP0 configuration is invalid!\n");
 	}
 
 	spin_unlock(&udc->lock);
@@ -1751,9 +1773,11 @@
 			reset_all_endpoints(udc);
 			toggle_bias(0);
 			usba_writel(udc, CTRL, USBA_DISABLE_MASK);
-			spin_unlock(&udc->lock);
-			udc->driver->disconnect(&udc->gadget);
-			spin_lock(&udc->lock);
+			if (udc->driver->disconnect) {
+				spin_unlock(&udc->lock);
+				udc->driver->disconnect(&udc->gadget);
+				spin_lock(&udc->lock);
+			}
 		}
 		udc->vbus_prev = vbus;
 	}
@@ -1825,7 +1849,7 @@
 
 	if (!udc->pdev)
 		return -ENODEV;
-	if (driver != udc->driver)
+	if (driver != udc->driver || !driver->unbind)
 		return -EINVAL;
 
 	if (udc->vbus_pin != -1)
@@ -1840,6 +1864,9 @@
 	toggle_bias(0);
 	usba_writel(udc, CTRL, USBA_DISABLE_MASK);
 
+	if (udc->driver->disconnect)
+		udc->driver->disconnect(&udc->gadget);
+
 	driver->unbind(&udc->gadget);
 	udc->gadget.dev.driver = NULL;
 	udc->driver = NULL;
@@ -1879,6 +1906,7 @@
 		goto err_get_hclk;
 	}
 
+	spin_lock_init(&udc->lock);
 	udc->pdev = pdev;
 	udc->pclk = pclk;
 	udc->hclk = hclk;
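/*
 * Illustrative sketch: the atmel_usba_udc change above drops the static
 * SPIN_LOCK_UNLOCKED initializer in favour of spin_lock_init() in the
 * probe path; the static form was being phased out, in part because it
 * does not interact well with lockdep's per-lock classes.  Sketch of
 * the preferred pattern (demo_softc and demo_probe() are hypothetical):
 */
#include <linux/spinlock.h>

struct demo_softc {
	spinlock_t lock;
};

static int demo_probe(struct demo_softc *sc)
{
	spin_lock_init(&sc->lock);	/* per-instance lock, proper class */
	return 0;
}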
diff --git a/drivers/usb/gadget/pxa27x_udc.c b/drivers/usb/gadget/pxa27x_udc.c
index 75eba20..499b7a2 100644
--- a/drivers/usb/gadget/pxa27x_udc.c
+++ b/drivers/usb/gadget/pxa27x_udc.c
@@ -1546,7 +1546,6 @@
 	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
 	dev->udc_usb_ep[0].pxa_ep = &dev->pxa_ep[0];
 	ep0_idle(dev);
-	strcpy(dev->dev->bus_id, "");
 
 	/* PXA endpoints init */
 	for (i = 0; i < NR_PXA_ENDPOINTS; i++) {
@@ -1746,13 +1745,10 @@
 		ep_err(ep, "wrong to have extra bytes for setup : 0x%08x\n", i);
 	}
 
-	le16_to_cpus(&u.r.wValue);
-	le16_to_cpus(&u.r.wIndex);
-	le16_to_cpus(&u.r.wLength);
-
 	ep_dbg(ep, "SETUP %02x.%02x v%04x i%04x l%04x\n",
 		u.r.bRequestType, u.r.bRequest,
-		u.r.wValue, u.r.wIndex, u.r.wLength);
+		le16_to_cpu(u.r.wValue), le16_to_cpu(u.r.wIndex),
+		le16_to_cpu(u.r.wLength));
 	if (unlikely(have_extrabytes))
 		goto stall;
 
@@ -2296,7 +2292,8 @@
 {
 	struct pxa_udc *udc = platform_get_drvdata(_dev);
 
-	udc_disable(udc);
+	if (udc_readl(udc, UDCCR) & UDCCR_UDE)
+		udc_disable(udc);
 }
 
 #ifdef CONFIG_PM
@@ -2361,9 +2358,8 @@
 	 * Upon exit from sleep mode and before clearing OTGPH,
 	 * Software must configure the USB OTG pad, UDC, and UHC
 	 * to the state they were in before entering sleep mode.
-	 *
-	 * Should be : PSSR |= PSSR_OTGPH;
 	 */
+	PSSR |= PSSR_OTGPH;
 
 	return 0;
 }
@@ -2387,6 +2383,9 @@
 
 static int __init udc_init(void)
 {
+	if (!cpu_is_pxa27x())
+		return -ENODEV;
+
 	printk(KERN_INFO "%s: version %s\n", driver_name, DRIVER_VERSION);
 	return platform_driver_probe(&udc_driver, pxa_udc_probe);
 }
diff --git a/drivers/usb/gadget/pxa27x_udc.h b/drivers/usb/gadget/pxa27x_udc.h
index 1d1b793..97453db 100644
--- a/drivers/usb/gadget/pxa27x_udc.h
+++ b/drivers/usb/gadget/pxa27x_udc.h
@@ -484,4 +484,12 @@
 #define ep_warn(ep, fmt, arg...) \
 	dev_warn(ep->dev->dev, "%s:%s:" fmt, EPNAME(ep), __func__, ## arg)
 
+/*
+ * Cannot include pxa-regs.h, as register names are similar.
+ * So PSSR is redefined here. This should be removed once the UDC
+ * registers are gone from pxa-regs.h.
+ */
+#define PSSR		__REG(0x40F00004)	/* Power Manager Sleep Status */
+#define PSSR_OTGPH	(1 << 6)		/* OTG Peripheral Hold */
+
 #endif /* __LINUX_USB_GADGET_PXA27X_H */
diff --git a/drivers/usb/gadget/serial.c b/drivers/usb/gadget/serial.c
index 54cdd6f..fa019fa 100644
--- a/drivers/usb/gadget/serial.c
+++ b/drivers/usb/gadget/serial.c
@@ -14,7 +14,6 @@
  * This software is distributed under the terms of the GNU General
  * Public License ("GPL") as published by the Free Software Foundation,
  * either version 2 of that License or (at your option) any later version.
- *
  */
 
 #include <linux/kernel.h>
@@ -33,7 +32,7 @@
 /* Defines */
 
 #define GS_VERSION_STR			"v2.2"
-#define GS_VERSION_NUM			0x0202
+#define GS_VERSION_NUM			0x2200
 
 #define GS_LONG_NAME			"Gadget Serial"
 #define GS_SHORT_NAME			"g_serial"
@@ -41,7 +40,11 @@
 #define GS_MAJOR			127
 #define GS_MINOR_START			0
 
-#define GS_NUM_PORTS			16
+/* REVISIT only one port is supported for now;
+ * see gs_{send,recv}_packet() ... no multiplexing,
+ * and no support for multiple ACM devices.
+ */
+#define GS_NUM_PORTS			1
 
 #define GS_NUM_CONFIGS			1
 #define GS_NO_CONFIG_ID			0
@@ -65,6 +68,9 @@
 
 #define GS_DEFAULT_USE_ACM		0
 
+/* 9600-8-N-1 ... matches init_termios.c_cflag and defaults
+ * expected by "usbser.sys" on MS-Windows.
+ */
 #define GS_DEFAULT_DTE_RATE		9600
 #define GS_DEFAULT_DATA_BITS		8
 #define GS_DEFAULT_PARITY		USB_CDC_NO_PARITY
@@ -107,10 +113,6 @@
 #define GS_NOTIFY_MAXPACKET		8
 
 
-/* Structures */
-
-struct gs_dev;
-
 /* circular buffer */
 struct gs_buf {
 	unsigned int		buf_size;
@@ -119,12 +121,6 @@
 	char			*buf_put;
 };
 
-/* list of requests */
-struct gs_req_entry {
-	struct list_head	re_entry;
-	struct usb_request	*re_req;
-};
-
 /* the port structure holds info for each port, one for each minor number */
 struct gs_port {
 	struct gs_dev		*port_dev;	/* pointer to device struct */
@@ -164,26 +160,7 @@
 
 /* Functions */
 
-/* module */
-static int __init gs_module_init(void);
-static void __exit gs_module_exit(void);
-
-/* tty driver */
-static int gs_open(struct tty_struct *tty, struct file *file);
-static void gs_close(struct tty_struct *tty, struct file *file);
-static int gs_write(struct tty_struct *tty,
-	const unsigned char *buf, int count);
-static int gs_put_char(struct tty_struct *tty, unsigned char ch);
-static void gs_flush_chars(struct tty_struct *tty);
-static int gs_write_room(struct tty_struct *tty);
-static int gs_chars_in_buffer(struct tty_struct *tty);
-static void gs_throttle(struct tty_struct * tty);
-static void gs_unthrottle(struct tty_struct * tty);
-static void gs_break(struct tty_struct *tty, int break_state);
-static int  gs_ioctl(struct tty_struct *tty, struct file *file,
-	unsigned int cmd, unsigned long arg);
-static void gs_set_termios(struct tty_struct *tty, struct ktermios *old);
-
+/* tty driver internals */
 static int gs_send(struct gs_dev *dev);
 static int gs_send_packet(struct gs_dev *dev, char *packet,
 	unsigned int size);
@@ -192,19 +169,7 @@
 static void gs_read_complete(struct usb_ep *ep, struct usb_request *req);
 static void gs_write_complete(struct usb_ep *ep, struct usb_request *req);
 
-/* gadget driver */
-static int gs_bind(struct usb_gadget *gadget);
-static void gs_unbind(struct usb_gadget *gadget);
-static int gs_setup(struct usb_gadget *gadget,
-	const struct usb_ctrlrequest *ctrl);
-static int gs_setup_standard(struct usb_gadget *gadget,
-	const struct usb_ctrlrequest *ctrl);
-static int gs_setup_class(struct usb_gadget *gadget,
-	const struct usb_ctrlrequest *ctrl);
-static void gs_setup_complete(struct usb_ep *ep, struct usb_request *req);
-static void gs_setup_complete_set_line_coding(struct usb_ep *ep,
-	struct usb_request *req);
-static void gs_disconnect(struct usb_gadget *gadget);
+/* gadget driver internals */
 static int gs_set_config(struct gs_dev *dev, unsigned config);
 static void gs_reset_config(struct gs_dev *dev);
 static int gs_build_config_buf(u8 *buf, struct usb_gadget *g,
@@ -214,10 +179,6 @@
 	gfp_t kmalloc_flags);
 static void gs_free_req(struct usb_ep *ep, struct usb_request *req);
 
-static struct gs_req_entry *gs_alloc_req_entry(struct usb_ep *ep, unsigned len,
-	gfp_t kmalloc_flags);
-static void gs_free_req_entry(struct usb_ep *ep, struct gs_req_entry *req);
-
 static int gs_alloc_ports(struct gs_dev *dev, gfp_t kmalloc_flags);
 static void gs_free_ports(struct gs_dev *dev);
 
@@ -232,62 +193,15 @@
 static unsigned int gs_buf_get(struct gs_buf *gb, char *buf,
 	unsigned int count);
 
-/* external functions */
-extern int net2280_set_fifo_mode(struct usb_gadget *gadget, int mode);
-
 
 /* Globals */
 
 static struct gs_dev *gs_device;
 
-static const char *EP_IN_NAME;
-static const char *EP_OUT_NAME;
-static const char *EP_NOTIFY_NAME;
-
 static struct mutex gs_open_close_lock[GS_NUM_PORTS];
 
-static unsigned int read_q_size = GS_DEFAULT_READ_Q_SIZE;
-static unsigned int write_q_size = GS_DEFAULT_WRITE_Q_SIZE;
 
-static unsigned int write_buf_size = GS_DEFAULT_WRITE_BUF_SIZE;
-
-static unsigned int use_acm = GS_DEFAULT_USE_ACM;
-
-
-/* tty driver struct */
-static const struct tty_operations gs_tty_ops = {
-	.open =			gs_open,
-	.close =		gs_close,
-	.write =		gs_write,
-	.put_char =		gs_put_char,
-	.flush_chars =		gs_flush_chars,
-	.write_room =		gs_write_room,
-	.ioctl =		gs_ioctl,
-	.set_termios =		gs_set_termios,
-	.throttle =		gs_throttle,
-	.unthrottle =		gs_unthrottle,
-	.break_ctl =		gs_break,
-	.chars_in_buffer =	gs_chars_in_buffer,
-};
-static struct tty_driver *gs_tty_driver;
-
-/* gadget driver struct */
-static struct usb_gadget_driver gs_gadget_driver = {
-#ifdef CONFIG_USB_GADGET_DUALSPEED
-	.speed =		USB_SPEED_HIGH,
-#else
-	.speed =		USB_SPEED_FULL,
-#endif /* CONFIG_USB_GADGET_DUALSPEED */
-	.function =		GS_LONG_NAME,
-	.bind =			gs_bind,
-	.unbind =		gs_unbind,
-	.setup =		gs_setup,
-	.disconnect =		gs_disconnect,
-	.driver = {
-		.name =		GS_SHORT_NAME,
-	},
-};
-
+/*-------------------------------------------------------------------------*/
 
 /* USB descriptors */
 
@@ -304,7 +218,6 @@
 static struct usb_string gs_strings[] = {
 	{ GS_MANUFACTURER_STR_ID, manufacturer },
 	{ GS_PRODUCT_STR_ID, GS_LONG_NAME },
-	{ GS_SERIAL_STR_ID, "0" },
 	{ GS_BULK_CONFIG_STR_ID, "Gadget Serial Bulk" },
 	{ GS_ACM_CONFIG_STR_ID, "Gadget Serial CDC ACM" },
 	{ GS_CONTROL_STR_ID, "Gadget Serial Control" },
@@ -327,7 +240,6 @@
 	.idProduct =		__constant_cpu_to_le16(GS_PRODUCT_ID),
 	.iManufacturer =	GS_MANUFACTURER_STR_ID,
 	.iProduct =		GS_PRODUCT_STR_ID,
-	.iSerialNumber =	GS_SERIAL_STR_ID,
 	.bNumConfigurations =	GS_NUM_CONFIGS,
 };
 
@@ -364,7 +276,7 @@
 	.bDescriptorType =	USB_DT_INTERFACE,
 	.bInterfaceNumber =	GS_BULK_INTERFACE_ID,
 	.bNumEndpoints =	2,
-	.bInterfaceClass =	USB_CLASS_CDC_DATA,
+	.bInterfaceClass =	USB_CLASS_VENDOR_SPEC,
 	.bInterfaceSubClass =	0,
 	.bInterfaceProtocol =	0,
 	.iInterface =		GS_DATA_STR_ID,
@@ -521,6 +433,8 @@
 };
 
 
+/*-------------------------------------------------------------------------*/
+
 /* Module */
 MODULE_DESCRIPTION(GS_LONG_NAME);
 MODULE_AUTHOR("Al Borchers");
@@ -531,84 +445,23 @@
 MODULE_PARM_DESC(debug, "Enable debugging, 0=off, 1=on");
 #endif
 
+static unsigned int read_q_size = GS_DEFAULT_READ_Q_SIZE;
 module_param(read_q_size, uint, S_IRUGO);
 MODULE_PARM_DESC(read_q_size, "Read request queue size, default=32");
 
+static unsigned int write_q_size = GS_DEFAULT_WRITE_Q_SIZE;
 module_param(write_q_size, uint, S_IRUGO);
 MODULE_PARM_DESC(write_q_size, "Write request queue size, default=32");
 
+static unsigned int write_buf_size = GS_DEFAULT_WRITE_BUF_SIZE;
 module_param(write_buf_size, uint, S_IRUGO);
 MODULE_PARM_DESC(write_buf_size, "Write buffer size, default=8192");
 
+static unsigned int use_acm = GS_DEFAULT_USE_ACM;
 module_param(use_acm, uint, S_IRUGO);
 MODULE_PARM_DESC(use_acm, "Use CDC ACM, 0=no, 1=yes, default=no");
 
-module_init(gs_module_init);
-module_exit(gs_module_exit);
-
-/*
-*  gs_module_init
-*
-*  Register as a USB gadget driver and a tty driver.
-*/
-static int __init gs_module_init(void)
-{
-	int i;
-	int retval;
-
-	retval = usb_gadget_register_driver(&gs_gadget_driver);
-	if (retval) {
-		pr_err("gs_module_init: cannot register gadget driver, "
-			"ret=%d\n", retval);
-		return retval;
-	}
-
-	gs_tty_driver = alloc_tty_driver(GS_NUM_PORTS);
-	if (!gs_tty_driver)
-		return -ENOMEM;
-	gs_tty_driver->owner = THIS_MODULE;
-	gs_tty_driver->driver_name = GS_SHORT_NAME;
-	gs_tty_driver->name = "ttygs";
-	gs_tty_driver->major = GS_MAJOR;
-	gs_tty_driver->minor_start = GS_MINOR_START;
-	gs_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
-	gs_tty_driver->subtype = SERIAL_TYPE_NORMAL;
-	gs_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
-	gs_tty_driver->init_termios = tty_std_termios;
-	gs_tty_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL;
-	tty_set_operations(gs_tty_driver, &gs_tty_ops);
-
-	for (i=0; i < GS_NUM_PORTS; i++)
-		mutex_init(&gs_open_close_lock[i]);
-
-	retval = tty_register_driver(gs_tty_driver);
-	if (retval) {
-		usb_gadget_unregister_driver(&gs_gadget_driver);
-		put_tty_driver(gs_tty_driver);
-		pr_err("gs_module_init: cannot register tty driver, "
-				"ret=%d\n", retval);
-		return retval;
-	}
-
-	pr_info("gs_module_init: %s %s loaded\n",
-			GS_LONG_NAME, GS_VERSION_STR);
-	return 0;
-}
-
-/*
-* gs_module_exit
-*
-* Unregister as a tty driver and a USB gadget driver.
-*/
-static void __exit gs_module_exit(void)
-{
-	tty_unregister_driver(gs_tty_driver);
-	put_tty_driver(gs_tty_driver);
-	usb_gadget_unregister_driver(&gs_gadget_driver);
-
-	pr_info("gs_module_exit: %s %s unloaded\n",
-			GS_LONG_NAME, GS_VERSION_STR);
-}
+/*-------------------------------------------------------------------------*/
 
 /* TTY Driver */
 
@@ -753,15 +606,15 @@
  * gs_close
  */
 
-#define GS_WRITE_FINISHED_EVENT_SAFELY(p)			\
-({								\
-	int cond;						\
-								\
-	spin_lock_irq(&(p)->port_lock);				\
-	cond = !(p)->port_dev || !gs_buf_data_avail((p)->port_write_buf); \
-	spin_unlock_irq(&(p)->port_lock);			\
-	cond;							\
-})
+static int gs_write_finished_event_safely(struct gs_port *p)
+{
+	int cond;
+
+	spin_lock_irq(&(p)->port_lock);
+	cond = !(p)->port_dev || !gs_buf_data_avail((p)->port_write_buf);
+	spin_unlock_irq(&(p)->port_lock);
+	return cond;
+}
 
 static void gs_close(struct tty_struct *tty, struct file *file)
 {
@@ -807,7 +660,7 @@
 	if (gs_buf_data_avail(port->port_write_buf) > 0) {
 		spin_unlock_irq(&port->port_lock);
 		wait_event_interruptible_timeout(port->port_write_wait,
-					GS_WRITE_FINISHED_EVENT_SAFELY(port),
+					gs_write_finished_event_safely(port),
 					GS_CLOSE_TIMEOUT * HZ);
 		spin_lock_irq(&port->port_lock);
 	}
@@ -1065,6 +918,23 @@
 {
 }
 
+static const struct tty_operations gs_tty_ops = {
+	.open =			gs_open,
+	.close =		gs_close,
+	.write =		gs_write,
+	.put_char =		gs_put_char,
+	.flush_chars =		gs_flush_chars,
+	.write_room =		gs_write_room,
+	.ioctl =		gs_ioctl,
+	.set_termios =		gs_set_termios,
+	.throttle =		gs_throttle,
+	.unthrottle =		gs_unthrottle,
+	.break_ctl =		gs_break,
+	.chars_in_buffer =	gs_chars_in_buffer,
+};
+
+/*-------------------------------------------------------------------------*/
+
 /*
 * gs_send
 *
@@ -1080,7 +950,6 @@
 	unsigned long flags;
 	struct usb_ep *ep;
 	struct usb_request *req;
-	struct gs_req_entry *req_entry;
 
 	if (dev == NULL) {
 		pr_err("gs_send: NULL device pointer\n");
@@ -1093,10 +962,8 @@
 
 	while(!list_empty(&dev->dev_req_list)) {
 
-		req_entry = list_entry(dev->dev_req_list.next,
-			struct gs_req_entry, re_entry);
-
-		req = req_entry->re_req;
+		req = list_entry(dev->dev_req_list.next,
+				struct usb_request, list);
 
 		len = gs_send_packet(dev, req->buf, ep->maxpacket);
 
@@ -1106,7 +973,7 @@
 					*((unsigned char *)req->buf),
 					*((unsigned char *)req->buf+1),
 					*((unsigned char *)req->buf+2));
-			list_del(&req_entry->re_entry);
+			list_del(&req->list);
 			req->length = len;
 			spin_unlock_irqrestore(&dev->dev_lock, flags);
 			if ((ret=usb_ep_queue(ep, req, GFP_ATOMIC))) {
@@ -1289,7 +1156,6 @@
 static void gs_write_complete(struct usb_ep *ep, struct usb_request *req)
 {
 	struct gs_dev *dev = ep->driver_data;
-	struct gs_req_entry *gs_req = req->context;
 
 	if (dev == NULL) {
 		pr_err("gs_write_complete: NULL device pointer\n");
@@ -1300,13 +1166,8 @@
 	case 0:
 		/* normal completion */
 requeue:
-		if (gs_req == NULL) {
-			pr_err("gs_write_complete: NULL request pointer\n");
-			return;
-		}
-
 		spin_lock(&dev->dev_lock);
-		list_add(&gs_req->re_entry, &dev->dev_req_list);
+		list_add(&req->list, &dev->dev_req_list);
 		spin_unlock(&dev->dev_lock);
 
 		gs_send(dev);
@@ -1328,9 +1189,39 @@
 	}
 }
 
+/*-------------------------------------------------------------------------*/
+
 /* Gadget Driver */
 
 /*
+ * gs_unbind
+ *
+ * Called on module unload.  Frees the control request and device
+ * structure.
+ */
+static void /* __init_or_exit */ gs_unbind(struct usb_gadget *gadget)
+{
+	struct gs_dev *dev = get_gadget_data(gadget);
+
+	gs_device = NULL;
+
+	/* read/write requests already freed, only control request remains */
+	if (dev != NULL) {
+		if (dev->dev_ctrl_req != NULL) {
+			gs_free_req(gadget->ep0, dev->dev_ctrl_req);
+			dev->dev_ctrl_req = NULL;
+		}
+		gs_reset_config(dev);
+		gs_free_ports(dev);
+		kfree(dev);
+		set_gadget_data(gadget, NULL);
+	}
+
+	pr_info("gs_unbind: %s %s unbound\n", GS_LONG_NAME,
+		GS_VERSION_STR);
+}
+
+/*
  * gs_bind
  *
  * Called on module load.  Allocates and initializes the device
@@ -1362,19 +1253,23 @@
 			__constant_cpu_to_le16(GS_VERSION_NUM|0x0099);
 	}
 
+	dev = kzalloc(sizeof(struct gs_dev), GFP_KERNEL);
+	if (dev == NULL)
+		return -ENOMEM;
+
 	usb_ep_autoconfig_reset(gadget);
 
 	ep = usb_ep_autoconfig(gadget, &gs_fullspeed_in_desc);
 	if (!ep)
 		goto autoconf_fail;
-	EP_IN_NAME = ep->name;
-	ep->driver_data = ep;	/* claim the endpoint */
+	dev->dev_in_ep = ep;
+	ep->driver_data = dev;	/* claim the endpoint */
 
 	ep = usb_ep_autoconfig(gadget, &gs_fullspeed_out_desc);
 	if (!ep)
 		goto autoconf_fail;
-	EP_OUT_NAME = ep->name;
-	ep->driver_data = ep;	/* claim the endpoint */
+	dev->dev_out_ep = ep;
+	ep->driver_data = dev;	/* claim the endpoint */
 
 	if (use_acm) {
 		ep = usb_ep_autoconfig(gadget, &gs_fullspeed_notify_desc);
@@ -1384,8 +1279,8 @@
 		}
 		gs_device_desc.idProduct = __constant_cpu_to_le16(
 						GS_CDC_PRODUCT_ID),
-		EP_NOTIFY_NAME = ep->name;
-		ep->driver_data = ep;	/* claim the endpoint */
+		dev->dev_notify_ep = ep;
+		ep->driver_data = dev;	/* claim the endpoint */
 	}
 
 	gs_device_desc.bDeviceClass = use_acm
@@ -1415,9 +1310,7 @@
 		gs_acm_config_desc.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
 	}
 
-	gs_device = dev = kzalloc(sizeof(struct gs_dev), GFP_KERNEL);
-	if (dev == NULL)
-		return -ENOMEM;
+	gs_device = dev;
 
 	snprintf(manufacturer, sizeof(manufacturer), "%s %s with %s",
 		init_utsname()->sysname, init_utsname()->release,
@@ -1441,8 +1334,6 @@
 		gs_unbind(gadget);
 		return -ENOMEM;
 	}
-	dev->dev_ctrl_req->complete = gs_setup_complete;
-
 	gadget->ep0->driver_data = dev;
 
 	pr_info("gs_bind: %s %s bound\n",
@@ -1451,99 +1342,11 @@
 	return 0;
 
 autoconf_fail:
+	kfree(dev);
 	pr_err("gs_bind: cannot autoconfigure on %s\n", gadget->name);
 	return -ENODEV;
 }
 
-/*
- * gs_unbind
- *
- * Called on module unload.  Frees the control request and device
- * structure.
- */
-static void /* __init_or_exit */ gs_unbind(struct usb_gadget *gadget)
-{
-	struct gs_dev *dev = get_gadget_data(gadget);
-
-	gs_device = NULL;
-
-	/* read/write requests already freed, only control request remains */
-	if (dev != NULL) {
-		if (dev->dev_ctrl_req != NULL) {
-			gs_free_req(gadget->ep0, dev->dev_ctrl_req);
-			dev->dev_ctrl_req = NULL;
-		}
-		gs_free_ports(dev);
-		if (dev->dev_notify_ep)
-			usb_ep_disable(dev->dev_notify_ep);
-		if (dev->dev_in_ep)
-			usb_ep_disable(dev->dev_in_ep);
-		if (dev->dev_out_ep)
-			usb_ep_disable(dev->dev_out_ep);
-		kfree(dev);
-		set_gadget_data(gadget, NULL);
-	}
-
-	pr_info("gs_unbind: %s %s unbound\n", GS_LONG_NAME,
-		GS_VERSION_STR);
-}
-
-/*
- * gs_setup
- *
- * Implements all the control endpoint functionality that's not
- * handled in hardware or the hardware driver.
- *
- * Returns the size of the data sent to the host, or a negative
- * error number.
- */
-static int gs_setup(struct usb_gadget *gadget,
-	const struct usb_ctrlrequest *ctrl)
-{
-	int ret = -EOPNOTSUPP;
-	struct gs_dev *dev = get_gadget_data(gadget);
-	struct usb_request *req = dev->dev_ctrl_req;
-	u16 wIndex = le16_to_cpu(ctrl->wIndex);
-	u16 wValue = le16_to_cpu(ctrl->wValue);
-	u16 wLength = le16_to_cpu(ctrl->wLength);
-
-	req->complete = gs_setup_complete;
-
-	switch (ctrl->bRequestType & USB_TYPE_MASK) {
-	case USB_TYPE_STANDARD:
-		ret = gs_setup_standard(gadget,ctrl);
-		break;
-
-	case USB_TYPE_CLASS:
-		ret = gs_setup_class(gadget,ctrl);
-		break;
-
-	default:
-		pr_err("gs_setup: unknown request, type=%02x, request=%02x, "
-			"value=%04x, index=%04x, length=%d\n",
-			ctrl->bRequestType, ctrl->bRequest,
-			wValue, wIndex, wLength);
-		break;
-	}
-
-	/* respond with data transfer before status phase? */
-	if (ret >= 0) {
-		req->length = ret;
-		req->zero = ret < wLength
-				&& (ret % gadget->ep0->maxpacket) == 0;
-		ret = usb_ep_queue(gadget->ep0, req, GFP_ATOMIC);
-		if (ret < 0) {
-			pr_err("gs_setup: cannot queue response, ret=%d\n",
-				ret);
-			req->status = 0;
-			gs_setup_complete(gadget->ep0, req);
-		}
-	}
-
-	/* device either stalls (ret < 0) or reports success */
-	return ret;
-}
-
 static int gs_setup_standard(struct usb_gadget *gadget,
 	const struct usb_ctrlrequest *ctrl)
 {
@@ -1673,6 +1476,42 @@
 	return ret;
 }
 
+static void gs_setup_complete_set_line_coding(struct usb_ep *ep,
+		struct usb_request *req)
+{
+	struct gs_dev *dev = ep->driver_data;
+	struct gs_port *port = dev->dev_port[0]; /* ACM only has one port */
+
+	switch (req->status) {
+	case 0:
+		/* normal completion */
+		if (req->actual != sizeof(port->port_line_coding))
+			usb_ep_set_halt(ep);
+		else if (port) {
+			struct usb_cdc_line_coding	*value = req->buf;
+
+			/* REVISIT:  we currently just remember this data.
+			 * If we change that, (a) validate it first, then
+			 * (b) update whatever hardware needs updating.
+			 */
+			spin_lock(&port->port_lock);
+			port->port_line_coding = *value;
+			spin_unlock(&port->port_lock);
+		}
+		break;
+
+	case -ESHUTDOWN:
+		/* disconnect */
+		gs_free_req(ep, req);
+		break;
+
+	default:
+		/* unexpected */
+		break;
+	}
+	return;
+}
+
 static int gs_setup_class(struct usb_gadget *gadget,
 	const struct usb_ctrlrequest *ctrl)
 {
@@ -1734,42 +1573,6 @@
 	return ret;
 }
 
-static void gs_setup_complete_set_line_coding(struct usb_ep *ep,
-		struct usb_request *req)
-{
-	struct gs_dev *dev = ep->driver_data;
-	struct gs_port *port = dev->dev_port[0]; /* ACM only has one port */
-
-	switch (req->status) {
-	case 0:
-		/* normal completion */
-		if (req->actual != sizeof(port->port_line_coding))
-			usb_ep_set_halt(ep);
-		else if (port) {
-			struct usb_cdc_line_coding	*value = req->buf;
-
-			/* REVISIT:  we currently just remember this data.
-			 * If we change that, (a) validate it first, then
-			 * (b) update whatever hardware needs updating.
-			 */
-			spin_lock(&port->port_lock);
-			port->port_line_coding = *value;
-			spin_unlock(&port->port_lock);
-		}
-		break;
-
-	case -ESHUTDOWN:
-		/* disconnect */
-		gs_free_req(ep, req);
-		break;
-
-	default:
-		/* unexpected */
-		break;
-	}
-	return;
-}
-
 /*
  * gs_setup_complete
  */
@@ -1783,6 +1586,62 @@
 }
 
 /*
+ * gs_setup
+ *
+ * Implements all the control endpoint functionality that's not
+ * handled in hardware or the hardware driver.
+ *
+ * Returns the size of the data sent to the host, or a negative
+ * error number.
+ */
+static int gs_setup(struct usb_gadget *gadget,
+	const struct usb_ctrlrequest *ctrl)
+{
+	int		ret = -EOPNOTSUPP;
+	struct gs_dev	*dev = get_gadget_data(gadget);
+	struct usb_request *req = dev->dev_ctrl_req;
+	u16		wIndex = le16_to_cpu(ctrl->wIndex);
+	u16		wValue = le16_to_cpu(ctrl->wValue);
+	u16		wLength = le16_to_cpu(ctrl->wLength);
+
+	req->complete = gs_setup_complete;
+
+	switch (ctrl->bRequestType & USB_TYPE_MASK) {
+	case USB_TYPE_STANDARD:
+		ret = gs_setup_standard(gadget, ctrl);
+		break;
+
+	case USB_TYPE_CLASS:
+		ret = gs_setup_class(gadget, ctrl);
+		break;
+
+	default:
+		pr_err("gs_setup: unknown request, type=%02x, request=%02x, "
+			"value=%04x, index=%04x, length=%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			wValue, wIndex, wLength);
+		break;
+	}
+
+	/* respond with data transfer before status phase? */
+	if (ret >= 0) {
+		req->length = ret;
+		req->zero = ret < wLength
+				&& (ret % gadget->ep0->maxpacket) == 0;
+		ret = usb_ep_queue(gadget->ep0, req, GFP_ATOMIC);
+		if (ret < 0) {
+			pr_err("gs_setup: cannot queue response, ret=%d\n",
+				ret);
+			req->status = 0;
+			gs_setup_complete(gadget->ep0, req);
+		}
+	}
+
+	/* device either stalls (ret < 0) or reports success */
+	return ret;
+}
+
+/*
  * gs_disconnect
  *
  * Called when the device is disconnected.  Frees the closed
@@ -1811,6 +1670,23 @@
 	pr_info("gs_disconnect: %s disconnected\n", GS_LONG_NAME);
 }
 
+static struct usb_gadget_driver gs_gadget_driver = {
+#ifdef CONFIG_USB_GADGET_DUALSPEED
+	.speed =		USB_SPEED_HIGH,
+#else
+	.speed =		USB_SPEED_FULL,
+#endif /* CONFIG_USB_GADGET_DUALSPEED */
+	.function =		GS_LONG_NAME,
+	.bind =			gs_bind,
+	.unbind =		gs_unbind,
+	.setup =		gs_setup,
+	.disconnect =		gs_disconnect,
+	.driver = {
+		.name =		GS_SHORT_NAME,
+		.owner =	THIS_MODULE,
+	},
+};
+
 /*
  * gs_set_config
  *
@@ -1826,9 +1702,8 @@
 	int ret = 0;
 	struct usb_gadget *gadget = dev->dev_gadget;
 	struct usb_ep *ep;
-	struct usb_endpoint_descriptor *ep_desc;
+	struct usb_endpoint_descriptor *out, *in, *notify;
 	struct usb_request *req;
-	struct gs_req_entry *req_entry;
 
 	if (dev == NULL) {
 		pr_err("gs_set_config: NULL device pointer\n");
@@ -1846,86 +1721,62 @@
 	case GS_BULK_CONFIG_ID:
 		if (use_acm)
 			return -EINVAL;
-		/* device specific optimizations */
-		if (gadget_is_net2280(gadget))
-			net2280_set_fifo_mode(gadget, 1);
 		break;
 	case GS_ACM_CONFIG_ID:
 		if (!use_acm)
 			return -EINVAL;
-		/* device specific optimizations */
-		if (gadget_is_net2280(gadget))
-			net2280_set_fifo_mode(gadget, 1);
 		break;
 	default:
 		return -EINVAL;
 	}
 
-	dev->dev_config = config;
-
-	gadget_for_each_ep(ep, gadget) {
-
-		if (EP_NOTIFY_NAME
-		&& strcmp(ep->name, EP_NOTIFY_NAME) == 0) {
-			ep_desc = choose_ep_desc(gadget,
+	in = choose_ep_desc(gadget,
+			&gs_highspeed_in_desc,
+			&gs_fullspeed_in_desc);
+	out = choose_ep_desc(gadget,
+			&gs_highspeed_out_desc,
+			&gs_fullspeed_out_desc);
+	notify = dev->dev_notify_ep
+		? choose_ep_desc(gadget,
 				&gs_highspeed_notify_desc,
-				&gs_fullspeed_notify_desc);
-			ret = usb_ep_enable(ep,ep_desc);
-			if (ret == 0) {
-				ep->driver_data = dev;
-				dev->dev_notify_ep = ep;
-				dev->dev_notify_ep_desc = ep_desc;
-			} else {
-				pr_err("gs_set_config: cannot enable NOTIFY "
-					"endpoint %s, ret=%d\n",
-					ep->name, ret);
-				goto exit_reset_config;
-			}
-		}
+				&gs_fullspeed_notify_desc)
+		: NULL;
 
-		else if (strcmp(ep->name, EP_IN_NAME) == 0) {
-			ep_desc = choose_ep_desc(gadget,
-				&gs_highspeed_in_desc,
-				&gs_fullspeed_in_desc);
-			ret = usb_ep_enable(ep,ep_desc);
-			if (ret == 0) {
-				ep->driver_data = dev;
-				dev->dev_in_ep = ep;
-				dev->dev_in_ep_desc = ep_desc;
-			} else {
-				pr_err("gs_set_config: cannot enable IN "
-					"endpoint %s, ret=%d\n",
-					ep->name, ret);
-				goto exit_reset_config;
-			}
-		}
-
-		else if (strcmp(ep->name, EP_OUT_NAME) == 0) {
-			ep_desc = choose_ep_desc(gadget,
-				&gs_highspeed_out_desc,
-				&gs_fullspeed_out_desc);
-			ret = usb_ep_enable(ep,ep_desc);
-			if (ret == 0) {
-				ep->driver_data = dev;
-				dev->dev_out_ep = ep;
-				dev->dev_out_ep_desc = ep_desc;
-			} else {
-				pr_err("gs_set_config: cannot enable OUT "
-					"endpoint %s, ret=%d\n",
-					ep->name, ret);
-				goto exit_reset_config;
-			}
-		}
-
+	ret = usb_ep_enable(dev->dev_in_ep, in);
+	if (ret == 0) {
+		dev->dev_in_ep_desc = in;
+	} else {
+		pr_debug("%s: cannot enable %s %s, ret=%d\n",
+			__func__, "IN", dev->dev_in_ep->name, ret);
+		return ret;
 	}
 
-	if (dev->dev_in_ep == NULL || dev->dev_out_ep == NULL
-	|| (config != GS_BULK_CONFIG_ID && dev->dev_notify_ep == NULL)) {
-		pr_err("gs_set_config: cannot find endpoints\n");
-		ret = -ENODEV;
-		goto exit_reset_config;
+	ret = usb_ep_enable(dev->dev_out_ep, out);
+	if (ret == 0) {
+		dev->dev_out_ep_desc = out;
+	} else {
+		pr_debug("%s: cannot enable %s %s, ret=%d\n",
+			__func__, "OUT", dev->dev_out_ep->name, ret);
+fail0:
+		usb_ep_disable(dev->dev_in_ep);
+		return ret;
 	}
 
+	if (notify) {
+		ret = usb_ep_enable(dev->dev_notify_ep, notify);
+		if (ret == 0) {
+			dev->dev_notify_ep_desc = notify;
+		} else {
+			pr_debug("%s: cannot enable %s %s, ret=%d\n",
+				__func__, "NOTIFY",
+				dev->dev_notify_ep->name, ret);
+			usb_ep_disable(dev->dev_out_ep);
+			goto fail0;
+		}
+	}
+
+	dev->dev_config = config;
+
 	/* allocate and queue read requests */
 	ep = dev->dev_out_ep;
 	for (i=0; i<read_q_size && ret == 0; i++) {
@@ -1946,9 +1797,10 @@
 	/* allocate write requests, and put on free list */
 	ep = dev->dev_in_ep;
 	for (i=0; i<write_q_size; i++) {
-		if ((req_entry=gs_alloc_req_entry(ep, ep->maxpacket, GFP_ATOMIC))) {
-			req_entry->re_req->complete = gs_write_complete;
-			list_add(&req_entry->re_entry, &dev->dev_req_list);
+		req = gs_alloc_req(ep, ep->maxpacket, GFP_ATOMIC);
+		if (req) {
+			req->complete = gs_write_complete;
+			list_add(&req->list, &dev->dev_req_list);
 		} else {
 			pr_err("gs_set_config: cannot allocate "
 					"write requests\n");
@@ -1986,7 +1838,7 @@
  */
 static void gs_reset_config(struct gs_dev *dev)
 {
-	struct gs_req_entry *req_entry;
+	struct usb_request *req;
 
 	if (dev == NULL) {
 		pr_err("gs_reset_config: NULL device pointer\n");
@@ -2000,26 +1852,18 @@
 
 	/* free write requests on the free list */
 	while(!list_empty(&dev->dev_req_list)) {
-		req_entry = list_entry(dev->dev_req_list.next,
-			struct gs_req_entry, re_entry);
-		list_del(&req_entry->re_entry);
-		gs_free_req_entry(dev->dev_in_ep, req_entry);
+		req = list_entry(dev->dev_req_list.next,
+				struct usb_request, list);
+		list_del(&req->list);
+		gs_free_req(dev->dev_in_ep, req);
 	}
 
 	/* disable endpoints, forcing completion of pending i/o; */
 	/* completion handlers free their requests in this case */
-	if (dev->dev_notify_ep) {
+	if (dev->dev_notify_ep)
 		usb_ep_disable(dev->dev_notify_ep);
-		dev->dev_notify_ep = NULL;
-	}
-	if (dev->dev_in_ep) {
-		usb_ep_disable(dev->dev_in_ep);
-		dev->dev_in_ep = NULL;
-	}
-	if (dev->dev_out_ep) {
-		usb_ep_disable(dev->dev_out_ep);
-		dev->dev_out_ep = NULL;
-	}
+	usb_ep_disable(dev->dev_in_ep);
+	usb_ep_disable(dev->dev_out_ep);
 }
 
 /*
@@ -2113,46 +1957,6 @@
 }
 
 /*
- * gs_alloc_req_entry
- *
- * Allocates a request and its buffer, using the given
- * endpoint, buffer len, and kmalloc flags.
- */
-static struct gs_req_entry *
-gs_alloc_req_entry(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags)
-{
-	struct gs_req_entry	*req;
-
-	req = kmalloc(sizeof(struct gs_req_entry), kmalloc_flags);
-	if (req == NULL)
-		return NULL;
-
-	req->re_req = gs_alloc_req(ep, len, kmalloc_flags);
-	if (req->re_req == NULL) {
-		kfree(req);
-		return NULL;
-	}
-
-	req->re_req->context = req;
-
-	return req;
-}
-
-/*
- * gs_free_req_entry
- *
- * Frees a request and its buffer.
- */
-static void gs_free_req_entry(struct usb_ep *ep, struct gs_req_entry *req)
-{
-	if (ep != NULL && req != NULL) {
-		if (req->re_req != NULL)
-			gs_free_req(ep, req->re_req);
-		kfree(req);
-	}
-}
-
-/*
  * gs_alloc_ports
  *
  * Allocate all ports and set the gs_dev struct to point to them.
@@ -2233,6 +2037,8 @@
 	}
 }
 
+/*-------------------------------------------------------------------------*/
+
 /* Circular Buffer */
 
 /*
@@ -2393,3 +2199,77 @@
 
 	return count;
 }
+
+/*-------------------------------------------------------------------------*/
+
+static struct tty_driver *gs_tty_driver;
+
+/*
+ *  gs_module_init
+ *
+ *  Register as a USB gadget driver and a tty driver.
+ */
+static int __init gs_module_init(void)
+{
+	int i;
+	int retval;
+
+	retval = usb_gadget_register_driver(&gs_gadget_driver);
+	if (retval) {
+		pr_err("gs_module_init: cannot register gadget driver, "
+			"ret=%d\n", retval);
+		return retval;
+	}
+
+	gs_tty_driver = alloc_tty_driver(GS_NUM_PORTS);
+	if (!gs_tty_driver)
+		return -ENOMEM;
+	gs_tty_driver->owner = THIS_MODULE;
+	gs_tty_driver->driver_name = GS_SHORT_NAME;
+	gs_tty_driver->name = "ttygs";
+	gs_tty_driver->major = GS_MAJOR;
+	gs_tty_driver->minor_start = GS_MINOR_START;
+	gs_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
+	gs_tty_driver->subtype = SERIAL_TYPE_NORMAL;
+	gs_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
+	gs_tty_driver->init_termios = tty_std_termios;
+	/* must match GS_DEFAULT_DTE_RATE and friends */
+	gs_tty_driver->init_termios.c_cflag =
+		B9600 | CS8 | CREAD | HUPCL | CLOCAL;
+	gs_tty_driver->init_termios.c_ispeed = GS_DEFAULT_DTE_RATE;
+	gs_tty_driver->init_termios.c_ospeed = GS_DEFAULT_DTE_RATE;
+	tty_set_operations(gs_tty_driver, &gs_tty_ops);
+
+	for (i = 0; i < GS_NUM_PORTS; i++)
+		mutex_init(&gs_open_close_lock[i]);
+
+	retval = tty_register_driver(gs_tty_driver);
+	if (retval) {
+		usb_gadget_unregister_driver(&gs_gadget_driver);
+		put_tty_driver(gs_tty_driver);
+		pr_err("gs_module_init: cannot register tty driver, "
+				"ret=%d\n", retval);
+		return retval;
+	}
+
+	pr_info("gs_module_init: %s %s loaded\n",
+			GS_LONG_NAME, GS_VERSION_STR);
+	return 0;
+}
+module_init(gs_module_init);
+
+/*
+ * gs_module_exit
+ *
+ * Unregister as a tty driver and a USB gadget driver.
+ */
+static void __exit gs_module_exit(void)
+{
+	tty_unregister_driver(gs_tty_driver);
+	put_tty_driver(gs_tty_driver);
+	usb_gadget_unregister_driver(&gs_gadget_driver);
+
+	pr_info("gs_module_exit: %s %s unloaded\n",
+			GS_LONG_NAME, GS_VERSION_STR);
+}
+module_exit(gs_module_exit);
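
Aside: the serial-gadget hunks above drop the driver-private struct gs_req_entry
wrapper and instead track spare write requests through the list member that
struct usb_request already provides for gadget drivers.  A minimal sketch of
that pattern, not part of the patch (my_requeue, my_get_req and the lock are
hypothetical names):

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/usb/gadget.h>

static void my_requeue(struct list_head *free_list, spinlock_t *lock,
		       struct usb_request *req)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_add(&req->list, free_list);	/* park the request for reuse */
	spin_unlock_irqrestore(lock, flags);
}

static struct usb_request *my_get_req(struct list_head *free_list)
{
	struct usb_request *req;

	if (list_empty(free_list))
		return NULL;
	req = list_entry(free_list->next, struct usb_request, list);
	list_del(&req->list);			/* caller now owns the request */
	return req;
}
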
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
index 4ba96c1..c9cec87 100644
--- a/drivers/usb/host/isp1760-hcd.c
+++ b/drivers/usb/host/isp1760-hcd.c
@@ -988,7 +988,7 @@
 			 * This did not trigger for a long time now.
 			 */
 			printk(KERN_ERR "Reloading ptd %p/%p... qh %p readed: "
-					"%d of %d done: %08x cur: %08x\n", qtd,
+					"%d of %zu done: %08x cur: %08x\n", qtd,
 					urb, qh, PTD_XFERRED_LENGTH(dw3),
 					qtd->length, done_map,
 					(1 << queue_entry));
@@ -1088,7 +1088,7 @@
 		} else if (usb_pipebulk(urb->pipe) && (length < qtd->length)) {
 			/* short BULK received */
 
-			printk(KERN_ERR "short bulk, %d instead %d\n", length,
+			printk(KERN_ERR "short bulk, %d instead %zu\n", length,
 					qtd->length);
 			if (urb->transfer_flags & URB_SHORT_NOT_OK) {
 				urb->status = -EREMOTEIO;
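
Aside: the two printk changes above follow the usual C convention that a
size_t (qtd->length here) is printed with %zu; %d only happens to work where
int and size_t share a width.  A trivial sketch with a hypothetical helper:

#include <linux/kernel.h>

static void show_len(size_t len)
{
	/* %zu is the length modifier for size_t; %d would warn and can
	 * misprint on 64-bit architectures. */
	printk(KERN_DEBUG "len is %zu bytes\n", len);
}
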
diff --git a/drivers/usb/host/isp1760-if.c b/drivers/usb/host/isp1760-if.c
index 73fb2a3..440bf94 100644
--- a/drivers/usb/host/isp1760-if.c
+++ b/drivers/usb/host/isp1760-if.c
@@ -256,7 +256,7 @@
 
 static int __init isp1760_init(void)
 {
-	int ret;
+	int ret = -ENODEV;
 
 	init_kmem_once();
 
diff --git a/drivers/usb/host/ohci-sm501.c b/drivers/usb/host/ohci-sm501.c
index 77204f0..e899a77 100644
--- a/drivers/usb/host/ohci-sm501.c
+++ b/drivers/usb/host/ohci-sm501.c
@@ -90,7 +90,7 @@
 	struct device *dev = &pdev->dev;
 	struct resource	*res, *mem;
 	int retval, irq;
-	struct usb_hcd *hcd = 0;
+	struct usb_hcd *hcd = NULL;
 
 	irq = retval = platform_get_irq(pdev, 0);
 	if (retval < 0)
diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c
index 7aafd53..189a9db 100644
--- a/drivers/usb/misc/ldusb.c
+++ b/drivers/usb/misc/ldusb.c
@@ -63,9 +63,6 @@
 #define USB_DEVICE_ID_VERNIER_CYCLOPS	0x0004
 #define USB_DEVICE_ID_VERNIER_LCSPEC	0x0006
 
-#define USB_VENDOR_ID_MICROCHIP		0x04d8
-#define USB_DEVICE_ID_PICDEM		0x000c
-
 #ifdef CONFIG_USB_DYNAMIC_MINORS
 #define USB_LD_MINOR_BASE	0
 #else
@@ -92,7 +89,6 @@
 	{ USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_GOTEMP) },
 	{ USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_SKIP) },
 	{ USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_CYCLOPS) },
-	{ USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICDEM) },
 	{ USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_LCSPEC) },
 	{ }					/* Terminating entry */
 };
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index 742be3c..054dedd 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -856,6 +856,11 @@
 		struct urb		*u;
 		struct usb_ctrlrequest	req;
 		struct subcase		*reqp;
+
+		/* sign of this variable means:
+		 *  -: tested code must return this (negative) error code
+		 *  +: tested code may return this (negative too) error code
+		 */
 		int			expected = 0;
 
 		/* requests here are mostly expected to succeed on any
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index 2cffec8..9ba64cc 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -447,6 +447,15 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called mos7840.  If unsure, choose N.
 
+config USB_SERIAL_MOTOROLA
+	tristate "USB Motorola Phone modem driver"
+	---help---
+	  Say Y here if you want to use a Motorola phone with a USB
+	  connector as a modem link.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called moto_modem.  If unsure, choose N.
+
 config USB_SERIAL_NAVMAN
 	tristate "USB Navman GPS device"
 	help
diff --git a/drivers/usb/serial/Makefile b/drivers/usb/serial/Makefile
index 7568595..17a762a 100644
--- a/drivers/usb/serial/Makefile
+++ b/drivers/usb/serial/Makefile
@@ -39,6 +39,7 @@
 obj-$(CONFIG_USB_SERIAL_MCT_U232)		+= mct_u232.o
 obj-$(CONFIG_USB_SERIAL_MOS7720)		+= mos7720.o
 obj-$(CONFIG_USB_SERIAL_MOS7840)		+= mos7840.o
+obj-$(CONFIG_USB_SERIAL_MOTOROLA)		+= moto_modem.o
 obj-$(CONFIG_USB_SERIAL_NAVMAN)			+= navman.o
 obj-$(CONFIG_USB_SERIAL_OMNINET)		+= omninet.o
 obj-$(CONFIG_USB_SERIAL_OPTION)			+= option.o
diff --git a/drivers/usb/serial/moto_modem.c b/drivers/usb/serial/moto_modem.c
new file mode 100644
index 0000000..2e8e054
--- /dev/null
+++ b/drivers/usb/serial/moto_modem.c
@@ -0,0 +1,70 @@
+/*
+ * Motorola USB Phone driver
+ *
+ * Copyright (C) 2008 Greg Kroah-Hartman <greg@kroah.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ *
+ * {sigh}
+ * Motorola should be using the CDC ACM USB spec, but instead
+ * they try to just "do their own thing"...  This driver should handle a
+ * few phones for which a basic "dumb serial connection" is all that is
+ * needed to get a connection through to them.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/tty.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/usb/serial.h>
+
+static struct usb_device_id id_table [] = {
+	{ USB_DEVICE(0x05c6, 0x3197) },	/* unknown Motorola phone */
+	{ USB_DEVICE(0x0c44, 0x0022) },	/* unknown Motorola phone */
+	{ USB_DEVICE(0x22b8, 0x2a64) },	/* Motorola KRZR K1m */
+	{ },
+};
+MODULE_DEVICE_TABLE(usb, id_table);
+
+static struct usb_driver moto_driver = {
+	.name =		"moto-modem",
+	.probe =	usb_serial_probe,
+	.disconnect =	usb_serial_disconnect,
+	.id_table =	id_table,
+	.no_dynamic_id = 	1,
+};
+
+static struct usb_serial_driver moto_device = {
+	.driver = {
+		.owner =	THIS_MODULE,
+		.name =		"moto-modem",
+	},
+	.id_table =		id_table,
+	.num_ports =		1,
+};
+
+static int __init moto_init(void)
+{
+	int retval;
+
+	retval = usb_serial_register(&moto_device);
+	if (retval)
+		return retval;
+	retval = usb_register(&moto_driver);
+	if (retval)
+		usb_serial_deregister(&moto_device);
+	return retval;
+}
+
+static void __exit moto_exit(void)
+{
+	usb_deregister(&moto_driver);
+	usb_serial_deregister(&moto_device);
+}
+
+module_init(moto_init);
+module_exit(moto_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index e4be2d4..e7e016e 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -154,8 +154,6 @@
 #define NOVATELWIRELESS_PRODUCT_MC727		0x4100
 #define NOVATELWIRELESS_PRODUCT_MC950D		0x4400
 
-#define NOVATELWIRELESS_PRODUCT_U727		0x5010
-
 /* FUTURE NOVATEL PRODUCTS */
 #define NOVATELWIRELESS_PRODUCT_EVDO_1		0x6000
 #define NOVATELWIRELESS_PRODUCT_HSPA_1		0x7000
@@ -184,6 +182,9 @@
 #define AXESSTEL_VENDOR_ID			0x1726
 #define AXESSTEL_PRODUCT_MV110H			0x1000
 
+#define ONDA_VENDOR_ID				0x19d2
+#define ONDA_PRODUCT_ET502HS			0x0002
+
 #define BANDRICH_VENDOR_ID			0x1A8D
 #define BANDRICH_PRODUCT_C100_1			0x1002
 #define BANDRICH_PRODUCT_C100_2			0x1003
@@ -269,7 +270,6 @@
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU870D) }, /* Novatel EU850D/EU860D/EU870D */
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC950D) }, /* Novatel MC930D/MC950D */
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727) }, /* Novatel MC727/U727/USB727 */
-	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U727) }, /* Novatel U727 */
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_1) }, /* Novatel EVDO product */
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_1) }, /* Novatel HSPA product */
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EMBEDDED_1) }, /* Novatel Embedded product */
@@ -293,14 +293,17 @@
 	{ USB_DEVICE(DELL_VENDOR_ID, 0x8133) }, /* Dell Wireless 5720 == Novatel EV620 CDMA/EV-DO */
 	{ USB_DEVICE(DELL_VENDOR_ID, 0x8136) },	/* Dell Wireless HSDPA 5520 == Novatel Expedite EU860D */
 	{ USB_DEVICE(DELL_VENDOR_ID, 0x8137) },	/* Dell Wireless HSDPA 5520 */
+	{ USB_DEVICE(DELL_VENDOR_ID, 0x8138) },	/* Dell Wireless 5520 Voda I Mobile Broadband (3G HSDPA) Minicard */
 	{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) },
 	{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) },
 	{ USB_DEVICE(AXESSTEL_VENDOR_ID, AXESSTEL_PRODUCT_MV110H) },
+	{ USB_DEVICE(ONDA_VENDOR_ID, ONDA_PRODUCT_ET502HS) },
 	{ USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_1) },
 	{ USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_2) },
 	{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
 	{ USB_DEVICE(MAXON_VENDOR_ID, 0x6280) }, /* BP3-USB & BP3-EXT HSDPA */
+	{ USB_DEVICE(0x19d2, 0x0001) }, 	/* Telstra NextG CDMA */
 	{ } /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index a0ed889..1b09578 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -401,6 +401,14 @@
 		US_SC_DEVICE, US_PR_DEVICE, NULL,
 		US_FL_IGNORE_RESIDUE ),
 
+#ifdef CONFIG_USB_STORAGE_CYPRESS_ATACB
+UNUSUAL_DEV(  0x04b4, 0x6830, 0x0000, 0x9999,
+		"Cypress",
+		"Cypress AT2LP",
+		US_SC_CYP_ATACB, US_PR_BULK, NULL,
+		0),
+#endif
+
 /* Reported by Simon Levitt <simon@whattf.com>
  * This entry needs Sub and Proto fields */
 UNUSUAL_DEV(  0x04b8, 0x0601, 0x0100, 0x0100,
@@ -539,17 +547,6 @@
 		"CD-RW Device",
 		US_SC_8020, US_PR_CB, NULL, 0),
 
-/* Entry and supporting patch by Theodore Kilgore <kilgota@auburn.edu>.
- * Device uses standards-violating 32-byte Bulk Command Block Wrappers and
- * reports itself as "Proprietary SCSI Bulk." Cf. device entry 0x084d:0x0011.
- */
-
-UNUSUAL_DEV(  0x04fc, 0x80c2, 0x0100, 0x0100,
-		"Kobian Mercury",
-		"Binocam DCB-132",
-		US_SC_DEVICE, US_PR_DEVICE, NULL,
-		US_FL_BULK32),
-
 #ifdef CONFIG_USB_STORAGE_USBAT
 UNUSUAL_DEV(  0x04e6, 0x1010, 0x0000, 0x9999,
 		"Shuttle/SCM",
@@ -565,6 +562,16 @@
 		US_SC_DEVICE, US_PR_DEVICE, NULL,
 		US_FL_MAX_SECTORS_64),
 
+/* Entry and supporting patch by Theodore Kilgore <kilgota@auburn.edu>.
+ * Device uses standards-violating 32-byte Bulk Command Block Wrappers and
+ * reports itself as "Proprietary SCSI Bulk." Cf. device entry 0x084d:0x0011.
+ */
+UNUSUAL_DEV(  0x04fc, 0x80c2, 0x0100, 0x0100,
+		"Kobian Mercury",
+		"Binocam DCB-132",
+		US_SC_DEVICE, US_PR_DEVICE, NULL,
+		US_FL_BULK32),
+
 /* Reported by Bob Sass <rls@vectordb.com> -- only rev 1.33 tested */
 UNUSUAL_DEV(  0x050d, 0x0115, 0x0133, 0x0133,
 		"Belkin",
@@ -1304,6 +1311,16 @@
 		US_SC_DEVICE, US_PR_DEVICE, NULL,
 		US_FL_IGNORE_DEVICE ),
 
+/* Reported by F. Aben <f.aben@option.com>
+ * This device (wrongly) has a vendor-specific device descriptor.
+ * The entry is needed so usb-storage can bind to its mass-storage
+ * interface as an interface driver */
+UNUSUAL_DEV( 0x0af0, 0x7401, 0x0000, 0x0000,
+		"Option",
+		"GI 0401 SD-Card",
+		US_SC_DEVICE, US_PR_DEVICE, NULL,
+		0 ),
+
 #ifdef CONFIG_USB_STORAGE_ISD200
 UNUSUAL_DEV(  0x0bf6, 0xa001, 0x0100, 0x0110,
 		"ATI",
@@ -1361,13 +1378,6 @@
 		US_SC_DEVICE, US_PR_DEVICE, NULL,
 		US_FL_FIX_INQUIRY),
 
-/* Reported by Rohan Hart <rohan.hart17@gmail.com> */
-UNUSUAL_DEV(  0x2770, 0x915d, 0x0010, 0x0010,
-		"INTOVA",
-		"Pixtreme",
-		US_SC_DEVICE, US_PR_DEVICE, NULL,
-		US_FL_FIX_CAPACITY ),
-
 /*
  * Entry for Jenoptik JD 5200z3
  *
@@ -1684,6 +1694,16 @@
 		US_SC_DEVICE, US_PR_DEVICE, NULL,
 		US_FL_IGNORE_RESIDUE ),
 
+/* Reported by Mauro Andreolini <andreoli@weblab.ing.unimo.it>
+ * This entry is needed to bypass the ZeroCD mechanism
+ * and to properly load as a modem device.
+ */
+UNUSUAL_DEV(  0x19d2, 0x2000, 0x0000, 0x0000,
+		"Onda ET502HS",
+		"USB MMC Storage",
+		US_SC_DEVICE, US_PR_DEVICE, NULL,
+		US_FL_IGNORE_DEVICE),
+
 /* patch submitted by Davide Perini <perini.davide@dpsoftware.org>
  * and Renato Perini <rperini@email.it>
  */
@@ -1721,6 +1741,13 @@
 		US_SC_DEVICE, US_PR_DEVICE, NULL,
 		US_FL_GO_SLOW ),
 
+/* Reported by Rohan Hart <rohan.hart17@gmail.com> */
+UNUSUAL_DEV(  0x2770, 0x915d, 0x0010, 0x0010,
+		"INTOVA",
+		"Pixtreme",
+		US_SC_DEVICE, US_PR_DEVICE, NULL,
+		US_FL_FIX_CAPACITY ),
+
 /*
  * David Härdeman <david@2gen.com>
  * The key makes the SCSI stack print confusing (but harmless) messages
@@ -1745,14 +1772,6 @@
 		US_SC_DEVICE, US_PR_DEVICE, NULL,
 		US_FL_CAPACITY_HEURISTICS),
 
-#ifdef CONFIG_USB_STORAGE_CYPRESS_ATACB
-UNUSUAL_DEV(  0x04b4, 0x6830, 0x0000, 0x9999,
-		"Cypress",
-		"Cypress AT2LP",
-		US_SC_CYP_ATACB, US_PR_BULK, NULL,
-		0),
-#endif
-
 /* Control/Bulk transport for all SubClass values */
 USUAL_DEV(US_SC_RBC, US_PR_CB, USB_US_TYPE_STOR),
 USUAL_DEV(US_SC_8020, US_PR_CB, USB_US_TYPE_STOR),
diff --git a/include/asm-alpha/barrier.h b/include/asm-alpha/barrier.h
index 384dc08..ac78eba 100644
--- a/include/asm-alpha/barrier.h
+++ b/include/asm-alpha/barrier.h
@@ -24,7 +24,7 @@
 #define smp_mb()	barrier()
 #define smp_rmb()	barrier()
 #define smp_wmb()	barrier()
-#define smp_read_barrier_depends()	barrier()
+#define smp_read_barrier_depends()	do { } while (0)
 #endif
 
 #define set_mb(var, value) \
diff --git a/include/asm-alpha/pgtable.h b/include/asm-alpha/pgtable.h
index 05ce5fb..3f0c59f 100644
--- a/include/asm-alpha/pgtable.h
+++ b/include/asm-alpha/pgtable.h
@@ -287,17 +287,34 @@
 #define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
 #define pgd_offset(mm, address)	((mm)->pgd+pgd_index(address))
 
+/*
+ * The smp_read_barrier_depends() in the following functions are required to
+ * order the load of *dir (the pointer in the top level page table) with any
+ * subsequent load of the returned pmd_t *ret (ret is data dependent on *dir).
+ *
+ * If this ordering is not enforced, the CPU might load an older value of
+ * *ret, which may be uninitialized data. See mm/memory.c:__pte_alloc for
+ * more details.
+ *
+ * Note that we never change the mm->pgd pointer after the task is running, so
+ * pgd_offset does not require such a barrier.
+ */
+
 /* Find an entry in the second-level page table.. */
 extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
 {
-	return (pmd_t *) pgd_page_vaddr(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PAGE - 1));
+	pmd_t *ret = (pmd_t *) pgd_page_vaddr(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PAGE - 1));
+	smp_read_barrier_depends(); /* see above */
+	return ret;
 }
 
 /* Find an entry in the third-level page table.. */
 extern inline pte_t * pte_offset_kernel(pmd_t * dir, unsigned long address)
 {
-	return (pte_t *) pmd_page_vaddr(*dir)
+	pte_t *ret = (pte_t *) pmd_page_vaddr(*dir)
 		+ ((address >> PAGE_SHIFT) & (PTRS_PER_PAGE - 1));
+	smp_read_barrier_depends(); /* see above */
+	return ret;
 }
 
 #define pte_offset_map(dir,addr)	pte_offset_kernel((dir),(addr))
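
Aside: the comment added above describes the usual publish/consume pairing: a
writer initialises an object, issues smp_wmb(), and only then stores the
pointer; a reader that loads the pointer and then dereferences it needs
smp_read_barrier_depends() between the two loads (a no-op everywhere except
Alpha).  A minimal sketch with hypothetical names (struct item, slot):

#include <linux/compiler.h>
#include <asm/system.h>

struct item { int val; };
static struct item *slot;

static void publish(struct item *new)
{
	new->val = 42;			/* initialise the data ...           */
	smp_wmb();			/* ... before publishing the pointer */
	slot = new;
}

static int consume(void)
{
	struct item *p = slot;		/* load the pointer ...              */

	smp_read_barrier_depends();	/* ... then the data it points to    */
	return p ? p->val : -1;
}
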
diff --git a/include/asm-frv/system.h b/include/asm-frv/system.h
index cb307f8..d3a12a9 100644
--- a/include/asm-frv/system.h
+++ b/include/asm-frv/system.h
@@ -179,7 +179,7 @@
 #define mb()			asm volatile ("membar" : : :"memory")
 #define rmb()			asm volatile ("membar" : : :"memory")
 #define wmb()			asm volatile ("membar" : : :"memory")
-#define read_barrier_depends()	barrier()
+#define read_barrier_depends()	do { } while (0)
 
 #ifdef CONFIG_SMP
 #define smp_mb()			mb()
diff --git a/include/linux/device.h b/include/linux/device.h
index 8c23e3d..15e9fa3 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -183,7 +183,6 @@
 	struct module		*owner;
 
 	struct kset		subsys;
-	struct list_head	children;
 	struct list_head	devices;
 	struct list_head	interfaces;
 	struct kset		class_dirs;
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index e9874e7..ae7aec3 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -525,7 +525,7 @@
 #define ADDPART_FLAG_RAID	1
 #define ADDPART_FLAG_WHOLEDISK	2
 
-extern dev_t blk_lookup_devt(const char *name);
+extern dev_t blk_lookup_devt(const char *name, int part);
 extern char *disk_name (struct gendisk *hd, int part, char *buf);
 
 extern int rescan_partitions(struct gendisk *disk, struct block_device *bdev);
@@ -553,7 +553,7 @@
 
 static inline void printk_all_partitions(void) { }
 
-static inline dev_t blk_lookup_devt(const char *name)
+static inline dev_t blk_lookup_devt(const char *name, int part)
 {
 	dev_t devt = MKDEV(0, 0);
 	return devt;
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 7c1d446..b11e6e1 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -93,14 +93,16 @@
  *	used.
  */
  
-#if !defined(CONFIG_AX25) && !defined(CONFIG_AX25_MODULE) && !defined(CONFIG_TR)
-#define LL_MAX_HEADER	32
+#if defined(CONFIG_WLAN_80211) || defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
+# if defined(CONFIG_MAC80211_MESH)
+#  define LL_MAX_HEADER 128
+# else
+#  define LL_MAX_HEADER 96
+# endif
+#elif defined(CONFIG_TR)
+# define LL_MAX_HEADER 48
 #else
-#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
-#define LL_MAX_HEADER	96
-#else
-#define LL_MAX_HEADER	48
-#endif
+# define LL_MAX_HEADER 32
 #endif
 
 #if !defined(CONFIG_NET_IPIP) && !defined(CONFIG_NET_IPIP_MODULE) && \
@@ -244,11 +246,16 @@
  *
  * We could use other alignment values, but we must maintain the
  * relationship HH alignment <= LL alignment.
+ *
+ * LL_ALLOCATED_SPACE also takes into account the tailroom the device
+ * may need.
  */
 #define LL_RESERVED_SPACE(dev) \
-	(((dev)->hard_header_len&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
+	((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
 #define LL_RESERVED_SPACE_EXTRA(dev,extra) \
-	((((dev)->hard_header_len+extra)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
+	((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
+#define LL_ALLOCATED_SPACE(dev) \
+	((((dev)->hard_header_len+(dev)->needed_headroom+(dev)->needed_tailroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
 
 struct header_ops {
 	int	(*create) (struct sk_buff *skb, struct net_device *dev,
@@ -567,6 +574,13 @@
 	unsigned short		type;	/* interface hardware type	*/
 	unsigned short		hard_header_len;	/* hardware hdr length	*/
 
+	/* extra head- and tailroom the hardware may need; this cannot be
+	 * guaranteed in all cases, especially for tailroom. Some cases also use
+	 * LL_MAX_HEADER instead to allocate the skb.
+	 */
+	unsigned short		needed_headroom;
+	unsigned short		needed_tailroom;
+
 	struct net_device	*master; /* Pointer to master device of a group,
 					  * which this device is member of.
 					  */
@@ -715,6 +729,9 @@
 	struct net		*nd_net;
 #endif
 
+	/* mid-layer private */
+	void			*ml_priv;
+
 	/* bridge stuff */
 	struct net_bridge_port	*br_port;
 	/* macvlan */
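
Aside: the callers converted later in this merge (arp, igmp, raw, ndisc, ...)
all follow the same pattern: size the skb with LL_ALLOCATED_SPACE() so that
both the extra headroom and tailroom fit, but reserve only LL_RESERVED_SPACE()
before filling in data.  A minimal sketch (alloc_ll_skb is a hypothetical
helper, not part of the patch):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static struct sk_buff *alloc_ll_skb(struct net_device *dev, unsigned int len)
{
	/* account for link-layer head- and tailroom in the allocation */
	struct sk_buff *skb = alloc_skb(len + LL_ALLOCATED_SPACE(dev),
					GFP_ATOMIC);

	if (!skb)
		return NULL;
	/* but only push the data pointer past the headroom */
	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	return skb;
}
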
diff --git a/include/linux/usb/association.h b/include/linux/usb/association.h
new file mode 100644
index 0000000..07c5e3c
--- /dev/null
+++ b/include/linux/usb/association.h
@@ -0,0 +1,150 @@
+/*
+ * Wireless USB - Cable Based Association
+ *
+ * Copyright (C) 2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ */
+#ifndef __LINUX_USB_ASSOCIATION_H
+#define __LINUX_USB_ASSOCIATION_H
+
+
+/*
+ * Association attributes
+ *
+ * Association Models Supplement to WUSB 1.0 T[3-1]
+ *
+ * Each field in the structures has its ID, its length and then the
+ * value. This is the actual definition of the field's ID and its
+ * length.
+ */
+struct wusb_am_attr {
+	__u8 id;
+	__u8 len;
+};
+
+/* Different fields defined by the spec */
+#define WUSB_AR_AssociationTypeId	{ .id = 0x0000, .len =  2 }
+#define WUSB_AR_AssociationSubTypeId	{ .id = 0x0001, .len =  2 }
+#define WUSB_AR_Length			{ .id = 0x0002, .len =  4 }
+#define WUSB_AR_AssociationStatus	{ .id = 0x0004, .len =  4 }
+#define WUSB_AR_LangID			{ .id = 0x0008, .len =  2 }
+#define WUSB_AR_DeviceFriendlyName	{ .id = 0x000b, .len = 64 } /* max */
+#define WUSB_AR_HostFriendlyName	{ .id = 0x000c, .len = 64 } /* max */
+#define WUSB_AR_CHID			{ .id = 0x1000, .len = 16 }
+#define WUSB_AR_CDID			{ .id = 0x1001, .len = 16 }
+#define WUSB_AR_ConnectionContext	{ .id = 0x1002, .len = 48 }
+#define WUSB_AR_BandGroups		{ .id = 0x1004, .len =  2 }
+
+/* CBAF Control Requests (AMS1.0[T4-1]) */
+enum {
+	CBAF_REQ_GET_ASSOCIATION_INFORMATION = 0x01,
+	CBAF_REQ_GET_ASSOCIATION_REQUEST,
+	CBAF_REQ_SET_ASSOCIATION_RESPONSE
+};
+
+/*
+ * CBAF USB-interface defitions
+ *
+ * No altsettings, one optional interrupt endpoint.
+ */
+enum {
+	CBAF_IFACECLASS    = 0xef,
+	CBAF_IFACESUBCLASS = 0x03,
+	CBAF_IFACEPROTOCOL = 0x01,
+};
+
+/* Association Information (AMS1.0[T4-3]) */
+struct wusb_cbaf_assoc_info {
+	__le16 Length;
+	__u8 NumAssociationRequests;
+	__le16 Flags;
+	__u8 AssociationRequestsArray[];
+} __attribute__((packed));
+
+/* Association Request (AMS1.0[T4-4]) */
+struct wusb_cbaf_assoc_request {
+	__u8 AssociationDataIndex;
+	__u8 Reserved;
+	__le16 AssociationTypeId;
+	__le16 AssociationSubTypeId;
+	__le32 AssociationTypeInfoSize;
+} __attribute__((packed));
+
+enum {
+	AR_TYPE_WUSB                    = 0x0001,
+	AR_TYPE_WUSB_RETRIEVE_HOST_INFO = 0x0000,
+	AR_TYPE_WUSB_ASSOCIATE          = 0x0001,
+};
+
+/* Association Attribute header (AMS1.0[3.8]) */
+struct wusb_cbaf_attr_hdr {
+	__le16 id;
+	__le16 len;
+} __attribute__((packed));
+
+/* Host Info (AMS1.0[T4-7]) (yeah, more headers and fields...) */
+struct wusb_cbaf_host_info {
+	struct wusb_cbaf_attr_hdr AssociationTypeId_hdr;
+	__le16 AssociationTypeId;
+	struct wusb_cbaf_attr_hdr AssociationSubTypeId_hdr;
+	__le16 AssociationSubTypeId;
+	struct wusb_cbaf_attr_hdr CHID_hdr;
+	struct wusb_ckhdid CHID;
+	struct wusb_cbaf_attr_hdr LangID_hdr;
+	__le16 LangID;
+	struct wusb_cbaf_attr_hdr HostFriendlyName_hdr;
+	__u8 HostFriendlyName[];
+} __attribute__((packed));
+
+/* Device Info (AMS1.0[T4-8])
+ *
+ * I still don't get this tag'n'header stuff for each goddamn
+ * field...
+ */
+struct wusb_cbaf_device_info {
+	struct wusb_cbaf_attr_hdr Length_hdr;
+	__le32 Length;
+	struct wusb_cbaf_attr_hdr CDID_hdr;
+	struct wusb_ckhdid CDID;
+	struct wusb_cbaf_attr_hdr BandGroups_hdr;
+	__le16 BandGroups;
+	struct wusb_cbaf_attr_hdr LangID_hdr;
+	__le16 LangID;
+	struct wusb_cbaf_attr_hdr DeviceFriendlyName_hdr;
+	__u8 DeviceFriendlyName[];
+} __attribute__((packed));
+
+/* Connection Context; CC_DATA - Success case (AMS1.0[T4-9]) */
+struct wusb_cbaf_cc_data {
+	struct wusb_cbaf_attr_hdr AssociationTypeId_hdr;
+	__le16 AssociationTypeId;
+	struct wusb_cbaf_attr_hdr AssociationSubTypeId_hdr;
+	__le16 AssociationSubTypeId;
+	struct wusb_cbaf_attr_hdr Length_hdr;
+	__le32 Length;
+	struct wusb_cbaf_attr_hdr ConnectionContext_hdr;
+	struct wusb_ckhdid CHID;
+	struct wusb_ckhdid CDID;
+	struct wusb_ckhdid CK;
+	struct wusb_cbaf_attr_hdr BandGroups_hdr;
+	__le16 BandGroups;
+} __attribute__((packed));
+
+/* CC_DATA - Failure case (AMS1.0[T4-10]) */
+struct wusb_cbaf_cc_data_fail {
+	struct wusb_cbaf_attr_hdr AssociationTypeId_hdr;
+	__le16 AssociationTypeId;
+	struct wusb_cbaf_attr_hdr AssociationSubTypeId_hdr;
+	__le16 AssociationSubTypeId;
+	struct wusb_cbaf_attr_hdr Length_hdr;
+	__le16 Length;
+	struct wusb_cbaf_attr_hdr AssociationStatus_hdr;
+	__u32 AssociationStatus;
+} __attribute__((packed));
+
+#endif	/* __LINUX_USB_ASSOCIATION_H */
diff --git a/include/net/irda/discovery.h b/include/net/irda/discovery.h
index e4efad1..0ce9339 100644
--- a/include/net/irda/discovery.h
+++ b/include/net/irda/discovery.h
@@ -57,9 +57,6 @@
 	__u8  byte[2];
 } __u16_host_order;
 
-/* Same purpose, different application */
-#define u16ho(array) (* ((__u16 *) array))
-
 /* Types of discovery */
 typedef enum {
 	DISCOVERY_LOG,		/* What's in our discovery log */
diff --git a/include/net/syncppp.h b/include/net/syncppp.h
index 877efa4..e43f407 100644
--- a/include/net/syncppp.h
+++ b/include/net/syncppp.h
@@ -59,7 +59,7 @@
 
 static inline struct sppp *sppp_of(struct net_device *dev) 
 {
-	struct ppp_device **ppp = dev->priv;
+	struct ppp_device **ppp = dev->ml_priv;
 	BUG_ON((*ppp)->dev != dev);
 	return &(*ppp)->sppp;
 }
diff --git a/init/do_mounts.c b/init/do_mounts.c
index 3885e70..660c1e5 100644
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -76,6 +76,7 @@
 	char s[32];
 	char *p;
 	dev_t res = 0;
+	int part;
 
 	if (strncmp(name, "/dev/", 5) != 0) {
 		unsigned maj, min;
@@ -106,7 +107,31 @@
 	for (p = s; *p; p++)
 		if (*p == '/')
 			*p = '!';
-	res = blk_lookup_devt(s);
+	res = blk_lookup_devt(s, 0);
+	if (res)
+		goto done;
+
+	/*
+	 * try a non-existent but valid partition, which may only exist
+	 * after revalidating the disk, like partitioned md devices
+	 */
+	while (p > s && isdigit(p[-1]))
+		p--;
+	if (p == s || !*p || *p == '0')
+		goto fail;
+
+	/* try disk name without <part number> */
+	part = simple_strtoul(p, NULL, 10);
+	*p = '\0';
+	res = blk_lookup_devt(s, part);
+	if (res)
+		goto done;
+
+	/* try disk name without p<part number> */
+	if (p < s + 2 || !isdigit(p[-2]) || p[-1] != 'p')
+		goto fail;
+	p[-1] = '\0';
+	res = blk_lookup_devt(s, part);
 	if (res)
 		goto done;
 
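
Aside: as a worked example of the fallback above (the name is illustrative),
a root device called "md_d0p1" that has no matching whole-disk entry is
retried as disk "md_d0p", partition 1, and then, after the trailing 'p' is
stripped as well, as disk "md_d0", partition 1; this is how a partitioned md
device can be found before its partitions have been revalidated.
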
diff --git a/mm/memory.c b/mm/memory.c
index 48c122d..fb5608a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -311,6 +311,21 @@
 	if (!new)
 		return -ENOMEM;
 
+	/*
+	 * Ensure all pte setup (eg. pte page lock and page clearing) is
+	 * visible before the pte is made visible to other CPUs by being
+	 * put into page tables.
+	 *
+	 * The other side of the story is the pointer chasing in the page
+	 * table walking code (when walking the page table without locking;
+	 * ie. most of the time). Fortunately, these data accesses consist
+	 * of a chain of data-dependent loads, meaning most CPUs (alpha
+	 * being the notable exception) will already guarantee loads are
+	 * seen in-order. See the alpha page table accessors for the
+	 * smp_read_barrier_depends() barriers in page table walking code.
+	 */
+	smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
+
 	spin_lock(&mm->page_table_lock);
 	if (!pmd_present(*pmd)) {	/* Has another populated it ? */
 		mm->nr_ptes++;
@@ -329,6 +344,8 @@
 	if (!new)
 		return -ENOMEM;
 
+	smp_wmb(); /* See comment in __pte_alloc */
+
 	spin_lock(&init_mm.page_table_lock);
 	if (!pmd_present(*pmd)) {	/* Has another populated it ? */
 		pmd_populate_kernel(&init_mm, pmd, new);
@@ -2619,6 +2636,8 @@
 	if (!new)
 		return -ENOMEM;
 
+	smp_wmb(); /* See comment in __pte_alloc */
+
 	spin_lock(&mm->page_table_lock);
 	if (pgd_present(*pgd))		/* Another has populated it */
 		pud_free(mm, new);
@@ -2640,6 +2659,8 @@
 	if (!new)
 		return -ENOMEM;
 
+	smp_wmb(); /* See comment in __pte_alloc */
+
 	spin_lock(&mm->page_table_lock);
 #ifndef __ARCH_HAS_4LEVEL_HACK
 	if (pud_present(*pud))		/* Another has populated it */
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index b04d643..8fb134d 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -419,7 +419,7 @@
 		return;
 
 	size = arp_hdr_len(skb->dev);
-	send_skb = find_skb(np, size + LL_RESERVED_SPACE(np->dev),
+	send_skb = find_skb(np, size + LL_ALLOCATED_SPACE(np->dev),
 			    LL_RESERVED_SPACE(np->dev));
 
 	if (!send_skb)
diff --git a/net/core/sock.c b/net/core/sock.c
index fa76f04..88094cb 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -270,7 +270,7 @@
 	int err = 0;
 	int skb_len;
 
-	/* Cast skb->rcvbuf to unsigned... It's pointless, but reduces
+	/* Cast sk->rcvbuf to unsigned... It's pointless, but reduces
 	   number of warnings when compiling with -W --ANK
 	 */
 	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
index 68d1544..7c9bb13 100644
--- a/net/econet/af_econet.c
+++ b/net/econet/af_econet.c
@@ -340,7 +340,7 @@
 
 		dev_hold(dev);
 
-		skb = sock_alloc_send_skb(sk, len+LL_RESERVED_SPACE(dev),
+		skb = sock_alloc_send_skb(sk, len+LL_ALLOCATED_SPACE(dev),
 					  msg->msg_flags & MSG_DONTWAIT, &err);
 		if (skb==NULL)
 			goto out_unlock;
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 68b72a7..418862f 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -570,7 +570,7 @@
 	 *	Allocate a buffer
 	 */
 
-	skb = alloc_skb(arp_hdr_len(dev) + LL_RESERVED_SPACE(dev), GFP_ATOMIC);
+	skb = alloc_skb(arp_hdr_len(dev) + LL_ALLOCATED_SPACE(dev), GFP_ATOMIC);
 	if (skb == NULL)
 		return NULL;
 
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 05afb57..2c0e457 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -338,7 +338,7 @@
 		return -ENOENT;
 
 	hash = cipso_v4_map_cache_hash(key, key_len);
-	bkt = hash & (CIPSO_V4_CACHE_BUCKETBITS - 1);
+	bkt = hash & (CIPSO_V4_CACHE_BUCKETS - 1);
 	spin_lock_bh(&cipso_v4_cache[bkt].lock);
 	list_for_each_entry(entry, &cipso_v4_cache[bkt].list, list) {
 		if (entry->hash == hash &&
@@ -417,7 +417,7 @@
 	atomic_inc(&secattr->cache->refcount);
 	entry->lsm_data = secattr->cache;
 
-	bkt = entry->hash & (CIPSO_V4_CACHE_BUCKETBITS - 1);
+	bkt = entry->hash & (CIPSO_V4_CACHE_BUCKETS - 1);
 	spin_lock_bh(&cipso_v4_cache[bkt].lock);
 	if (cipso_v4_cache[bkt].size < cipso_v4_cache_bucketsize) {
 		list_add(&entry->list, &cipso_v4_cache[bkt].list);
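
Aside: the fix above masks the hash with the bucket count minus one rather
than the bit count minus one, which indexes every bucket provided the bucket
count is a power of two (in that case hash & (N - 1) equals hash % N).  A
minimal sketch with hypothetical constants:

#include <linux/types.h>

#define MY_CACHE_BUCKET_BITS	7
#define MY_CACHE_BUCKETS	(1 << MY_CACHE_BUCKET_BITS)

static inline u32 my_cache_bucket(u32 hash)
{
	/* valid only because MY_CACHE_BUCKETS is a power of two */
	return hash & (MY_CACHE_BUCKETS - 1);
}
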
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 6250f42..2769dc4 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -292,7 +292,7 @@
 	struct iphdr *pip;
 	struct igmpv3_report *pig;
 
-	skb = alloc_skb(size + LL_RESERVED_SPACE(dev), GFP_ATOMIC);
+	skb = alloc_skb(size + LL_ALLOCATED_SPACE(dev), GFP_ATOMIC);
 	if (skb == NULL)
 		return NULL;
 
@@ -653,7 +653,7 @@
 		return -1;
 	}
 
-	skb=alloc_skb(IGMP_SIZE+LL_RESERVED_SPACE(dev), GFP_ATOMIC);
+	skb=alloc_skb(IGMP_SIZE+LL_ALLOCATED_SPACE(dev), GFP_ATOMIC);
 	if (skb == NULL) {
 		ip_rt_put(rt);
 		return -1;
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 89dee43..ed45037 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -710,14 +710,14 @@
 	struct net_device *dev = d->dev;
 	struct sk_buff *skb;
 	struct bootp_pkt *b;
-	int hh_len = LL_RESERVED_SPACE(dev);
 	struct iphdr *h;
 
 	/* Allocate packet */
-	skb = alloc_skb(sizeof(struct bootp_pkt) + hh_len + 15, GFP_KERNEL);
+	skb = alloc_skb(sizeof(struct bootp_pkt) + LL_ALLOCATED_SPACE(dev) + 15,
+			GFP_KERNEL);
 	if (!skb)
 		return;
-	skb_reserve(skb, hh_len);
+	skb_reserve(skb, LL_RESERVED_SPACE(dev));
 	b = (struct bootp_pkt *) skb_put(skb, sizeof(struct bootp_pkt));
 	memset(b, 0, sizeof(struct bootp_pkt));
 
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 11d7f75..fead049 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -322,7 +322,6 @@
 			unsigned int flags)
 {
 	struct inet_sock *inet = inet_sk(sk);
-	int hh_len;
 	struct iphdr *iph;
 	struct sk_buff *skb;
 	unsigned int iphlen;
@@ -336,13 +335,12 @@
 	if (flags&MSG_PROBE)
 		goto out;
 
-	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);
-
-	skb = sock_alloc_send_skb(sk, length+hh_len+15,
-				  flags&MSG_DONTWAIT, &err);
+	skb = sock_alloc_send_skb(sk,
+				  length + LL_ALLOCATED_SPACE(rt->u.dst.dev) + 15,
+				  flags & MSG_DONTWAIT, &err);
 	if (skb == NULL)
 		goto error;
-	skb_reserve(skb, hh_len);
+	skb_reserve(skb, LL_RESERVED_SPACE(rt->u.dst.dev));
 
 	skb->priority = sk->sk_priority;
 	skb->mark = sk->sk_mark;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 26c9369..b54d9d3 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1842,9 +1842,16 @@
 			TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
 		}
 
-		/* Don't lost mark skbs that were fwd transmitted after RTO */
-		if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) &&
-		    !after(TCP_SKB_CB(skb)->end_seq, tp->frto_highmark)) {
+		/* Marking forward transmissions that were made after RTO as
+		 * lost can cause unnecessary retransmissions in some scenarios;
+		 * SACK blocks will mitigate that in some but not in all cases.
+		 * We used to not mark them, but that was causing break-ups with
+		 * receivers that only do in-order reception.
+		 *
+		 * TODO: we could detect presence of such receiver and select
+		 * different behavior per flow.
+		 */
+		if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) {
 			TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
 			tp->lost_out += tcp_skb_pcount(skb);
 		}
@@ -1860,7 +1867,7 @@
 	tp->reordering = min_t(unsigned int, tp->reordering,
 			       sysctl_tcp_reordering);
 	tcp_set_ca_state(sk, TCP_CA_Loss);
-	tp->high_seq = tp->frto_highmark;
+	tp->high_seq = tp->snd_nxt;
 	TCP_ECN_queue_cwr(tp);
 
 	tcp_clear_retrans_hints_partial(tp);
@@ -2482,7 +2489,7 @@
 
 	tcp_verify_left_out(tp);
 
-	if (tp->retrans_out == 0)
+	if (!tp->frto_counter && tp->retrans_out == 0)
 		tp->retrans_stamp = 0;
 
 	if (flag & FLAG_ECE)
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 0af2e05..48cdce9 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -780,7 +780,7 @@
 		 *	Allocate buffer.
 		 */
 
-		if ((frag = alloc_skb(len+hlen+sizeof(struct frag_hdr)+LL_RESERVED_SPACE(rt->u.dst.dev), GFP_ATOMIC)) == NULL) {
+		if ((frag = alloc_skb(len+hlen+sizeof(struct frag_hdr)+LL_ALLOCATED_SPACE(rt->u.dst.dev), GFP_ATOMIC)) == NULL) {
 			NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n");
 			IP6_INC_STATS(ip6_dst_idev(skb->dst),
 				      IPSTATS_MIB_FRAGFAILS);
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 54f91ef..fd632dd 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1411,7 +1411,7 @@
 		     IPV6_TLV_PADN, 0 };
 
 	/* we assume size > sizeof(ra) here */
-	skb = sock_alloc_send_skb(sk, size + LL_RESERVED_SPACE(dev), 1, &err);
+	skb = sock_alloc_send_skb(sk, size + LL_ALLOCATED_SPACE(dev), 1, &err);
 
 	if (!skb)
 		return NULL;
@@ -1790,7 +1790,7 @@
 	payload_len = len + sizeof(ra);
 	full_len = sizeof(struct ipv6hdr) + payload_len;
 
-	skb = sock_alloc_send_skb(sk, LL_RESERVED_SPACE(dev) + full_len, 1, &err);
+	skb = sock_alloc_send_skb(sk, LL_ALLOCATED_SPACE(dev) + full_len, 1, &err);
 
 	if (skb == NULL) {
 		rcu_read_lock();
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 2c74885..a55fc05 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -479,7 +479,7 @@
 
 	skb = sock_alloc_send_skb(sk,
 				  (MAX_HEADER + sizeof(struct ipv6hdr) +
-				   len + LL_RESERVED_SPACE(dev)),
+				   len + LL_ALLOCATED_SPACE(dev)),
 				  1, &err);
 	if (!skb) {
 		ND_PRINTK0(KERN_ERR
@@ -1521,7 +1521,7 @@
 
 	buff = sock_alloc_send_skb(sk,
 				   (MAX_HEADER + sizeof(struct ipv6hdr) +
-				    len + LL_RESERVED_SPACE(dev)),
+				    len + LL_ALLOCATED_SPACE(dev)),
 				   1, &err);
 	if (buff == NULL) {
 		ND_PRINTK0(KERN_ERR
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 396f0ea..232e0dc 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -609,7 +609,6 @@
 	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct ipv6hdr *iph;
 	struct sk_buff *skb;
-	unsigned int hh_len;
 	int err;
 
 	if (length > rt->u.dst.dev->mtu) {
@@ -619,13 +618,12 @@
 	if (flags&MSG_PROBE)
 		goto out;
 
-	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);
-
-	skb = sock_alloc_send_skb(sk, length+hh_len+15,
-				  flags&MSG_DONTWAIT, &err);
+	skb = sock_alloc_send_skb(sk,
+				  length + LL_ALLOCATED_SPACE(rt->u.dst.dev) + 15,
+				  flags & MSG_DONTWAIT, &err);
 	if (skb == NULL)
 		goto error;
-	skb_reserve(skb, hh_len);
+	skb_reserve(skb, LL_RESERVED_SPACE(rt->u.dst.dev));
 
 	skb->priority = sk->sk_priority;
 	skb->mark = sk->sk_mark;
diff --git a/net/irda/discovery.c b/net/irda/discovery.c
index bfacef8..a6f99b5 100644
--- a/net/irda/discovery.c
+++ b/net/irda/discovery.c
@@ -40,6 +40,8 @@
 
 #include <net/irda/discovery.h>
 
+#include <asm/unaligned.h>
+
 /*
  * Function irlmp_add_discovery (cachelog, discovery)
  *
@@ -87,7 +89,7 @@
 			 */
 			hashbin_remove_this(cachelog, (irda_queue_t *) node);
 			/* Check if hints bits are unchanged */
-			if(u16ho(node->data.hints) == u16ho(new->data.hints))
+			if (get_unaligned((__u16 *)node->data.hints) == get_unaligned((__u16 *)new->data.hints))
 				/* Set time of first discovery for this node */
 				new->firststamp = node->firststamp;
 			kfree(node);
@@ -281,9 +283,9 @@
 		/* Mask out the ones we don't want :
 		 * We want to match the discovery mask, and to get only
 		 * the most recent one (unless we want old ones) */
-		if ((u16ho(discovery->data.hints) & mask) &&
+		if ((get_unaligned((__u16 *)discovery->data.hints) & mask) &&
 		    ((old_entries) ||
-		     ((jiffies - discovery->firststamp) < j_timeout)) ) {
+		     ((jiffies - discovery->firststamp) < j_timeout))) {
 			/* Create buffer as needed.
 			 * As this function get called a lot and most time
 			 * we don't have anything to put in the log (we are
diff --git a/net/irda/irlmp.c b/net/irda/irlmp.c
index 1f81f8e..7bf5b91 100644
--- a/net/irda/irlmp.c
+++ b/net/irda/irlmp.c
@@ -1062,7 +1062,8 @@
 		for(i = 0; i < number; i++) {
 			/* Check if we should notify client */
 			if ((client->expir_callback) &&
-			    (client->hint_mask.word & u16ho(expiries[i].hints)
+			    (client->hint_mask.word &
+			     get_unaligned((__u16 *)expiries[i].hints)
 			     & 0x7f7f) )
 				client->expir_callback(&(expiries[i]),
 						       EXPIRY_TIMEOUT,
@@ -1086,7 +1087,7 @@
 
 	IRDA_ASSERT(irlmp != NULL, return NULL;);
 
-	u16ho(irlmp->discovery_rsp.data.hints) = irlmp->hints.word;
+	put_unaligned(irlmp->hints.word, (__u16 *)irlmp->discovery_rsp.data.hints);
 
 	/*
 	 *  Set character set for device name (we use ASCII), and
diff --git a/net/irda/irnet/irnet_irda.c b/net/irda/irnet/irnet_irda.c
index 75497e5..a3ec002 100644
--- a/net/irda/irnet/irnet_irda.c
+++ b/net/irda/irnet/irnet_irda.c
@@ -1673,7 +1673,7 @@
   /* Notify the control channel */
   irnet_post_event(NULL, IRNET_DISCOVER,
 		   discovery->saddr, discovery->daddr, discovery->info,
-		   u16ho(discovery->hints));
+		   get_unaligned((__u16 *)discovery->hints));
 
   DEXIT(IRDA_OCB_TRACE, "\n");
 }
@@ -1704,7 +1704,7 @@
   /* Notify the control channel */
   irnet_post_event(NULL, IRNET_EXPIRE,
 		   expiry->saddr, expiry->daddr, expiry->info,
-		   u16ho(expiry->hints));
+		   get_unaligned((__u16 *)expiry->hints));
 
   DEXIT(IRDA_OCB_TRACE, "\n");
 }
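
The irda hunks drop the private u16ho() macro in favour of the generic accessors from <asm/unaligned.h>: the hints field is a two-byte array with no alignment guarantee, so a direct 16-bit load could fault on strict-alignment architectures. A minimal sketch of both directions, with illustrative helper names that are not from the patch:

#include <asm/unaligned.h>
#include <linux/types.h>

/* 'hints' is a __u8[2] field that is not necessarily 16-bit aligned */
static inline __u16 hints_to_word(__u8 *hints)
{
	return get_unaligned((__u16 *)hints);
}

static inline void word_to_hints(__u16 word, __u8 *hints)
{
	put_unaligned(word, (__u16 *)hints);
}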
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
index 879e721..19efc3a 100644
--- a/net/mac80211/debugfs_key.c
+++ b/net/mac80211/debugfs_key.c
@@ -255,14 +255,23 @@
 void ieee80211_debugfs_key_add_default(struct ieee80211_sub_if_data *sdata)
 {
 	char buf[50];
+	struct ieee80211_key *key;
 
 	if (!sdata->debugfsdir)
 		return;
 
-	sprintf(buf, "../keys/%d", sdata->default_key->debugfs.cnt);
-	sdata->debugfs.default_key =
-		debugfs_create_symlink("default_key", sdata->debugfsdir, buf);
+	/* this is running under the key lock */
+
+	key = sdata->default_key;
+	if (key) {
+		sprintf(buf, "../keys/%d", key->debugfs.cnt);
+		sdata->debugfs.default_key =
+			debugfs_create_symlink("default_key",
+					       sdata->debugfsdir, buf);
+	} else
+		ieee80211_debugfs_key_remove_default(sdata);
 }
+
 void ieee80211_debugfs_key_remove_default(struct ieee80211_sub_if_data *sdata)
 {
 	if (!sdata)
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 80954a5..06e88a5 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -54,6 +54,15 @@
 	if (!ndev)
 		return -ENOMEM;
 
+	ndev->needed_headroom = local->tx_headroom +
+				4*6 /* four MAC addresses */
+				+ 2 + 2 + 2 + 2 /* ctl, dur, seq, qos */
+				+ 6 /* mesh */
+				+ 8 /* rfc1042/bridge tunnel */
+				- ETH_HLEN /* ethernet hard_header_len */
+				+ IEEE80211_ENCRYPT_HEADROOM;
+	ndev->needed_tailroom = IEEE80211_ENCRYPT_TAILROOM;
+
 	ret = dev_alloc_name(ndev, ndev->name);
 	if (ret < 0)
 		goto fail;
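
The new needed_headroom/needed_tailroom fields let mac80211 declare, once at interface creation, how much extra space its 802.11 header and crypto trailer require, so that callers allocating via LL_ALLOCATED_SPACE() (as converted elsewhere in this series) leave that room up front rather than reallocating in the transmit path. A hedged sketch of how a hypothetical device would declare its needs; the numbers are illustrative only:

#include <linux/netdevice.h>

/* Illustrative values: a device that prepends an outer header and
 * appends a trailer declares the space once at setup time instead of
 * expanding every skb in its xmit path. */
static void example_dev_setup(struct net_device *dev)
{
	dev->needed_headroom = 32;	/* room prepended by the xmit path */
	dev->needed_tailroom = 16;	/* room appended, e.g. an ICV */
}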
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index f76bc26..697ef67 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -397,7 +397,7 @@
 	put_unaligned(cpu_to_le32(sdata->u.sta.mesh_seqnum), &meshhdr->seqnum);
 	sdata->u.sta.mesh_seqnum++;
 
-	return 5;
+	return 6;
 }
 
 void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 3df8092..af0cd1e 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -120,7 +120,7 @@
 		*pos++ = WLAN_EID_PREP;
 		break;
 	default:
-		kfree(skb);
+		kfree_skb(skb);
 		return -ENOTSUPP;
 		break;
 	}
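
The mesh_hwmp.c fix is the usual skb-freeing rule: a socket buffer has its own teardown (separately allocated data area, destructor, clone accounting), so an error path must release it with kfree_skb(), never plain kfree(). A minimal sketch of the pattern, with an invented validation check:

#include <linux/errno.h>
#include <linux/skbuff.h>

static int example_tx(unsigned int len)
{
	struct sk_buff *skb = dev_alloc_skb(len);

	if (!skb)
		return -ENOMEM;

	if (len < 16) {			/* some check fails */
		kfree_skb(skb);		/* correct: not kfree(skb) */
		return -EINVAL;
	}
	/* ... build the frame and hand it off ... */
	return 0;
}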
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 5845dc2..99c2d36 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -158,19 +158,25 @@
 	if (atomic_add_unless(&sdata->u.sta.mpaths, 1, MESH_MAX_MPATHS) == 0)
 		return -ENOSPC;
 
-	read_lock(&pathtbl_resize_lock);
-
 	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_KERNEL);
 	if (!new_mpath) {
 		atomic_dec(&sdata->u.sta.mpaths);
 		err = -ENOMEM;
 		goto endadd2;
 	}
+	new_node = kmalloc(sizeof(struct mpath_node), GFP_KERNEL);
+	if (!new_node) {
+		kfree(new_mpath);
+		atomic_dec(&sdata->u.sta.mpaths);
+		err = -ENOMEM;
+		goto endadd2;
+	}
+
+	read_lock(&pathtbl_resize_lock);
 	memcpy(new_mpath->dst, dst, ETH_ALEN);
 	new_mpath->dev = dev;
 	new_mpath->flags = 0;
 	skb_queue_head_init(&new_mpath->frame_queue);
-	new_node = kmalloc(sizeof(struct mpath_node), GFP_KERNEL);
 	new_node->mpath = new_mpath;
 	new_mpath->timer.data = (unsigned long) new_mpath;
 	new_mpath->timer.function = mesh_path_timer;
@@ -202,7 +208,6 @@
 
 endadd:
 	spin_unlock(&mesh_paths->hashwlock[hash_idx]);
-endadd2:
 	read_unlock(&pathtbl_resize_lock);
 	if (!err && grow) {
 		struct mesh_table *oldtbl, *newtbl;
@@ -215,10 +220,12 @@
 			return -ENOMEM;
 		}
 		rcu_assign_pointer(mesh_paths, newtbl);
+		write_unlock(&pathtbl_resize_lock);
+
 		synchronize_rcu();
 		mesh_table_free(oldtbl, false);
-		write_unlock(&pathtbl_resize_lock);
 	}
+endadd2:
 	return err;
 }
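
The mesh_pathtbl.c hunk moves both GFP_KERNEL allocations (and adds the missing error check for new_node) in front of read_lock(&pathtbl_resize_lock), and drops the write lock before synchronize_rcu(): kmalloc(GFP_KERNEL) and synchronize_rcu() may both sleep, which is not allowed while a spinning lock is held. A generic sketch of the resulting shape, with hypothetical names:

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct example_node {
	struct list_head list;
	int value;
};

static LIST_HEAD(example_list);
static DEFINE_SPINLOCK(example_lock);

/* Illustrative pattern: allocate (and check) everything that may sleep
 * before taking the spinning lock; only non-sleeping work runs under it. */
static int example_add(int value)
{
	struct example_node *node = kmalloc(sizeof(*node), GFP_KERNEL);

	if (!node)
		return -ENOMEM;
	node->value = value;

	spin_lock(&example_lock);
	list_add(&node->list, &example_list);
	spin_unlock(&example_lock);
	return 0;
}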
 
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index a5e5c31..4adba09 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -665,6 +665,26 @@
 	mod_timer(&ifsta->timer, jiffies + IEEE80211_AUTH_TIMEOUT);
 }
 
+static int ieee80211_compatible_rates(struct ieee80211_sta_bss *bss,
+				      struct ieee80211_supported_band *sband,
+				      u64 *rates)
+{
+	int i, j, count;
+	*rates = 0;
+	count = 0;
+	for (i = 0; i < bss->supp_rates_len; i++) {
+		int rate = (bss->supp_rates[i] & 0x7F) * 5;
+
+		for (j = 0; j < sband->n_bitrates; j++)
+			if (sband->bitrates[j].bitrate == rate) {
+				*rates |= BIT(j);
+				count++;
+				break;
+			}
+	}
+
+	return count;
+}
 
 static void ieee80211_send_assoc(struct net_device *dev,
 				 struct ieee80211_if_sta *ifsta)
@@ -673,11 +693,12 @@
 	struct sk_buff *skb;
 	struct ieee80211_mgmt *mgmt;
 	u8 *pos, *ies;
-	int i, len;
+	int i, len, count, rates_len, supp_rates_len;
 	u16 capab;
 	struct ieee80211_sta_bss *bss;
 	int wmm = 0;
 	struct ieee80211_supported_band *sband;
+	u64 rates = 0;
 
 	skb = dev_alloc_skb(local->hw.extra_tx_headroom +
 			    sizeof(*mgmt) + 200 + ifsta->extra_ie_len +
@@ -740,24 +761,39 @@
 	*pos++ = ifsta->ssid_len;
 	memcpy(pos, ifsta->ssid, ifsta->ssid_len);
 
-	len = sband->n_bitrates;
-	if (len > 8)
-		len = 8;
-	pos = skb_put(skb, len + 2);
-	*pos++ = WLAN_EID_SUPP_RATES;
-	*pos++ = len;
-	for (i = 0; i < len; i++) {
-		int rate = sband->bitrates[i].bitrate;
-		*pos++ = (u8) (rate / 5);
-	}
+	/* all supported rates should be added here but some APs
+	 * (e.g. D-Link DAP 1353 in b-only mode) don't like that
+	 * Therefore only add rates the AP supports */
+	rates_len = ieee80211_compatible_rates(bss, sband, &rates);
+	supp_rates_len = rates_len;
+	if (supp_rates_len > 8)
+		supp_rates_len = 8;
 
-	if (sband->n_bitrates > len) {
-		pos = skb_put(skb, sband->n_bitrates - len + 2);
-		*pos++ = WLAN_EID_EXT_SUPP_RATES;
-		*pos++ = sband->n_bitrates - len;
-		for (i = len; i < sband->n_bitrates; i++) {
+	len = sband->n_bitrates;
+	pos = skb_put(skb, supp_rates_len + 2);
+	*pos++ = WLAN_EID_SUPP_RATES;
+	*pos++ = supp_rates_len;
+
+	count = 0;
+	for (i = 0; i < sband->n_bitrates; i++) {
+		if (BIT(i) & rates) {
 			int rate = sband->bitrates[i].bitrate;
 			*pos++ = (u8) (rate / 5);
+			if (++count == 8)
+				break;
+		}
+	}
+
+	if (count == 8) {
+		pos = skb_put(skb, rates_len - count + 2);
+		*pos++ = WLAN_EID_EXT_SUPP_RATES;
+		*pos++ = rates_len - count;
+
+		for (i++; i < sband->n_bitrates; i++) {
+			if (BIT(i) & rates) {
+				int rate = sband->bitrates[i].bitrate;
+				*pos++ = (u8) (rate / 5);
+			}
 		}
 	}
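
For reference, the unit conversions used above: mac80211 keeps bitrates in 100 kbps units, while the (Extended) Supported Rates IE uses 500 kbps units with bit 7 as the basic-rate flag, which is what the "* 5", "/ 5" and "& 0x7F" arithmetic implements. A small illustrative sketch (helper names are not from the patch):

#include <linux/types.h>

static inline u8 bitrate_to_ie(u16 bitrate)	/* 540 (54 Mbps) -> 0x6C */
{
	return bitrate / 5;
}

static inline u16 ie_to_bitrate(u8 ie_byte)	/* 0xEC (basic 54 Mbps) -> 540 */
{
	return (ie_byte & 0x7F) * 5;
}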
 
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 02f436a..1958bfb 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1305,11 +1305,11 @@
 		if (is_multicast_ether_addr(skb->data)) {
 			if (*mesh_ttl > 0) {
 				xmit_skb = skb_copy(skb, GFP_ATOMIC);
-				if (!xmit_skb && net_ratelimit())
+				if (xmit_skb)
+					xmit_skb->pkt_type = PACKET_OTHERHOST;
+				else if (net_ratelimit())
 					printk(KERN_DEBUG "%s: failed to clone "
 					       "multicast frame\n", dev->name);
-				else
-					xmit_skb->pkt_type = PACKET_OTHERHOST;
 			} else
 				IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.sta,
 							     dropped_frames_ttl);
@@ -1395,7 +1395,7 @@
 		padding = ((4 - subframe_len) & 0x3);
 		/* the last MSDU has no padding */
 		if (subframe_len > remaining) {
-			printk(KERN_DEBUG "%s: wrong buffer size", dev->name);
+			printk(KERN_DEBUG "%s: wrong buffer size\n", dev->name);
 			return RX_DROP_UNUSABLE;
 		}
 
@@ -1418,7 +1418,7 @@
 			eth = (struct ethhdr *) skb_pull(skb, ntohs(len) +
 							padding);
 			if (!eth) {
-				printk(KERN_DEBUG "%s: wrong buffer size ",
+				printk(KERN_DEBUG "%s: wrong buffer size\n",
 				       dev->name);
 				dev_kfree_skb(frame);
 				return RX_DROP_UNUSABLE;
@@ -1952,7 +1952,7 @@
 		if (!skb_new) {
 			if (net_ratelimit())
 				printk(KERN_DEBUG "%s: failed to copy "
-				       "multicast frame for %s",
+				       "multicast frame for %s\n",
 				       wiphy_name(local->hw.wiphy),
 				       prev->dev->name);
 			continue;
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index f35eaea..1d7dd54 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1562,13 +1562,13 @@
 	 * be cloned. This could happen, e.g., with Linux bridge code passing
 	 * us broadcast frames. */
 
-	if (head_need > 0 || skb_cloned(skb)) {
+	if (head_need > 0 || skb_header_cloned(skb)) {
 #if 0
 		printk(KERN_DEBUG "%s: need to reallocate buffer for %d bytes "
 		       "of headroom\n", dev->name, head_need);
 #endif
 
-		if (skb_cloned(skb))
+		if (skb_header_cloned(skb))
 			I802_DEBUG_INC(local->tx_expand_skb_head_cloned);
 		else
 			I802_DEBUG_INC(local->tx_expand_skb_head);
@@ -1898,6 +1898,7 @@
 			control->flags |= IEEE80211_TXCTL_SHORT_PREAMBLE;
 		control->antenna_sel_tx = local->hw.conf.antenna_sel_tx;
 		control->flags |= IEEE80211_TXCTL_NO_ACK;
+		control->flags |= IEEE80211_TXCTL_DO_NOT_ENCRYPT;
 		control->retry_limit = 1;
 		control->flags |= IEEE80211_TXCTL_CLEAR_PS_FILT;
 	}
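
The tx.c hunk relaxes the reallocation test from skb_cloned() to skb_header_cloned(): a clone whose header area is already private does not need another copy before the 802.11 header is built; only a buffer that still shares its header bytes, or lacks headroom, does. A hedged sketch of the check (the function name is illustrative):

#include <linux/skbuff.h>

static int example_make_header_writable(struct sk_buff *skb, int head_need)
{
	if (head_need > 0 || skb_header_cloned(skb))
		return pskb_expand_head(skb, head_need > 0 ? head_need : 0,
					0, GFP_ATOMIC);
	return 0;
}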
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index cc9f715..24a465c 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -153,15 +153,15 @@
 	/* 7.1.3.5a.2 */
 	switch (ae) {
 	case 0:
-		return 5;
+		return 6;
 	case 1:
-		return 11;
+		return 12;
 	case 2:
-		return 17;
+		return 18;
 	case 3:
-		return 23;
+		return 24;
 	default:
-		return 5;
+		return 6;
 	}
 }
 
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index 64faa3d..dc1598b 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -394,7 +394,8 @@
 						 qd->handle);
 		if (!q->queues[i]) {
 			q->queues[i] = &noop_qdisc;
-			printk(KERN_ERR "%s child qdisc %i creation failed", dev->name, i);
+			printk(KERN_ERR "%s child qdisc %i creation failed\n",
+			       dev->name, i);
 		}
 	}
 
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 16774ec..0edefcf 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -472,6 +472,9 @@
 		goto nla_put_failure;
 	nla_nest_end(skb, nest_parms);
 
+	if (ctnetlink_dump_id(skb, ct) < 0)
+		goto nla_put_failure;
+
 	if (events & IPCT_DESTROY) {
 		if (ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 ||
 		    ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0)
diff --git a/net/netfilter/xt_iprange.c b/net/netfilter/xt_iprange.c
index 500528d..c63e933 100644
--- a/net/netfilter/xt_iprange.c
+++ b/net/netfilter/xt_iprange.c
@@ -179,3 +179,5 @@
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>, Jan Engelhardt <jengelh@computergmbh.de>");
 MODULE_DESCRIPTION("Xtables: arbitrary IPv4 range matching");
+MODULE_ALIAS("ipt_iprange");
+MODULE_ALIAS("ip6t_iprange");
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 2507024..2cee87d 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -743,7 +743,7 @@
 	if (len > dev->mtu+reserve)
 		goto out_unlock;
 
-	skb = sock_alloc_send_skb(sk, len + LL_RESERVED_SPACE(dev),
+	skb = sock_alloc_send_skb(sk, len + LL_ALLOCATED_SPACE(dev),
 				msg->msg_flags & MSG_DONTWAIT, &err);
 	if (skb==NULL)
 		goto out_unlock;
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 81b6064..bbc7107 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -2418,7 +2418,8 @@
 				break;
 
 			case SCTP_PARAM_IPV6_ADDRESS:
-				asoc->peer.ipv6_address = 1;
+				if (PF_INET6 == asoc->base.sk->sk_family)
+					asoc->peer.ipv6_address = 1;
 				break;
 
 			case SCTP_PARAM_HOST_NAME_ADDRESS:
@@ -2829,6 +2830,19 @@
 	addr_param = (union sctp_addr_param *)
 			((void *)asconf_param + sizeof(sctp_addip_param_t));
 
+	switch (addr_param->v4.param_hdr.type) {
+	case SCTP_PARAM_IPV6_ADDRESS:
+		if (!asoc->peer.ipv6_address)
+			return SCTP_ERROR_INV_PARAM;
+		break;
+	case SCTP_PARAM_IPV4_ADDRESS:
+		if (!asoc->peer.ipv4_address)
+			return SCTP_ERROR_INV_PARAM;
+		break;
+	default:
+		return SCTP_ERROR_INV_PARAM;
+	}
+
 	af = sctp_get_af_specific(param_type2af(addr_param->v4.param_hdr.type));
 	if (unlikely(!af))
 		return SCTP_ERROR_INV_PARAM;
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index 09cd9c0..3f964db 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -25,11 +25,11 @@
 	struct dst_entry *dst = skb->dst;
 	int nhead = dst->header_len + LL_RESERVED_SPACE(dst->dev)
 		- skb_headroom(skb);
+	int ntail = dst->dev->needed_tailroom - skb_tailroom(skb);
 
-	if (nhead > 0)
-		return pskb_expand_head(skb, nhead, 0, GFP_ATOMIC);
+	if (nhead > 0 || ntail > 0)
+		return pskb_expand_head(skb, nhead, ntail, GFP_ATOMIC);
 
-	/* Check tail too... */
 	return 0;
 }
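
With devices now declaring needed_tailroom, the xfrm output path grows the buffer at both ends in a single pskb_expand_head() call instead of checking only the head. A rough sketch of the shape of that check, not the patch itself (negative values are clamped here for clarity; names are illustrative):

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int example_check_space(struct sk_buff *skb, int header_len,
			       struct net_device *dev)
{
	int nhead = header_len + LL_RESERVED_SPACE(dev) - skb_headroom(skb);
	int ntail = dev->needed_tailroom - skb_tailroom(skb);

	if (nhead > 0 || ntail > 0)
		return pskb_expand_head(skb, max(nhead, 0), max(ntail, 0),
					GFP_ATOMIC);
	return 0;
}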