Merge branch 'upstream-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-2.6 into upstream-fixes
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 421bcff..43ab119 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -57,6 +57,15 @@
 
 ---------------------------
 
+What:	sbp2: module parameter "force_inquiry_hack"
+When:	July 2006
+Why:	Superseded by parameter "workarounds". Both parameters are meant to be
+	used ad-hoc and for single devices only, i.e. not in modprobe.conf,
+	therefore the impact of this feature replacement should be low.
+Who:	Stefan Richter <stefanr@s5r6.in-berlin.de>
+
+---------------------------
+
 What:	Video4Linux API 1 ioctls and video_decoder.h from Video devices.
 When:	July 2006
 Why:	V4L1 API was replaced by the V4L2 API during migration from 2.4 to 2.6
diff --git a/Documentation/watchdog/watchdog-api.txt b/Documentation/watchdog/watchdog-api.txt
index c5beb54..21ed511 100644
--- a/Documentation/watchdog/watchdog-api.txt
+++ b/Documentation/watchdog/watchdog-api.txt
@@ -36,6 +36,9 @@
 some data to the device.  So a very simple watchdog daemon would look
 like this:
 
+#include <stdlib.h>
+#include <fcntl.h>
+
 int main(int argc, const char *argv[]) {
 	int fd=open("/dev/watchdog",O_WRONLY);
 	if (fd==-1) {
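
The hunk above only adds the two headers the original snippet was missing.
For context, a self-contained build of the whole example daemon might look
like the following sketch; the extra <stdio.h>/<unistd.h> includes and the
ten-second ping interval are choices of this sketch, not part of the
documented example:

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>

int main(int argc, const char *argv[]) {
	int fd = open("/dev/watchdog", O_WRONLY);

	if (fd == -1) {
		perror("watchdog");
		exit(1);
	}
	/* Any write pings the watchdog; keep writing more often than the
	 * configured timeout and the system will not be reset. */
	while (1) {
		write(fd, "\0", 1);
		sleep(10);
	}
}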
diff --git a/arch/arm/mach-pxa/mainstone.c b/arch/arm/mach-pxa/mainstone.c
index 98356f8..02e188d 100644
--- a/arch/arm/mach-pxa/mainstone.c
+++ b/arch/arm/mach-pxa/mainstone.c
@@ -95,7 +95,10 @@
 	for(irq = MAINSTONE_IRQ(0); irq <= MAINSTONE_IRQ(15); irq++) {
 		set_irq_chip(irq, &mainstone_irq_chip);
 		set_irq_handler(irq, do_level_IRQ);
-		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+		if (irq == MAINSTONE_IRQ(10) || irq == MAINSTONE_IRQ(14))
+			set_irq_flags(irq, IRQF_VALID | IRQF_PROBE | IRQF_NOAUTOEN);
+		else
+			set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
 	}
 	set_irq_flags(MAINSTONE_IRQ(8), 0);
 	set_irq_flags(MAINSTONE_IRQ(12), 0);
diff --git a/arch/arm/mach-s3c2410/sleep.S b/arch/arm/mach-s3c2410/sleep.S
index 832fb86..73de2ea 100644
--- a/arch/arm/mach-s3c2410/sleep.S
+++ b/arch/arm/mach-s3c2410/sleep.S
@@ -59,8 +59,7 @@
 	mrc	p15, 0, r5, c13, c0, 0	@ PID
 	mrc	p15, 0, r6, c3, c0, 0	@ Domain ID
 	mrc	p15, 0, r7, c2, c0, 0	@ translation table base address
-	mrc	p15, 0, r8, c2, c0, 0	@ auxiliary control register
-	mrc	p15, 0, r9, c1, c0, 0	@ control register
+	mrc	p15, 0, r8, c1, c0, 0	@ control register
 
 	stmia	r0, { r4 - r13 }
 
@@ -165,7 +164,6 @@
 	mcr	p15, 0, r5, c13, c0, 0		@ PID
 	mcr	p15, 0, r6, c3, c0, 0		@ Domain ID
 	mcr	p15, 0, r7, c2, c0, 0		@ translation table base
-	mcr	p15, 0, r8, c1, c1, 0		@ auxilliary control
 
 #ifdef CONFIG_DEBUG_RESUME
 	mov	r3, #'R'
@@ -173,7 +171,7 @@
 #endif
 
 	ldr	r2, =resume_with_mmu
-	mcr	p15, 0, r9, c1, c0, 0		@ turn on MMU, etc
+	mcr	p15, 0, r8, c1, c0, 0		@ turn on MMU, etc
 	nop					@ second-to-last before mmu
 	mov	pc, r2				@ go back to virtual address
 
diff --git a/arch/ia64/configs/sn2_defconfig b/arch/ia64/configs/sn2_defconfig
index f6a8853..9ea3539 100644
--- a/arch/ia64/configs/sn2_defconfig
+++ b/arch/ia64/configs/sn2_defconfig
@@ -134,7 +134,7 @@
 CONFIG_ARCH_SPARSEMEM_ENABLE=y
 CONFIG_ARCH_DISCONTIGMEM_DEFAULT=y
 CONFIG_NUMA=y
-CONFIG_NODES_SHIFT=8
+CONFIG_NODES_SHIFT=10
 CONFIG_VIRTUAL_MEM_MAP=y
 CONFIG_HOLES_IN_ZONE=y
 CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y
@@ -1159,7 +1159,7 @@
 # CONFIG_SCHEDSTATS is not set
 # CONFIG_DEBUG_SLAB is not set
 CONFIG_DEBUG_PREEMPT=y
-CONFIG_DEBUG_MUTEXES=y
+# CONFIG_DEBUG_MUTEXES is not set
 # CONFIG_DEBUG_SPINLOCK is not set
 # CONFIG_DEBUG_SPINLOCK_SLEEP is not set
 # CONFIG_DEBUG_KOBJECT is not set
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 7956eb9..d58c1c5 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -416,7 +416,7 @@
 	ia64_vector vec = irq_to_vector(irq);
 	struct iosapic_rte_info *rte;
 
-	move_irq(irq);
+	move_native_irq(irq);
 	list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list)
 		iosapic_eoi(rte->addr, vec);
 }
@@ -458,7 +458,7 @@
 {
 	irq_desc_t *idesc = irq_descp(irq);
 
-	move_irq(irq);
+	move_native_irq(irq);
 	/*
 	 * Once we have recorded IRQ_PENDING already, we can mask the
 	 * interrupt for real. This prevents IRQ storms from unhandled
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index 5ce908e..9c72ea3 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -101,7 +101,6 @@
 
 	if (irq < NR_IRQS) {
 		irq_affinity[irq] = mask;
-		set_irq_info(irq, mask);
 		irq_redir[irq] = (char) (redir & 0xff);
 	}
 }
diff --git a/drivers/char/watchdog/i8xx_tco.c b/drivers/char/watchdog/i8xx_tco.c
index a13395e..fa2ba9e 100644
--- a/drivers/char/watchdog/i8xx_tco.c
+++ b/drivers/char/watchdog/i8xx_tco.c
@@ -33,11 +33,6 @@
  *	82801E   (C-ICH)  : document number 273599-001, 273645-002,
  *	82801EB  (ICH5)   : document number 252516-001, 252517-003,
  *	82801ER  (ICH5R)  : document number 252516-001, 252517-003,
- *	82801FB  (ICH6)   : document number 301473-002, 301474-007,
- *	82801FR  (ICH6R)  : document number 301473-002, 301474-007,
- *	82801FBM (ICH6-M) : document number 301473-002, 301474-007,
- *	82801FW  (ICH6W)  : document number 301473-001, 301474-007,
- *	82801FRW (ICH6RW) : document number 301473-001, 301474-007
  *
  *  20000710 Nils Faerber
  *	Initial Version 0.01
@@ -66,6 +61,10 @@
  *  20050807 Wim Van Sebroeck <wim@iguana.be>
  *	0.08 Make sure that the watchdog is only "armed" when started.
  *	     (Kernel Bug 4251)
+ *  20060416 Wim Van Sebroeck <wim@iguana.be>
+ *	0.09 Remove support for the ICH6, ICH6R, ICH6-M, ICH6W, ICH6RW and
+ *	     ICH7 chipsets. (See Kernel Bug 6031 - other code will support
+ *	     these chipsets)
  */
 
 /*
@@ -90,7 +89,7 @@
 #include "i8xx_tco.h"
 
 /* Module and version information */
-#define TCO_VERSION "0.08"
+#define TCO_VERSION "0.09"
 #define TCO_MODULE_NAME "i8xx TCO timer"
 #define TCO_DRIVER_NAME   TCO_MODULE_NAME ", v" TCO_VERSION
 #define PFX TCO_MODULE_NAME ": "
@@ -391,11 +390,6 @@
 	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12,	PCI_ANY_ID, PCI_ANY_ID, },
 	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801E_0,	PCI_ANY_ID, PCI_ANY_ID, },
 	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0,	PCI_ANY_ID, PCI_ANY_ID, },
-	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0,	PCI_ANY_ID, PCI_ANY_ID, },
-	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1,	PCI_ANY_ID, PCI_ANY_ID, },
-	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_2,	PCI_ANY_ID, PCI_ANY_ID, },
-	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0,	PCI_ANY_ID, PCI_ANY_ID, },
-	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1,	PCI_ANY_ID, PCI_ANY_ID, },
 	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1,	PCI_ANY_ID, PCI_ANY_ID, },
 	{ 0, },			/* End of list */
 };
diff --git a/drivers/char/watchdog/s3c2410_wdt.c b/drivers/char/watchdog/s3c2410_wdt.c
index 9dc5473..1ea04e9 100644
--- a/drivers/char/watchdog/s3c2410_wdt.c
+++ b/drivers/char/watchdog/s3c2410_wdt.c
@@ -423,6 +423,12 @@
 	if (tmr_atboot && started == 0) {
 		printk(KERN_INFO PFX "Starting Watchdog Timer\n");
 		s3c2410wdt_start();
+	} else if (!tmr_atboot) {
+		/* if we're not enabling the watchdog, then ensure it is
+		 * disabled if it has been left running from the bootloader
+		 * or other source */
+
+		s3c2410wdt_stop();
 	}
 
 	return 0;
diff --git a/drivers/char/watchdog/sc1200wdt.c b/drivers/char/watchdog/sc1200wdt.c
index 515ce75..20b88f9 100644
--- a/drivers/char/watchdog/sc1200wdt.c
+++ b/drivers/char/watchdog/sc1200wdt.c
@@ -377,7 +377,7 @@
 {
 	int ret;
 
-	printk(banner);
+	printk("%s\n", banner);
 
 	spin_lock_init(&sc1200wdt_lock);
 	sema_init(&open_sem, 1);
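
This change matters because "banner" is not a string literal: had the text
ever contained a '%', printk() would have parsed it as a conversion
specification and read nonexistent arguments off the stack. A userspace
sketch of the same hazard, with printf() standing in for printk() and a
made-up banner string:

#include <stdio.h>

int main(void)
{
	/* Hypothetical banner; note the stray '%'. */
	const char *banner = "sc1200wdt: 100% armed";

	/* printf(banner) would treat "% a" as a conversion and invoke
	 * undefined behaviour; passing the string as data is always safe. */
	printf("%s\n", banner);
	return 0;
}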
diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
index 1922287..11f1377 100644
--- a/drivers/ieee1394/ohci1394.c
+++ b/drivers/ieee1394/ohci1394.c
@@ -553,7 +553,7 @@
 	 * register content.
 	 * To actually enable physical responses is the job of our interrupt
 	 * handler which programs the physical request filter. */
-	reg_write(ohci, OHCI1394_PhyUpperBound, 0xffff0000);
+	reg_write(ohci, OHCI1394_PhyUpperBound, 0x01000000);
 
 	DBGMSG("physUpperBoundOffset=%08x",
 	       reg_read(ohci, OHCI1394_PhyUpperBound));
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index f420660..8a23fb5 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -42,6 +42,7 @@
 #include <linux/kernel.h>
 #include <linux/list.h>
 #include <linux/string.h>
+#include <linux/stringify.h>
 #include <linux/slab.h>
 #include <linux/interrupt.h>
 #include <linux/fs.h>
@@ -117,7 +118,8 @@
  */
 static int max_sectors = SBP2_MAX_SECTORS;
 module_param(max_sectors, int, 0444);
-MODULE_PARM_DESC(max_sectors, "Change max sectors per I/O supported (default = 255)");
+MODULE_PARM_DESC(max_sectors, "Change max sectors per I/O supported (default = "
+		 __stringify(SBP2_MAX_SECTORS) ")");
 
 /*
  * Exclusive login to sbp2 device? In most cases, the sbp2 driver should
@@ -135,18 +137,45 @@
 MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device (default = 1)");
 
 /*
- * SCSI inquiry hack for really badly behaved sbp2 devices. Turn this on
- * if your sbp2 device is not properly handling the SCSI inquiry command.
- * This hack makes the inquiry look more like a typical MS Windows inquiry
- * by enforcing 36 byte inquiry and avoiding access to mode_sense page 8.
+ * If any of the following workarounds is required for your device to work,
+ * please submit the kernel messages logged by sbp2 to the linux1394-devel
+ * mailing list.
  *
- * If force_inquiry_hack=1 is required for your device to work,
- * please submit the logged sbp2_firmware_revision value of this device to
- * the linux1394-devel mailing list.
+ * - 128kB max transfer
+ *   Limit transfer size. Necessary for some old bridges.
+ *
+ * - 36 byte inquiry
+ *   When scsi_mod probes the device, let the inquiry command look like that
+ *   from MS Windows.
+ *
+ * - skip mode page 8
+ *   Suppress sending of mode_sense for mode page 8 if the device pretends to
+ *   support the SCSI Primary Block commands instead of Reduced Block Commands.
+ *
+ * - fix capacity
+ *   Tell sd_mod to correct the last sector number reported by read_capacity.
+ *   Avoids access beyond actual disk limits on devices with an off-by-one bug.
+ *   Don't use this with devices which don't have this bug.
+ *
+ * - override internal blacklist
+ *   Instead of adding to the built-in blacklist, use only the workarounds
+ *   specified in the module load parameter.
+ *   Useful if a blacklist entry interfered with a non-broken device.
  */
+static int sbp2_default_workarounds;
+module_param_named(workarounds, sbp2_default_workarounds, int, 0644);
+MODULE_PARM_DESC(workarounds, "Work around device bugs (default = 0"
+	", 128kB max transfer = " __stringify(SBP2_WORKAROUND_128K_MAX_TRANS)
+	", 36 byte inquiry = "    __stringify(SBP2_WORKAROUND_INQUIRY_36)
+	", skip mode page 8 = "   __stringify(SBP2_WORKAROUND_MODE_SENSE_8)
+	", fix capacity = "       __stringify(SBP2_WORKAROUND_FIX_CAPACITY)
+	", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE)
+	", or a combination)");
+
+/* legacy parameter */
 static int force_inquiry_hack;
 module_param(force_inquiry_hack, int, 0644);
-MODULE_PARM_DESC(force_inquiry_hack, "Force SCSI inquiry hack (default = 0)");
+MODULE_PARM_DESC(force_inquiry_hack, "Deprecated, use 'workarounds'");
 
 /*
  * Export information about protocols/devices supported by this driver.
@@ -266,14 +295,55 @@
 };
 
 /*
- * List of device firmwares that require the inquiry hack.
- * Yields a few false positives but did not break other devices so far.
+ * List of devices with known bugs.
+ *
+ * The firmware_revision field, masked with 0xffff00, is the best indicator
+ * for the type of bridge chip in a device.  It yields a few false positives,
+ * but so far this has not broken correctly behaving devices.
  */
-static u32 sbp2_broken_inquiry_list[] = {
-	0x00002800,	/* Stefan Richter <stefanr@s5r6.in-berlin.de> */
-			/* DViCO Momobay CX-1 */
-	0x00000200	/* Andreas Plesch <plesch@fas.harvard.edu> */
-			/* QPS Fire DVDBurner */
+static const struct {
+	u32 firmware_revision;
+	u32 model_id;
+	unsigned workarounds;
+} sbp2_workarounds_table[] = {
+	/* TSB42AA9 */ {
+		.firmware_revision	= 0x002800,
+		.workarounds		= SBP2_WORKAROUND_INQUIRY_36 |
+					  SBP2_WORKAROUND_MODE_SENSE_8,
+	},
+	/* Initio bridges, actually only needed for some older ones */ {
+		.firmware_revision	= 0x000200,
+		.workarounds		= SBP2_WORKAROUND_INQUIRY_36,
+	},
+	/* Symbios bridge */ {
+		.firmware_revision	= 0xa0b800,
+		.workarounds		= SBP2_WORKAROUND_128K_MAX_TRANS,
+	},
+	/*
+	 * Note about the following Apple iPod blacklist entries:
+	 *
+	 * There are iPods (2nd gen, 3rd gen) with model_id==0.  Since our
+	 * matching logic treats 0 as a wildcard, we cannot match this ID
+	 * without rewriting the matching routine.  Fortunately these iPods
+	 * do not feature the read_capacity bug according to one report.
+	 * Read_capacity behaviour as well as model_id could change due to
+	 * Apple-supplied firmware updates though.
+	 */
+	/* iPod 4th generation */ {
+		.firmware_revision	= 0x0a2700,
+		.model_id		= 0x000021,
+		.workarounds		= SBP2_WORKAROUND_FIX_CAPACITY,
+	},
+	/* iPod mini */ {
+		.firmware_revision	= 0x0a2700,
+		.model_id		= 0x000023,
+		.workarounds		= SBP2_WORKAROUND_FIX_CAPACITY,
+	},
+	/* iPod Photo */ {
+		.firmware_revision	= 0x0a2700,
+		.model_id		= 0x00007e,
+		.workarounds		= SBP2_WORKAROUND_FIX_CAPACITY,
+	}
 };
 
 /**************************************
@@ -765,11 +835,16 @@
 
 	/* Register the status FIFO address range. We could use the same FIFO
 	 * for targets at different nodes. However we need different FIFOs per
-	 * target in order to support multi-unit devices. */
+	 * target in order to support multi-unit devices.
+	 * The FIFO is located outside the local host controller's physical range
+	 * but, if possible, within the posted write area. Status writes will
+	 * then be performed as unified transactions. This slightly reduces
+	 * bandwidth usage, and some Prolific-based devices seem to require it.
+	 */
 	scsi_id->status_fifo_addr = hpsb_allocate_and_register_addrspace(
 			&sbp2_highlevel, ud->ne->host, &sbp2_ops,
 			sizeof(struct sbp2_status_block), sizeof(quadlet_t),
-			~0ULL, ~0ULL);
+			0x010000000000ULL, CSR1212_ALL_SPACE_END);
 	if (!scsi_id->status_fifo_addr) {
 		SBP2_ERR("failed to allocate status FIFO address range");
 		goto failed_alloc;
@@ -1450,7 +1525,8 @@
 	struct csr1212_dentry *dentry;
 	u64 management_agent_addr;
 	u32 command_set_spec_id, command_set, unit_characteristics,
-	    firmware_revision, workarounds;
+	    firmware_revision;
+	unsigned workarounds;
 	int i;
 
 	SBP2_DEBUG_ENTER();
@@ -1506,12 +1582,8 @@
 		case SBP2_FIRMWARE_REVISION_KEY:
 			/* Firmware revision */
 			firmware_revision = kv->value.immediate;
-			if (force_inquiry_hack)
-				SBP2_INFO("sbp2_firmware_revision = %x",
-					  (unsigned int)firmware_revision);
-			else
-				SBP2_DEBUG("sbp2_firmware_revision = %x",
-					   (unsigned int)firmware_revision);
+			SBP2_DEBUG("sbp2_firmware_revision = %x",
+				   (unsigned int)firmware_revision);
 			break;
 
 		default:
@@ -1519,41 +1591,44 @@
 		}
 	}
 
-	/* This is the start of our broken device checking. We try to hack
-	 * around oddities and known defects.  */
-	workarounds = 0x0;
-
-	/* If the vendor id is 0xa0b8 (Symbios vendor id), then we have a
-	 * bridge with 128KB max transfer size limitation. For sanity, we
-	 * only voice this when the current max_sectors setting
-	 * exceeds the 128k limit. By default, that is not the case.
-	 *
-	 * It would be really nice if we could detect this before the scsi
-	 * host gets initialized. That way we can down-force the
-	 * max_sectors to account for it. That is not currently
-	 * possible.  */
-	if ((firmware_revision & 0xffff00) ==
-			SBP2_128KB_BROKEN_FIRMWARE &&
-			(max_sectors * 512) > (128*1024)) {
-		SBP2_WARN("Node " NODE_BUS_FMT ": Bridge only supports 128KB max transfer size.",
-				NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid));
-		SBP2_WARN("WARNING: Current max_sectors setting is larger than 128KB (%d sectors)!",
-				max_sectors);
-		workarounds |= SBP2_BREAKAGE_128K_MAX_TRANSFER;
+	workarounds = sbp2_default_workarounds;
+	if (force_inquiry_hack) {
+		SBP2_WARN("force_inquiry_hack is deprecated. "
+			  "Use parameter 'workarounds' instead.");
+		workarounds |= SBP2_WORKAROUND_INQUIRY_36;
 	}
 
-	/* Check for a blacklisted set of devices that require us to force
-	 * a 36 byte host inquiry. This can be overriden as a module param
-	 * (to force all hosts).  */
-	for (i = 0; i < ARRAY_SIZE(sbp2_broken_inquiry_list); i++) {
-		if ((firmware_revision & 0xffff00) ==
-				sbp2_broken_inquiry_list[i]) {
-			SBP2_WARN("Node " NODE_BUS_FMT ": Using 36byte inquiry workaround",
-					NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid));
-			workarounds |= SBP2_BREAKAGE_INQUIRY_HACK;
-			break; /* No need to continue. */
+	if (!(workarounds & SBP2_WORKAROUND_OVERRIDE))
+		for (i = 0; i < ARRAY_SIZE(sbp2_workarounds_table); i++) {
+			if (sbp2_workarounds_table[i].firmware_revision &&
+			    sbp2_workarounds_table[i].firmware_revision !=
+			    (firmware_revision & 0xffff00))
+				continue;
+			if (sbp2_workarounds_table[i].model_id &&
+			    sbp2_workarounds_table[i].model_id != ud->model_id)
+				continue;
+			workarounds |= sbp2_workarounds_table[i].workarounds;
+			break;
 		}
-	}
+
+	if (workarounds)
+		SBP2_INFO("Workarounds for node " NODE_BUS_FMT ": 0x%x "
+			  "(firmware_revision 0x%06x, vendor_id 0x%06x,"
+			  " model_id 0x%06x)",
+			  NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid),
+			  workarounds, firmware_revision,
+			  ud->vendor_id ? ud->vendor_id : ud->ne->vendor_id,
+			  ud->model_id);
+
+	/* We would need one SCSI host template for each target to adjust
+	 * max_sectors on the fly, therefore warn only. */
+	if (workarounds & SBP2_WORKAROUND_128K_MAX_TRANS &&
+	    (max_sectors * 512) > (128 * 1024))
+		SBP2_WARN("Node " NODE_BUS_FMT ": Bridge only supports 128KB "
+			  "max transfer size. WARNING: Current max_sectors "
+			  "setting is larger than 128KB (%d sectors)",
+			  NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid),
+			  max_sectors);
 
 	/* If this is a logical unit directory entry, process the parent
 	 * to get the values. */
@@ -2447,19 +2522,25 @@
 
 	scsi_id->sdev = sdev;
 
-	if (force_inquiry_hack ||
-	    scsi_id->workarounds & SBP2_BREAKAGE_INQUIRY_HACK) {
+	if (scsi_id->workarounds & SBP2_WORKAROUND_INQUIRY_36)
 		sdev->inquiry_len = 36;
-		sdev->skip_ms_page_8 = 1;
-	}
 	return 0;
 }
 
 static int sbp2scsi_slave_configure(struct scsi_device *sdev)
 {
+	struct scsi_id_instance_data *scsi_id =
+		(struct scsi_id_instance_data *)sdev->host->hostdata[0];
+
 	blk_queue_dma_alignment(sdev->request_queue, (512 - 1));
 	sdev->use_10_for_rw = 1;
 	sdev->use_10_for_ms = 1;
+
+	if (sdev->type == TYPE_DISK &&
+	    scsi_id->workarounds & SBP2_WORKAROUND_MODE_SENSE_8)
+		sdev->skip_ms_page_8 = 1;
+	if (scsi_id->workarounds & SBP2_WORKAROUND_FIX_CAPACITY)
+		sdev->fix_capacity = 1;
 	return 0;
 }
 
@@ -2603,7 +2684,9 @@
 		scsi_driver_template.cmd_per_lun = 1;
 	}
 
-	/* Set max sectors (module load option). Default is 255 sectors. */
+	if (sbp2_default_workarounds & SBP2_WORKAROUND_128K_MAX_TRANS &&
+	    (max_sectors * 512) > (128 * 1024))
+		max_sectors = 128 * 1024 / 512;
 	scsi_driver_template.max_sectors = max_sectors;
 
 	/* Register our high level driver with 1394 stack */
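
The new parameter descriptions are assembled with __stringify() so the
documented defaults cannot drift from the macros they describe. A minimal
sketch of the mechanism (the macro is re-created here purely for
illustration; the kernel's copy lives in <linux/stringify.h>):

#include <stdio.h>

/* Two macro levels are needed: the argument is expanded first and
 * stringified second.  A single level would print "SBP2_MAX_SECTORS". */
#define stringify_1(x)	#x
#define stringify(x)	stringify_1(x)

#define SBP2_MAX_SECTORS	255

static const char desc[] = "default = " stringify(SBP2_MAX_SECTORS);

int main(void)
{
	puts(desc);		/* prints: default = 255 */
	return 0;
}

The workaround flags themselves combine as a bitmask, so for example
"modprobe sbp2 workarounds=0x6" (0x2 | 0x4) would request both the 36 byte
inquiry and the mode page 8 workaround at once.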
diff --git a/drivers/ieee1394/sbp2.h b/drivers/ieee1394/sbp2.h
index e2d357a..f4ccc9d 100644
--- a/drivers/ieee1394/sbp2.h
+++ b/drivers/ieee1394/sbp2.h
@@ -227,11 +227,6 @@
 #define SBP2_SW_VERSION_ENTRY					0x00010483
 
 /*
- * Other misc defines
- */
-#define SBP2_128KB_BROKEN_FIRMWARE				0xa0b800
-
-/*
  * SCSI specific stuff
  */
 
@@ -239,6 +234,13 @@
 #define SBP2_MAX_SECTORS		255	/* Max sectors supported */
 #define SBP2_MAX_CMDS			8	/* This should be safe */
 
+/* Flags for detected oddities and brokenness */
+#define SBP2_WORKAROUND_128K_MAX_TRANS	0x1
+#define SBP2_WORKAROUND_INQUIRY_36	0x2
+#define SBP2_WORKAROUND_MODE_SENSE_8	0x4
+#define SBP2_WORKAROUND_FIX_CAPACITY	0x8
+#define SBP2_WORKAROUND_OVERRIDE	0x100
+
 /* This is the two dma types we use for cmd_dma below */
 enum cmd_dma_types {
 	CMD_DMA_NONE,
@@ -268,10 +270,6 @@
 
 };
 
-/* A list of flags for detected oddities and brokeness. */
-#define SBP2_BREAKAGE_128K_MAX_TRANSFER		0x1
-#define SBP2_BREAKAGE_INQUIRY_HACK		0x2
-
 struct sbp2scsi_host_info;
 
 /*
@@ -345,7 +343,7 @@
 	struct Scsi_Host *scsi_host;
 
 	/* Device specific workarounds/brokenness */
-	u32 workarounds;
+	unsigned workarounds;
 };
 
 /* Sbp2 host data structure (one per IEEE1394 host) */
diff --git a/drivers/infiniband/core/uverbs_mem.c b/drivers/infiniband/core/uverbs_mem.c
index 36a32c3..efe147d 100644
--- a/drivers/infiniband/core/uverbs_mem.c
+++ b/drivers/infiniband/core/uverbs_mem.c
@@ -211,8 +211,10 @@
 	 */
 
 	work = kmalloc(sizeof *work, GFP_KERNEL);
-	if (!work)
+	if (!work) {
+		mmput(mm);
 		return;
+	}
 
 	INIT_WORK(&work->work, ib_umem_account, work);
 	work->mm   = mm;
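
The added mmput() pairs the mm reference taken earlier in this function
with a release on the allocation-failure path. The bug class is generic;
here is a runnable userspace sketch of the pattern with invented names:

#include <stdio.h>
#include <stdlib.h>

struct resource { int refs; };

static void res_get(struct resource *r) { r->refs++; }

static void res_put(struct resource *r)
{
	if (--r->refs == 0)
		printf("resource freed\n");
}

/* Every early return taken after res_get() must drop the reference,
 * exactly as the mmput() above does when kmalloc() fails. */
static int do_work(struct resource *r)
{
	char *buf;

	res_get(r);
	buf = malloc(64);
	if (!buf) {
		res_put(r);	/* release on the error path */
		return -1;
	}
	/* ... use buf ... */
	free(buf);
	res_put(r);
	return 0;
}

int main(void)
{
	struct resource r = { .refs = 1 };

	do_work(&r);
	res_put(&r);
	return 0;
}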
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index 1985b5d..798e13e 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -182,7 +182,7 @@
 	u8                status;
 };
 
-static int fw_cmd_doorbell = 1;
+static int fw_cmd_doorbell = 0;
 module_param(fw_cmd_doorbell, int, 0644);
 MODULE_PARM_DESC(fw_cmd_doorbell, "post FW commands through doorbell page if nonzero "
 		 "(and supported by FW)");
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 19765f6..07c13be 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -1727,23 +1727,7 @@
 
 	ind = qp->rq.next_ind;
 
-	for (nreq = 0; wr; ++nreq, wr = wr->next) {
-		if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
-			nreq = 0;
-
-			doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0);
-			doorbell[1] = cpu_to_be32(qp->qpn << 8);
-
-			wmb();
-
-			mthca_write64(doorbell,
-				      dev->kar + MTHCA_RECEIVE_DOORBELL,
-				      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
-
-			qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB;
-			size0 = 0;
-		}
-
+	for (nreq = 0; wr; wr = wr->next) {
 		if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
 			mthca_err(dev, "RQ %06x full (%u head, %u tail,"
 					" %d max, %d nreq)\n", qp->qpn,
@@ -1797,6 +1781,23 @@
 		++ind;
 		if (unlikely(ind >= qp->rq.max))
 			ind -= qp->rq.max;
+
+		++nreq;
+		if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
+			nreq = 0;
+
+			doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0);
+			doorbell[1] = cpu_to_be32(qp->qpn << 8);
+
+			wmb();
+
+			mthca_write64(doorbell,
+				      dev->kar + MTHCA_RECEIVE_DOORBELL,
+				      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
+
+			qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB;
+			size0 = 0;
+		}
 	}
 
 out:
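
The point of this reordering is that nreq is now incremented only after a
work request has actually been written, so each batched doorbell covers
fully posted WQEs only, and the final partial batch is flushed after the
loop. A runnable schematic of the resulting pattern, with helper names
invented for the sketch:

#include <stdio.h>

#define MAX_PER_DOORBELL 256	/* stands in for MTHCA_TAVOR_MAX_WQES_PER_RECV_DB */

static void ring_doorbell(int count)
{
	printf("doorbell covers %d posted WQEs\n", count);
}

static void post_receives(int total)
{
	int nreq = 0;

	for (int i = 0; i < total; i++) {
		/* ... write WQE i into the receive queue here ... */
		++nreq;
		if (nreq == MAX_PER_DOORBELL) {
			ring_doorbell(nreq);	/* everything counted is posted */
			nreq = 0;
		}
	}
	if (nreq)
		ring_doorbell(nreq);	/* flush the final partial batch */
}

int main(void)
{
	post_receives(600);
	return 0;
}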
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index c32ce43..9cbdffa 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -340,7 +340,10 @@
 	/* XXX should send SRP_I_LOGOUT request */
 
 	init_completion(&target->done);
-	ib_send_cm_dreq(target->cm_id, NULL, 0);
+	if (ib_send_cm_dreq(target->cm_id, NULL, 0)) {
+		printk(KERN_DEBUG PFX "Sending CM DREQ failed\n");
+		return;
+	}
 	wait_for_completion(&target->done);
 }
 
@@ -351,7 +354,6 @@
 	spin_lock_irq(target->scsi_host->host_lock);
 	if (target->state != SRP_TARGET_DEAD) {
 		spin_unlock_irq(target->scsi_host->host_lock);
-		scsi_host_put(target->scsi_host);
 		return;
 	}
 	target->state = SRP_TARGET_REMOVED;
@@ -365,8 +367,6 @@
 	ib_destroy_cm_id(target->cm_id);
 	srp_free_target_ib(target);
 	scsi_host_put(target->scsi_host);
-	/* And another put to really free the target port... */
-	scsi_host_put(target->scsi_host);
 }
 
 static int srp_connect_target(struct srp_target_port *target)
@@ -1241,7 +1241,7 @@
 	list_for_each_entry_safe(req, tmp, &target->req_queue, list)
 		if (req->scmnd->device == scmnd->device) {
 			req->scmnd->result = DID_RESET << 16;
-			scmnd->scsi_done(scmnd);
+			req->scmnd->scsi_done(req->scmnd);
 			srp_remove_req(target, req);
 		}
 
diff --git a/drivers/mmc/au1xmmc.c b/drivers/mmc/au1xmmc.c
index 914d62b..5dc4bee 100644
--- a/drivers/mmc/au1xmmc.c
+++ b/drivers/mmc/au1xmmc.c
@@ -310,7 +310,7 @@
 		}
 		else
 			data->bytes_xfered =
-				(data->blocks * (1 << data->blksz_bits)) -
+				(data->blocks * data->blksz) -
 				host->pio.len;
 	}
 
@@ -575,7 +575,7 @@
 au1xmmc_prepare_data(struct au1xmmc_host *host, struct mmc_data *data)
 {
 
-	int datalen = data->blocks * (1 << data->blksz_bits);
+	int datalen = data->blocks * data->blksz;
 
 	if (dma != 0)
 		host->flags |= HOST_F_DMA;
@@ -596,7 +596,7 @@
 	if (host->dma.len == 0)
 		return MMC_ERR_TIMEOUT;
 
-	au_writel((1 << data->blksz_bits) - 1, HOST_BLKSIZE(host));
+	au_writel(data->blksz - 1, HOST_BLKSIZE(host));
 
 	if (host->flags & HOST_F_DMA) {
 		int i;
diff --git a/drivers/mmc/imxmmc.c b/drivers/mmc/imxmmc.c
index 79358e22..a4eb1d0 100644
--- a/drivers/mmc/imxmmc.c
+++ b/drivers/mmc/imxmmc.c
@@ -218,8 +218,10 @@
 	if(!loops)
 		return 0;
 
-	dev_info(mmc_dev(host->mmc), "busy wait for %d usec in %s, STATUS = 0x%x (0x%x)\n",
-		loops, where, *pstat, stat_mask);
+	/* The busy-wait is expected for clocks below 8 MHz due to SDHC hardware flaws */
+	if(!(stat_mask & STATUS_END_CMD_RESP) || (host->mmc->ios.clock>=8000000))
+		dev_info(mmc_dev(host->mmc), "busy wait for %d usec in %s, STATUS = 0x%x (0x%x)\n",
+			loops, where, *pstat, stat_mask);
 	return loops;
 }
 
@@ -333,6 +335,9 @@
 	WARN_ON(host->cmd != NULL);
 	host->cmd = cmd;
 
+	/* Ensure that the clock is stopped, else command programming and start fail */
+	imxmci_stop_clock(host);
+
 	if (cmd->flags & MMC_RSP_BUSY)
 		cmdat |= CMD_DAT_CONT_BUSY;
 
@@ -553,7 +558,7 @@
 	int trans_done = 0;
 	unsigned int stat = *pstat;
 
-	if(host->actual_bus_width == MMC_BUS_WIDTH_4)
+	if(host->actual_bus_width != MMC_BUS_WIDTH_4)
 		burst_len = 16;
 	else
 		burst_len = 64;
@@ -591,8 +596,7 @@
 			stat = MMC_STATUS;
 
 			/* Flush extra bytes from FIFO */
-			while(flush_len >= 2){
-				flush_len -= 2;
+			while(flush_len && !(stat & STATUS_DATA_TRANS_DONE)){
 				i = MMC_BUFFER_ACCESS;
 				stat = MMC_STATUS;
 				stat &= ~STATUS_CRC_READ_ERR; /* Stupid but required there */
@@ -746,10 +750,6 @@
 			data_dir_mask = STATUS_DATA_TRANS_DONE;
 		}
 
-		imxmci_busy_wait_for_status(host, &stat,
-				data_dir_mask,
-				50, "imxmci_tasklet_fnc data");
-
 		if(stat & data_dir_mask) {
 			clear_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events);
 			imxmci_data_done(host, stat);
@@ -865,7 +865,11 @@
 
 		imxmci_stop_clock(host);
 		MMC_CLK_RATE = (prescaler<<3) | clk;
-		imxmci_start_clock(host);
+		/*
+		 * As far as I understand, the clock should not be started here,
+		 * because doing so would kick off the SDHC sequencer and send the
+		 * last or a random command to the card.
+		 */
+		/*imxmci_start_clock(host);*/
 
 		dev_dbg(mmc_dev(host->mmc), "MMC_CLK_RATE: 0x%08x\n", MMC_CLK_RATE);
 	} else {
diff --git a/drivers/mmc/mmc.c b/drivers/mmc/mmc.c
index 1ca2c8b..6201f30 100644
--- a/drivers/mmc/mmc.c
+++ b/drivers/mmc/mmc.c
@@ -951,6 +951,7 @@
 		data.timeout_ns = card->csd.tacc_ns * 10;
 		data.timeout_clks = card->csd.tacc_clks * 10;
 		data.blksz_bits = 3;
+		data.blksz = 1 << 3;
 		data.blocks = 1;
 		data.flags = MMC_DATA_READ;
 		data.sg = &sg;
diff --git a/drivers/mmc/mmc_block.c b/drivers/mmc/mmc_block.c
index 06bd1f4..e39cc05 100644
--- a/drivers/mmc/mmc_block.c
+++ b/drivers/mmc/mmc_block.c
@@ -175,6 +175,7 @@
 		brq.data.timeout_ns = card->csd.tacc_ns * 10;
 		brq.data.timeout_clks = card->csd.tacc_clks * 10;
 		brq.data.blksz_bits = md->block_bits;
+		brq.data.blksz = 1 << md->block_bits;
 		brq.data.blocks = req->nr_sectors >> (md->block_bits - 9);
 		brq.stop.opcode = MMC_STOP_TRANSMISSION;
 		brq.stop.arg = 0;
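
These MMC hunks introduce the byte-count field alongside the old shift
field during the blksz_bits conversion, so the two must satisfy
blksz == 1 << blksz_bits. A two-assert check of that invariant:

#include <assert.h>

int main(void)
{
	unsigned int blksz_bits = 9;		/* e.g. 512-byte blocks */
	unsigned int blksz = 1u << blksz_bits;	/* what the hunks assign */

	assert(blksz == 512);
	/* blocks * blksz must equal blocks << blksz_bits for any count */
	assert(8 * blksz == (8u << blksz_bits));
	return 0;
}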
diff --git a/drivers/mmc/pxamci.c b/drivers/mmc/pxamci.c
index f97b472..b49368f 100644
--- a/drivers/mmc/pxamci.c
+++ b/drivers/mmc/pxamci.c
@@ -119,7 +119,7 @@
 		nob = 0xffff;
 
 	writel(nob, host->base + MMC_NOB);
-	writel(1 << data->blksz_bits, host->base + MMC_BLKLEN);
+	writel(data->blksz, host->base + MMC_BLKLEN);
 
 	clks = (unsigned long long)data->timeout_ns * CLOCKRATE;
 	do_div(clks, 1000000000UL);
@@ -283,7 +283,7 @@
 	 * data blocks as being in error.
 	 */
 	if (data->error == MMC_ERR_NONE)
-		data->bytes_xfered = data->blocks << data->blksz_bits;
+		data->bytes_xfered = data->blocks * data->blksz;
 	else
 		data->bytes_xfered = 0;
 
diff --git a/drivers/mmc/wbsd.c b/drivers/mmc/wbsd.c
index 39b3d97..8167332 100644
--- a/drivers/mmc/wbsd.c
+++ b/drivers/mmc/wbsd.c
@@ -662,14 +662,14 @@
 	unsigned long dmaflags;
 
 	DBGF("blksz %04x blks %04x flags %08x\n",
-		1 << data->blksz_bits, data->blocks, data->flags);
+		data->blksz, data->blocks, data->flags);
 	DBGF("tsac %d ms nsac %d clk\n",
 		data->timeout_ns / 1000000, data->timeout_clks);
 
 	/*
 	 * Calculate size.
 	 */
-	host->size = data->blocks << data->blksz_bits;
+	host->size = data->blocks * data->blksz;
 
 	/*
 	 * Check timeout values for overflow.
@@ -696,12 +696,12 @@
 	 * Two bytes are needed for each data line.
 	 */
 	if (host->bus_width == MMC_BUS_WIDTH_1) {
-		blksize = (1 << data->blksz_bits) + 2;
+		blksize = data->blksz + 2;
 
 		wbsd_write_index(host, WBSD_IDX_PBSMSB, (blksize >> 4) & 0xF0);
 		wbsd_write_index(host, WBSD_IDX_PBSLSB, blksize & 0xFF);
 	} else if (host->bus_width == MMC_BUS_WIDTH_4) {
-		blksize = (1 << data->blksz_bits) + 2 * 4;
+		blksize = data->blksz + 2 * 4;
 
 		wbsd_write_index(host, WBSD_IDX_PBSMSB,
 			((blksize >> 4) & 0xF0) | WBSD_DATA_WIDTH);
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index f7235c9..7e078b4 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -106,7 +106,6 @@
  *	0.51: 20 Jan 2006: Add 64bit consistent memory allocation for rings.
  *	0.52: 20 Jan 2006: Add MSI/MSIX support.
  *	0.53: 19 Mar 2006: Fix init from low power mode and add hw reset.
- *	0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup.
  *
  * Known bugs:
  * We suspect that on some hardware no TX done interrupts are generated.
@@ -118,7 +117,7 @@
  * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
  * superfluous timer interrupts from the nic.
  */
-#define FORCEDETH_VERSION		"0.54"
+#define FORCEDETH_VERSION		"0.53"
 #define DRV_NAME			"forcedeth"
 
 #include <linux/module.h>
@@ -711,72 +710,6 @@
 	}
 }
 
-static int using_multi_irqs(struct net_device *dev)
-{
-	struct fe_priv *np = get_nvpriv(dev);
-
-	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
-	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
-	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
-		return 0;
-	else
-		return 1;
-}
-
-static void nv_enable_irq(struct net_device *dev)
-{
-	struct fe_priv *np = get_nvpriv(dev);
-
-	if (!using_multi_irqs(dev)) {
-		if (np->msi_flags & NV_MSI_X_ENABLED)
-			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
-		else
-			enable_irq(dev->irq);
-	} else {
-		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
-		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
-		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
-	}
-}
-
-static void nv_disable_irq(struct net_device *dev)
-{
-	struct fe_priv *np = get_nvpriv(dev);
-
-	if (!using_multi_irqs(dev)) {
-		if (np->msi_flags & NV_MSI_X_ENABLED)
-			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
-		else
-			disable_irq(dev->irq);
-	} else {
-		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
-		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
-		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
-	}
-}
-
-/* In MSIX mode, a write to irqmask behaves as XOR */
-static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
-{
-	u8 __iomem *base = get_hwbase(dev);
-
-	writel(mask, base + NvRegIrqMask);
-}
-
-static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
-{
-	struct fe_priv *np = get_nvpriv(dev);
-	u8 __iomem *base = get_hwbase(dev);
-
-	if (np->msi_flags & NV_MSI_X_ENABLED) {
-		writel(mask, base + NvRegIrqMask);
-	} else {
-		if (np->msi_flags & NV_MSI_ENABLED)
-			writel(0, base + NvRegMSIIrqMask);
-		writel(0, base + NvRegIrqMask);
-	}
-}
-
 #define MII_READ	(-1)
 /* mii_rw: read/write a register on the PHY.
  *
@@ -1086,25 +1019,24 @@
 	struct net_device *dev = (struct net_device *) data;
 	struct fe_priv *np = netdev_priv(dev);
 
-	if (!using_multi_irqs(dev)) {
-		if (np->msi_flags & NV_MSI_X_ENABLED)
-			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
-		else
-			disable_irq(dev->irq);
+
+	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
+	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+		disable_irq(dev->irq);
 	} else {
 		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
 	}
 	if (nv_alloc_rx(dev)) {
-		spin_lock_irq(&np->lock);
+		spin_lock(&np->lock);
 		if (!np->in_shutdown)
 			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
-		spin_unlock_irq(&np->lock);
+		spin_unlock(&np->lock);
 	}
-	if (!using_multi_irqs(dev)) {
-		if (np->msi_flags & NV_MSI_X_ENABLED)
-			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
-		else
-			enable_irq(dev->irq);
+	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
+	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+		enable_irq(dev->irq);
 	} else {
 		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
 	}
@@ -1736,7 +1668,15 @@
 		 * guessed, there is probably a simpler approach.
 		 * Changing the MTU is a rare event, it shouldn't matter.
 		 */
-		nv_disable_irq(dev);
+		if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+		    ((np->msi_flags & NV_MSI_X_ENABLED) &&
+		     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+			disable_irq(dev->irq);
+		} else {
+			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
+			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
+		}
 		spin_lock_bh(&dev->xmit_lock);
 		spin_lock(&np->lock);
 		/* stop engines */
@@ -1769,7 +1709,15 @@
 		nv_start_tx(dev);
 		spin_unlock(&np->lock);
 		spin_unlock_bh(&dev->xmit_lock);
-		nv_enable_irq(dev);
+		if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+		    ((np->msi_flags & NV_MSI_X_ENABLED) &&
+		     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+			enable_irq(dev->irq);
+		} else {
+			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
+			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
+		}
 	}
 	return 0;
 }
@@ -2160,16 +2108,16 @@
 		if (!(events & np->irqmask))
 			break;
 
-		spin_lock_irq(&np->lock);
+		spin_lock(&np->lock);
 		nv_tx_done(dev);
-		spin_unlock_irq(&np->lock);
+		spin_unlock(&np->lock);
 		
 		if (events & (NVREG_IRQ_TX_ERR)) {
 			dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
 						dev->name, events);
 		}
 		if (i > max_interrupt_work) {
-			spin_lock_irq(&np->lock);
+			spin_lock(&np->lock);
 			/* disable interrupts on the nic */
 			writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
 			pci_push(base);
@@ -2179,7 +2127,7 @@
 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
 			}
 			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i);
-			spin_unlock_irq(&np->lock);
+			spin_unlock(&np->lock);
 			break;
 		}
 
@@ -2209,14 +2157,14 @@
 		
 		nv_rx_process(dev);
 		if (nv_alloc_rx(dev)) {
-			spin_lock_irq(&np->lock);
+			spin_lock(&np->lock);
 			if (!np->in_shutdown)
 				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
-			spin_unlock_irq(&np->lock);
+			spin_unlock(&np->lock);
 		}
 		
 		if (i > max_interrupt_work) {
-			spin_lock_irq(&np->lock);
+			spin_lock(&np->lock);
 			/* disable interrupts on the nic */
 			writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
 			pci_push(base);
@@ -2226,7 +2174,7 @@
 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
 			}
 			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i);
-			spin_unlock_irq(&np->lock);
+			spin_unlock(&np->lock);
 			break;
 		}
 
@@ -2255,14 +2203,14 @@
 			break;
 		
 		if (events & NVREG_IRQ_LINK) {
-			spin_lock_irq(&np->lock);
+			spin_lock(&np->lock);
 			nv_link_irq(dev);
-			spin_unlock_irq(&np->lock);
+			spin_unlock(&np->lock);
 		}
 		if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
-			spin_lock_irq(&np->lock);
+			spin_lock(&np->lock);
 			nv_linkchange(dev);
-			spin_unlock_irq(&np->lock);
+			spin_unlock(&np->lock);
 			np->link_timeout = jiffies + LINK_TIMEOUT;
 		}
 		if (events & (NVREG_IRQ_UNKNOWN)) {
@@ -2270,7 +2218,7 @@
 						dev->name, events);
 		}
 		if (i > max_interrupt_work) {
-			spin_lock_irq(&np->lock);
+			spin_lock(&np->lock);
 			/* disable interrupts on the nic */
 			writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
 			pci_push(base);
@@ -2280,7 +2228,7 @@
 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
 			}
 			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i);
-			spin_unlock_irq(&np->lock);
+			spin_unlock(&np->lock);
 			break;
 		}
 
@@ -2303,11 +2251,10 @@
 	 * nv_nic_irq because that may decide to do otherwise
 	 */
 
-	if (!using_multi_irqs(dev)) {
-		if (np->msi_flags & NV_MSI_X_ENABLED)
-			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
-		else
-			disable_irq(dev->irq);
+	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
+	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+		disable_irq(dev->irq);
 		mask = np->irqmask;
 	} else {
 		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
@@ -2330,12 +2277,11 @@
 	writel(mask, base + NvRegIrqMask);
 	pci_push(base);
 
-	if (!using_multi_irqs(dev)) {
+	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
+	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
 		nv_nic_irq((int) 0, (void *) data, (struct pt_regs *) NULL);
-		if (np->msi_flags & NV_MSI_X_ENABLED)
-			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
-		else
-			enable_irq(dev->irq);
+		enable_irq(dev->irq);
 	} else {
 		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
 			nv_nic_irq_rx((int) 0, (void *) data, (struct pt_regs *) NULL);
@@ -2682,113 +2628,6 @@
 	writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
 }
 
-static int nv_request_irq(struct net_device *dev)
-{
-	struct fe_priv *np = get_nvpriv(dev);
-	u8 __iomem *base = get_hwbase(dev);
-	int ret = 1;
-	int i;
-
-	if (np->msi_flags & NV_MSI_X_CAPABLE) {
-		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
-			np->msi_x_entry[i].entry = i;
-		}
-		if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
-			np->msi_flags |= NV_MSI_X_ENABLED;
-			if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) {
-				/* Request irq for rx handling */
-				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, SA_SHIRQ, dev->name, dev) != 0) {
-					printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
-					pci_disable_msix(np->pci_dev);
-					np->msi_flags &= ~NV_MSI_X_ENABLED;
-					goto out_err;
-				}
-				/* Request irq for tx handling */
-				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, SA_SHIRQ, dev->name, dev) != 0) {
-					printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
-					pci_disable_msix(np->pci_dev);
-					np->msi_flags &= ~NV_MSI_X_ENABLED;
-					goto out_free_rx;
-				}
-				/* Request irq for link and timer handling */
-				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, SA_SHIRQ, dev->name, dev) != 0) {
-					printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
-					pci_disable_msix(np->pci_dev);
-					np->msi_flags &= ~NV_MSI_X_ENABLED;
-					goto out_free_tx;
-				}
-				/* map interrupts to their respective vector */
-				writel(0, base + NvRegMSIXMap0);
-				writel(0, base + NvRegMSIXMap1);
-				set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
-				set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
-				set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
-			} else {
-				/* Request irq for all interrupts */
-				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
-					printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
-					pci_disable_msix(np->pci_dev);
-					np->msi_flags &= ~NV_MSI_X_ENABLED;
-					goto out_err;
-				}
-
-				/* map interrupts to vector 0 */
-				writel(0, base + NvRegMSIXMap0);
-				writel(0, base + NvRegMSIXMap1);
-			}
-		}
-	}
-	if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
-		if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
-			np->msi_flags |= NV_MSI_ENABLED;
-			if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
-				printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
-				pci_disable_msi(np->pci_dev);
-				np->msi_flags &= ~NV_MSI_ENABLED;
-				goto out_err;
-			}
-
-			/* map interrupts to vector 0 */
-			writel(0, base + NvRegMSIMap0);
-			writel(0, base + NvRegMSIMap1);
-			/* enable msi vector 0 */
-			writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
-		}
-	}
-	if (ret != 0) {
-		if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0)
-			goto out_err;
-	}
-
-	return 0;
-out_free_tx:
-	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
-out_free_rx:
-	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
-out_err:
-	return 1;
-}
-
-static void nv_free_irq(struct net_device *dev)
-{
-	struct fe_priv *np = get_nvpriv(dev);
-	int i;
-
-	if (np->msi_flags & NV_MSI_X_ENABLED) {
-		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
-			free_irq(np->msi_x_entry[i].vector, dev);
-		}
-		pci_disable_msix(np->pci_dev);
-		np->msi_flags &= ~NV_MSI_X_ENABLED;
-	} else {
-		free_irq(np->pci_dev->irq, dev);
-		if (np->msi_flags & NV_MSI_ENABLED) {
-			pci_disable_msi(np->pci_dev);
-			np->msi_flags &= ~NV_MSI_ENABLED;
-		}
-	}
-}
-
 static int nv_open(struct net_device *dev)
 {
 	struct fe_priv *np = netdev_priv(dev);
@@ -2881,16 +2720,12 @@
 	udelay(10);
 	writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);
 
-	nv_disable_hw_interrupts(dev, np->irqmask);
+	writel(0, base + NvRegIrqMask);
 	pci_push(base);
 	writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
 	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
 	pci_push(base);
 
-	if (nv_request_irq(dev)) {
-		goto out_drain;
-	}
-
 	if (np->msi_flags & NV_MSI_X_CAPABLE) {
 		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
 			np->msi_x_entry[i].entry = i;
@@ -2964,7 +2799,7 @@
 	}
 
 	/* ask for interrupts */
-	nv_enable_hw_interrupts(dev, np->irqmask);
+	writel(np->irqmask, base + NvRegIrqMask);
 
 	spin_lock_irq(&np->lock);
 	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
@@ -3008,6 +2843,7 @@
 {
 	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base;
+	int i;
 
 	spin_lock_irq(&np->lock);
 	np->in_shutdown = 1;
@@ -3025,13 +2861,31 @@
 
 	/* disable interrupts on the nic or we will lock up */
 	base = get_hwbase(dev);
-	nv_disable_hw_interrupts(dev, np->irqmask);
+	if (np->msi_flags & NV_MSI_X_ENABLED) {
+		writel(np->irqmask, base + NvRegIrqMask);
+	} else {
+		if (np->msi_flags & NV_MSI_ENABLED)
+			writel(0, base + NvRegMSIIrqMask);
+		writel(0, base + NvRegIrqMask);
+	}
 	pci_push(base);
 	dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);
 
 	spin_unlock_irq(&np->lock);
 
-	nv_free_irq(dev);
+	if (np->msi_flags & NV_MSI_X_ENABLED) {
+		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
+			free_irq(np->msi_x_entry[i].vector, dev);
+		}
+		pci_disable_msix(np->pci_dev);
+		np->msi_flags &= ~NV_MSI_X_ENABLED;
+	} else {
+		free_irq(np->pci_dev->irq, dev);
+		if (np->msi_flags & NV_MSI_ENABLED) {
+			pci_disable_msi(np->pci_dev);
+			np->msi_flags &= ~NV_MSI_ENABLED;
+		}
+	}
 
 	drain_ring(dev);
 
@@ -3120,18 +2974,20 @@
 	if (id->driver_data & DEV_HAS_HIGH_DMA) {
 		/* packet format 3: supports 40-bit addressing */
 		np->desc_ver = DESC_VER_3;
-		np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
 		if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) {
 			printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n",
 					pci_name(pci_dev));
 		} else {
-			dev->features |= NETIF_F_HIGHDMA;
-			printk(KERN_INFO "forcedeth: using HIGHDMA\n");
+			if (pci_set_consistent_dma_mask(pci_dev, 0x0000007fffffffffULL)) {
+				printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed for device %s.\n",
+					pci_name(pci_dev));
+				goto out_relreg;
+			} else {
+				dev->features |= NETIF_F_HIGHDMA;
+				printk(KERN_INFO "forcedeth: using HIGHDMA\n");
+			}
 		}
-		if (pci_set_consistent_dma_mask(pci_dev, 0x0000007fffffffffULL)) {
-			printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed for device %s.\n",
-			       pci_name(pci_dev));
-		}
+		np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
 	} else if (id->driver_data & DEV_HAS_LARGEDESC) {
 		/* packet format 2: supports jumbo frames */
 		np->desc_ver = DESC_VER_2;
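
Note that this revert re-inlines the condition which the deleted
using_multi_irqs() helper had wrapped. Since !A || (A && B) reduces to
!A || B, the repeated test simply means "not MSI-X, or MSI-X with a single
vector". A small check of that equivalence (flag values invented for the
demo; the driver defines its own):

#include <assert.h>

#define NV_MSI_X_ENABLED	0x0020
#define NV_MSI_X_VECTORS_MASK	0x000f

static int single_irq(unsigned int flags)
{
	/* equivalent to the longer inlined form used throughout the patch */
	return !(flags & NV_MSI_X_ENABLED) ||
	       ((flags & NV_MSI_X_VECTORS_MASK) == 0x1);
}

int main(void)
{
	assert(single_irq(0));				/* legacy INTx */
	assert(single_irq(NV_MSI_X_ENABLED | 0x1));	/* MSI-X, one vector */
	assert(!single_irq(NV_MSI_X_ENABLED | 0x3));	/* MSI-X, multi vector */
	return 0;
}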
diff --git a/drivers/net/ixp2000/enp2611.c b/drivers/net/ixp2000/enp2611.c
index 6f7dce8..b67f586 100644
--- a/drivers/net/ixp2000/enp2611.c
+++ b/drivers/net/ixp2000/enp2611.c
@@ -149,6 +149,8 @@
 		int status;
 
 		dev = nds[i];
+		if (dev == NULL)
+			continue;
 
 		status = pm3386_is_link_up(i);
 		if (status && !netif_carrier_ok(dev)) {
@@ -191,6 +193,7 @@
 
 static int __init enp2611_init_module(void)
 { 
+	int ports;
 	int i;
 
 	if (!machine_is_enp2611())
@@ -199,7 +202,8 @@
 	caleb_reset();
 	pm3386_reset();
 
-	for (i = 0; i < 3; i++) {
+	ports = pm3386_port_count();
+	for (i = 0; i < ports; i++) {
 		nds[i] = ixpdev_alloc(i, sizeof(struct enp2611_ixpdev_priv));
 		if (nds[i] == NULL) {
 			while (--i >= 0)
@@ -215,9 +219,10 @@
 
 	ixp2400_msf_init(&enp2611_msf_parameters);
 
-	if (ixpdev_init(3, nds, enp2611_set_port_admin_status)) {
-		for (i = 0; i < 3; i++)
-			free_netdev(nds[i]);
+	if (ixpdev_init(ports, nds, enp2611_set_port_admin_status)) {
+		for (i = 0; i < ports; i++)
+			if (nds[i])
+				free_netdev(nds[i]);
 		return -EINVAL;
 	}
 
diff --git a/drivers/net/ixp2000/pm3386.c b/drivers/net/ixp2000/pm3386.c
index 5c7ab75..5224651 100644
--- a/drivers/net/ixp2000/pm3386.c
+++ b/drivers/net/ixp2000/pm3386.c
@@ -86,40 +86,53 @@
 	pm3386_reg_write(port >> 1, reg, value);
 }
 
+int pm3386_secondary_present(void)
+{
+	return pm3386_reg_read(1, 0) == 0x3386;
+}
 
 void pm3386_reset(void)
 {
 	u8 mac[3][6];
+	int secondary;
+
+	secondary = pm3386_secondary_present();
 
 	/* Save programmed MAC addresses.  */
 	pm3386_get_mac(0, mac[0]);
 	pm3386_get_mac(1, mac[1]);
-	pm3386_get_mac(2, mac[2]);
+	if (secondary)
+		pm3386_get_mac(2, mac[2]);
 
 	/* Assert analog and digital reset.  */
 	pm3386_reg_write(0, 0x002, 0x0060);
-	pm3386_reg_write(1, 0x002, 0x0060);
+	if (secondary)
+		pm3386_reg_write(1, 0x002, 0x0060);
 	mdelay(1);
 
 	/* Deassert analog reset.  */
 	pm3386_reg_write(0, 0x002, 0x0062);
-	pm3386_reg_write(1, 0x002, 0x0062);
+	if (secondary)
+		pm3386_reg_write(1, 0x002, 0x0062);
 	mdelay(10);
 
 	/* Deassert digital reset.  */
 	pm3386_reg_write(0, 0x002, 0x0063);
-	pm3386_reg_write(1, 0x002, 0x0063);
+	if (secondary)
+		pm3386_reg_write(1, 0x002, 0x0063);
 	mdelay(10);
 
 	/* Restore programmed MAC addresses.  */
 	pm3386_set_mac(0, mac[0]);
 	pm3386_set_mac(1, mac[1]);
-	pm3386_set_mac(2, mac[2]);
+	if (secondary)
+		pm3386_set_mac(2, mac[2]);
 
 	/* Disable carrier on all ports.  */
 	pm3386_set_carrier(0, 0);
 	pm3386_set_carrier(1, 0);
-	pm3386_set_carrier(2, 0);
+	if (secondary)
+		pm3386_set_carrier(2, 0);
 }
 
 static u16 swaph(u16 x)
@@ -127,6 +140,11 @@
 	return ((x << 8) | (x >> 8)) & 0xffff;
 }
 
+int pm3386_port_count(void)
+{
+	return 2 + pm3386_secondary_present();
+}
+
 void pm3386_init_port(int port)
 {
 	int pm = port >> 1;
diff --git a/drivers/net/ixp2000/pm3386.h b/drivers/net/ixp2000/pm3386.h
index fe92bb0..cc4183d 100644
--- a/drivers/net/ixp2000/pm3386.h
+++ b/drivers/net/ixp2000/pm3386.h
@@ -13,6 +13,7 @@
 #define __PM3386_H
 
 void pm3386_reset(void);
+int pm3386_port_count(void);
 void pm3386_init_port(int port);
 void pm3386_get_mac(int port, u8 *mac);
 void pm3386_set_mac(int port, u8 *mac);
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index a70c2b0..63a6b3d 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -78,8 +78,7 @@
 	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_GE) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_YU) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, PCI_DEVICE_ID_DLINK_DGE510T), },
-	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) },
-	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b01) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b01) },	/* DGE-530T */
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4320) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5005) }, /* Belkin */
 	{ PCI_DEVICE(PCI_VENDOR_ID_CNET, PCI_DEVICE_ID_CNET_GIGACARD) },
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index ffd267f..60779eb 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -51,7 +51,7 @@
 #include "sky2.h"
 
 #define DRV_NAME		"sky2"
-#define DRV_VERSION		"1.3"
+#define DRV_VERSION		"1.4"
 #define PFX			DRV_NAME " "
 
 /*
@@ -105,6 +105,7 @@
 static const struct pci_device_id sky2_id_table[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) },	/* DGE-560T */
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4340) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4341) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4342) },
@@ -235,6 +236,7 @@
 		}
 
 		if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
+			sky2_write16(hw, B0_CTST, Y2_HW_WOL_ON);
 			sky2_pci_write32(hw, PCI_DEV_REG3, 0);
 			reg1 = sky2_pci_read32(hw, PCI_DEV_REG4);
 			reg1 &= P_ASPM_CONTROL_MSK;
@@ -306,7 +308,7 @@
 	u16 ctrl, ct1000, adv, pg, ledctrl, ledover;
 
 	if (sky2->autoneg == AUTONEG_ENABLE &&
-	    (hw->chip_id != CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U)) {
+	    !(hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U)) {
 		u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);
 
 		ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |
@@ -1020,7 +1022,25 @@
 	struct sky2_hw *hw = sky2->hw;
 	unsigned port = sky2->port;
 	u32 ramsize, rxspace, imask;
-	int err = -ENOMEM;
+	int cap, err = -ENOMEM;
+	struct net_device *otherdev = hw->dev[sky2->port^1];
+
+	/*
+	 * On dual-port PCI-X cards there is a problem where status
+	 * can be received out of order due to split transactions.
+	 */
+	if (otherdev && netif_running(otherdev) &&
+	    (cap = pci_find_capability(hw->pdev, PCI_CAP_ID_PCIX))) {
+		struct sky2_port *osky2 = netdev_priv(otherdev);
+		u16 cmd;
+
+		cmd = sky2_pci_read16(hw, cap + PCI_X_CMD);
+		cmd &= ~PCI_X_CMD_MAX_SPLIT;
+		sky2_pci_write16(hw, cap + PCI_X_CMD, cmd);
+
+		sky2->rx_csum = 0;
+		osky2->rx_csum = 0;
+	}
 
 	if (netif_msg_ifup(sky2))
 		printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);
@@ -1899,6 +1919,12 @@
 	}
 }
 
+/* Is status ring empty or is there more to do? */
+static inline int sky2_more_work(const struct sky2_hw *hw)
+{
+	return (hw->st_idx != sky2_read16(hw, STAT_PUT_IDX));
+}
+
 /* Process status response ring */
 static int sky2_status_intr(struct sky2_hw *hw, int to_do)
 {
@@ -2171,19 +2197,19 @@
 	if (status & Y2_IS_CHK_TXA2)
 		sky2_descriptor_error(hw, 1, "transmit", Y2_IS_CHK_TXA2);
 
-	if (status & Y2_IS_STAT_BMU)
-		sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
-
 	work_done = sky2_status_intr(hw, work_limit);
 	*budget -= work_done;
 	dev0->quota -= work_done;
 
-	if (work_done >= work_limit)
+	if (status & Y2_IS_STAT_BMU)
+		sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
+
+	if (sky2_more_work(hw))
 		return 1;
 
 	netif_rx_complete(dev0);
 
-	status = sky2_read32(hw, B0_Y2_SP_LISR);
+	sky2_read32(hw, B0_Y2_SP_LISR);
 	return 0;
 }
 
@@ -3067,12 +3093,7 @@
 	sky2->duplex = -1;
 	sky2->speed = -1;
 	sky2->advertising = sky2_supported_modes(hw);
-
-	/* Receive checksum disabled for Yukon XL
-	 * because of observed problems with incorrect
-	 * values when multiple packets are received in one interrupt
-	 */
-	sky2->rx_csum = (hw->chip_id != CHIP_ID_YUKON_XL);
+	sky2->rx_csum = 1;
 
 	spin_lock_init(&sky2->phy_lock);
 	sky2->tx_pending = TX_DEF_PENDING;
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index 8012994..8a0bc55 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -214,6 +214,8 @@
 enum {
 	Y2_VMAIN_AVAIL	= 1<<17,/* VMAIN available (YUKON-2 only) */
 	Y2_VAUX_AVAIL	= 1<<16,/* VAUX available (YUKON-2 only) */
+	Y2_HW_WOL_ON	= 1<<15,/* HW WOL On  (Yukon-EC Ultra A1 only) */
+	Y2_HW_WOL_OFF	= 1<<14,/* HW WOL Off (Yukon-EC Ultra A1 only) */
 	Y2_ASF_ENABLE	= 1<<13,/* ASF Unit Enable (YUKON-2 only) */
 	Y2_ASF_DISABLE	= 1<<12,/* ASF Unit Disable (YUKON-2 only) */
 	Y2_CLK_RUN_ENA	= 1<<11,/* CLK_RUN Enable  (YUKON-2 only) */
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index a6dc53b..fdc2103 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -491,8 +491,6 @@
 	u8 tx_thresh, rx_thresh;
 
 	struct mii_if_info mii_if;
-	struct work_struct tx_timeout_task;
-	struct work_struct check_media_task;
 	void __iomem *base;
 };
 
@@ -500,8 +498,6 @@
 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
 static int  rhine_open(struct net_device *dev);
 static void rhine_tx_timeout(struct net_device *dev);
-static void rhine_tx_timeout_task(struct net_device *dev);
-static void rhine_check_media_task(struct net_device *dev);
 static int  rhine_start_tx(struct sk_buff *skb, struct net_device *dev);
 static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
 static void rhine_tx(struct net_device *dev);
@@ -856,12 +852,6 @@
 	if (rp->quirks & rqRhineI)
 		dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
 
-	INIT_WORK(&rp->tx_timeout_task,
-		  (void (*)(void *))rhine_tx_timeout_task, dev);
-
-	INIT_WORK(&rp->check_media_task,
-		  (void (*)(void *))rhine_check_media_task, dev);
-
 	/* dev->name not defined before register_netdev()! */
 	rc = register_netdev(dev);
 	if (rc)
@@ -1108,11 +1098,6 @@
 		       netif_carrier_ok(mii->dev));
 }
 
-static void rhine_check_media_task(struct net_device *dev)
-{
-	rhine_check_media(dev, 0);
-}
-
 static void init_registers(struct net_device *dev)
 {
 	struct rhine_private *rp = netdev_priv(dev);
@@ -1166,8 +1151,8 @@
 	if (quirks & rqRhineI) {
 		iowrite8(0x01, ioaddr + MIIRegAddr);	// MII_BMSR
 
-		/* Do not call from ISR! */
-		msleep(1);
+		/* Can be called from ISR. Evil. */
+		mdelay(1);
 
 		/* 0x80 must be set immediately before turning it off */
 		iowrite8(0x80, ioaddr + MIICmd);
@@ -1257,16 +1242,6 @@
 static void rhine_tx_timeout(struct net_device *dev)
 {
 	struct rhine_private *rp = netdev_priv(dev);
-
-	/*
-	 * Move bulk of work outside of interrupt context
-	 */
-	schedule_work(&rp->tx_timeout_task);
-}
-
-static void rhine_tx_timeout_task(struct net_device *dev)
-{
-	struct rhine_private *rp = netdev_priv(dev);
 	void __iomem *ioaddr = rp->base;
 
 	printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
@@ -1677,7 +1652,7 @@
 	spin_lock(&rp->lock);
 
 	if (intr_status & IntrLinkChange)
-		schedule_work(&rp->check_media_task);
+		rhine_check_media(dev, 0);
 	if (intr_status & IntrStatsMax) {
 		rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
 		rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
@@ -1927,9 +1902,6 @@
 	spin_unlock_irq(&rp->lock);
 
 	free_irq(rp->pdev->irq, dev);
-
-	flush_scheduled_work();
-
 	free_rbufs(dev);
 	free_tbufs(dev);
 	free_ring(dev);
diff --git a/fs/Makefile b/fs/Makefile
index 83bf478..078d3d1 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -45,6 +45,7 @@
 obj-$(CONFIG_PROC_FS)		+= proc/
 obj-y				+= partitions/
 obj-$(CONFIG_SYSFS)		+= sysfs/
+obj-$(CONFIG_CONFIGFS_FS)	+= configfs/
 obj-y				+= devpts/
 
 obj-$(CONFIG_PROFILING)		+= dcookies.o
@@ -100,5 +101,4 @@
 obj-$(CONFIG_HOSTFS)		+= hostfs/
 obj-$(CONFIG_HPPFS)		+= hppfs/
 obj-$(CONFIG_DEBUG_FS)		+= debugfs/
-obj-$(CONFIG_CONFIGFS_FS)	+= configfs/
 obj-$(CONFIG_OCFS2_FS)		+= ocfs2/
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index 5638c8f..5f95218 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -505,13 +505,15 @@
 	int i;
 
 	if (group->default_groups) {
-		/* FYI, we're faking mkdir here
+		/*
+		 * FYI, we're faking mkdir here
 		 * I'm not sure we need this semaphore, as we're called
 		 * from our parent's mkdir.  That holds our parent's
 		 * i_mutex, so afaik lookup cannot continue through our
 		 * parent to find us, let alone mess with our tree.
 		 * That said, taking our i_mutex is closer to mkdir
-		 * emulation, and shouldn't hurt. */
+		 * emulation, and shouldn't hurt.
+		 */
 		mutex_lock(&dentry->d_inode->i_mutex);
 
 		for (i = 0; group->default_groups[i]; i++) {
@@ -546,20 +548,34 @@
 
 		item->ci_group = NULL;
 		item->ci_parent = NULL;
+
+		/* Drop the reference for ci_entry */
 		config_item_put(item);
 
+		/* Drop the reference for ci_parent */
 		config_group_put(group);
 	}
 }
 
 static void link_obj(struct config_item *parent_item, struct config_item *item)
 {
-	/* Parent seems redundant with group, but it makes certain
-	 * traversals much nicer. */
+	/*
+	 * Parent seems redundant with group, but it makes certain
+	 * traversals much nicer.
+	 */
 	item->ci_parent = parent_item;
+
+	/*
+	 * We hold a reference on the parent for the child's ci_parent
+	 * link.
+	 */
 	item->ci_group = config_group_get(to_config_group(parent_item));
 	list_add_tail(&item->ci_entry, &item->ci_group->cg_children);
 
+	/*
+	 * We hold a reference on the child for ci_entry on the parent's
+	 * cg_children list.
+	 */
 	config_item_get(item);
 }
 
@@ -684,6 +700,10 @@
 	type = parent_item->ci_type;
 	BUG_ON(!type);
 
+	/*
+	 * If ->drop_item() exists, it is responsible for the
+	 * config_item_put().
+	 */
 	if (type->ct_group_ops && type->ct_group_ops->drop_item)
 		type->ct_group_ops->drop_item(to_config_group(parent_item),
 						item);
@@ -694,23 +714,28 @@
 
 static int configfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
 {
-	int ret;
+	int ret, module_got = 0;
 	struct config_group *group;
 	struct config_item *item;
 	struct config_item *parent_item;
 	struct configfs_subsystem *subsys;
 	struct configfs_dirent *sd;
 	struct config_item_type *type;
-	struct module *owner;
+	struct module *owner = NULL;
 	char *name;
 
-	if (dentry->d_parent == configfs_sb->s_root)
-		return -EPERM;
+	if (dentry->d_parent == configfs_sb->s_root) {
+		ret = -EPERM;
+		goto out;
+	}
 
 	sd = dentry->d_parent->d_fsdata;
-	if (!(sd->s_type & CONFIGFS_USET_DIR))
-		return -EPERM;
+	if (!(sd->s_type & CONFIGFS_USET_DIR)) {
+		ret = -EPERM;
+		goto out;
+	}
 
+	/* Get a working ref for the duration of this function */
 	parent_item = configfs_get_config_item(dentry->d_parent);
 	type = parent_item->ci_type;
 	subsys = to_config_group(parent_item)->cg_subsys;
@@ -719,15 +744,16 @@
 	if (!type || !type->ct_group_ops ||
 	    (!type->ct_group_ops->make_group &&
 	     !type->ct_group_ops->make_item)) {
-		config_item_put(parent_item);
-		return -EPERM;  /* What lack-of-mkdir returns */
+		ret = -EPERM;  /* Lack-of-mkdir returns -EPERM */
+		goto out_put;
 	}
 
 	name = kmalloc(dentry->d_name.len + 1, GFP_KERNEL);
 	if (!name) {
-		config_item_put(parent_item);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto out_put;
 	}
+
 	snprintf(name, dentry->d_name.len + 1, "%s", dentry->d_name.name);
 
 	down(&subsys->su_sem);
@@ -748,40 +774,67 @@
 
 	kfree(name);
 	if (!item) {
-		config_item_put(parent_item);
-		return -ENOMEM;
+		/*
+		 * If item == NULL, then link_obj() was never called.
+		 * There are no extra references to clean up.
+		 */
+		ret = -ENOMEM;
+		goto out_put;
 	}
 
-	ret = -EINVAL;
+	/*
+	 * link_obj() has been called (via link_group() for groups).
+	 * From here on out, errors must clean that up.
+	 */
+
 	type = item->ci_type;
-	if (type) {
-		owner = type->ct_owner;
-		if (try_module_get(owner)) {
-			if (group) {
-				ret = configfs_attach_group(parent_item,
-							    item,
-							    dentry);
-			} else {
-				ret = configfs_attach_item(parent_item,
-							   item,
-							   dentry);
-			}
-
-			if (ret) {
-				down(&subsys->su_sem);
-				if (group)
-					unlink_group(group);
-				else
-					unlink_obj(item);
-				client_drop_item(parent_item, item);
-				up(&subsys->su_sem);
-
-				config_item_put(parent_item);
-				module_put(owner);
-			}
-		}
+	if (!type) {
+		ret = -EINVAL;
+		goto out_unlink;
 	}
 
+	owner = type->ct_owner;
+	if (!try_module_get(owner)) {
+		ret = -EINVAL;
+		goto out_unlink;
+	}
+
+	/*
+	 * I hate doing it this way, but if there is
+	 * an error, module_put() probably should
+	 * happen after any cleanup.
+	 */
+	module_got = 1;
+
+	if (group)
+		ret = configfs_attach_group(parent_item, item, dentry);
+	else
+		ret = configfs_attach_item(parent_item, item, dentry);
+
+out_unlink:
+	if (ret) {
+		/* Tear down everything we built up */
+		down(&subsys->su_sem);
+		if (group)
+			unlink_group(group);
+		else
+			unlink_obj(item);
+		client_drop_item(parent_item, item);
+		up(&subsys->su_sem);
+
+		if (module_got)
+			module_put(owner);
+	}
+
+out_put:
+	/*
+	 * link_obj()/link_group() took a reference from child->parent,
+	 * so the parent is safely pinned.  We can drop our working
+	 * reference.
+	 */
+	config_item_put(parent_item);
+
+out:
 	return ret;
 }
 
@@ -801,6 +854,7 @@
 	if (sd->s_type & CONFIGFS_USET_DEFAULT)
 		return -EPERM;
 
+	/* Get a working ref until we have the child */
 	parent_item = configfs_get_config_item(dentry->d_parent);
 	subsys = to_config_group(parent_item)->cg_subsys;
 	BUG_ON(!subsys);
@@ -817,6 +871,7 @@
 		return ret;
 	}
 
+	/* Get a working ref for the duration of this function */
 	item = configfs_get_config_item(dentry);
 
 	/* Drop reference from above, item already holds one. */
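
/*
 * Editor's aside: the configfs_mkdir() rewrite above is the standard
 * kernel "goto unwind" shape -- take references in order, record what
 * succeeded, and undo only that on failure.  A minimal sketch, with
 * hypothetical early_step()/late_step() standing in for the real
 * lookup/attach calls:
 */
#include <linux/module.h>

static int early_step(void);	/* hypothetical */
static int late_step(void);	/* hypothetical */

static int goto_unwind_sketch(struct module *owner)
{
	int ret, module_got = 0;

	ret = early_step();
	if (ret)
		goto out_unwind;

	if (!try_module_get(owner)) {
		ret = -EINVAL;
		goto out_unwind;
	}
	module_got = 1;

	ret = late_step();

out_unwind:
	if (ret && module_got)
		module_put(owner);	/* put only what we actually got */
	return ret;
}
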
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 0d858d0..47152bf 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -276,13 +276,29 @@
 	return ret;
 }
 
+/* This can also be called from ocfs2_write_zero_page() which has done
+ * its own cluster locking. */
+int ocfs2_prepare_write_nolock(struct inode *inode, struct page *page,
+			       unsigned from, unsigned to)
+{
+	int ret;
+
+	down_read(&OCFS2_I(inode)->ip_alloc_sem);
+
+	ret = block_prepare_write(page, from, to, ocfs2_get_block);
+
+	up_read(&OCFS2_I(inode)->ip_alloc_sem);
+
+	return ret;
+}
+
 /*
  * ocfs2_prepare_write() can be an outer-most ocfs2 call when it is called
  * from loopback.  It must be able to perform its own locking around
  * ocfs2_get_block().
  */
-int ocfs2_prepare_write(struct file *file, struct page *page,
-			unsigned from, unsigned to)
+static int ocfs2_prepare_write(struct file *file, struct page *page,
+			       unsigned from, unsigned to)
 {
 	struct inode *inode = page->mapping->host;
 	int ret;
@@ -295,11 +311,7 @@
 		goto out;
 	}
 
-	down_read(&OCFS2_I(inode)->ip_alloc_sem);
-
-	ret = block_prepare_write(page, from, to, ocfs2_get_block);
-
-	up_read(&OCFS2_I(inode)->ip_alloc_sem);
+	ret = ocfs2_prepare_write_nolock(inode, page, from, to);
 
 	ocfs2_meta_unlock(inode, 0);
 out:
@@ -625,11 +637,31 @@
 	int ret;
 
 	mlog_entry_void();
+
+	/*
+	 * We get PR data locks even for O_DIRECT.  This allows
+	 * concurrent O_DIRECT I/O but doesn't let O_DIRECT with
+	 * extending and buffered zeroing writes race.  If they did
+	 * race then the buffered zeroing could be written back after
+	 * the O_DIRECT I/O.  It's one thing to tell people not to mix
+	 * buffered and O_DIRECT writes, but expecting them to
+	 * understand that file extension is also an implicit buffered
+	 * write is too much.  By getting the PR we force writeback of
+	 * the buffered zeroing before proceeding.
+	 */
+	ret = ocfs2_data_lock(inode, 0);
+	if (ret < 0) {
+		mlog_errno(ret);
+		goto out;
+	}
+	ocfs2_data_unlock(inode, 0);
+
 	ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
 					    inode->i_sb->s_bdev, iov, offset,
 					    nr_segs, 
 					    ocfs2_direct_IO_get_blocks,
 					    ocfs2_dio_end_io);
+out:
 	mlog_exit(ret);
 	return ret;
 }
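
/*
 * Editor's aside: the split above is the usual cure for recursive
 * locking -- one _nolock core plus a locking wrapper.  Shape of the
 * resulting call graph (real ocfs2 names, simplified):
 *
 *   ocfs2_prepare_write()              takes the cluster meta lock,
 *     -> ocfs2_prepare_write_nolock()    then ip_alloc_sem + block_prepare_write
 *
 *   ocfs2_write_zero_page()            already holds its cluster locks,
 *     -> ocfs2_prepare_write_nolock()    so it calls the core directly
 */
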
diff --git a/fs/ocfs2/aops.h b/fs/ocfs2/aops.h
index d40456d..e88c3f0 100644
--- a/fs/ocfs2/aops.h
+++ b/fs/ocfs2/aops.h
@@ -22,8 +22,8 @@
 #ifndef OCFS2_AOPS_H
 #define OCFS2_AOPS_H
 
-int ocfs2_prepare_write(struct file *file, struct page *page,
-			unsigned from, unsigned to);
+int ocfs2_prepare_write_nolock(struct inode *inode, struct page *page,
+			       unsigned from, unsigned to);
 
 struct ocfs2_journal_handle *ocfs2_start_walk_page_trans(struct inode *inode,
 							 struct page *page,
diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
index 4601fc2..1a5c690 100644
--- a/fs/ocfs2/extent_map.c
+++ b/fs/ocfs2/extent_map.c
@@ -569,7 +569,7 @@
 
 	ret = -ENOMEM;
 	ctxt.new_ent = kmem_cache_alloc(ocfs2_em_ent_cachep,
-					GFP_KERNEL);
+					GFP_NOFS);
 	if (!ctxt.new_ent) {
 		mlog_errno(ret);
 		return ret;
@@ -583,14 +583,14 @@
 		if (ctxt.need_left && !ctxt.left_ent) {
 			ctxt.left_ent =
 				kmem_cache_alloc(ocfs2_em_ent_cachep,
-						 GFP_KERNEL);
+						 GFP_NOFS);
 			if (!ctxt.left_ent)
 				break;
 		}
 		if (ctxt.need_right && !ctxt.right_ent) {
 			ctxt.right_ent =
 				kmem_cache_alloc(ocfs2_em_ent_cachep,
-						 GFP_KERNEL);
+						 GFP_NOFS);
 			if (!ctxt.right_ent)
 				break;
 		}
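
/*
 * Editor's aside on the GFP_KERNEL -> GFP_NOFS changes in this file
 * (and in journal.c, uptodate.c and vote.c below): a GFP_KERNEL
 * allocation may recurse into filesystem writeback to reclaim memory,
 * which can deadlock if the caller already holds filesystem locks.
 * Minimal sketch of the rule (helper name is illustrative):
 */
#include <linux/slab.h>

/* while holding fs locks (journal handle, cluster lock, ...): */
static void *fs_safe_alloc(struct kmem_cache *cachep)
{
	return kmem_cache_alloc(cachep, GFP_NOFS);	/* no fs re-entry */
}
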
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 581eb45..a9559c8 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -613,7 +613,8 @@
 
 /* Some parts of this taken from generic_cont_expand, which turned out
  * to be too fragile to do exactly what we need without us having to
- * worry about recursive locking in ->commit_write(). */
+ * worry about recursive locking in ->prepare_write() and
+ * ->commit_write(). */
 static int ocfs2_write_zero_page(struct inode *inode,
 				 u64 size)
 {
@@ -641,7 +642,7 @@
 		goto out;
 	}
 
-	ret = ocfs2_prepare_write(NULL, page, offset, offset);
+	ret = ocfs2_prepare_write_nolock(inode, page, offset, offset);
 	if (ret < 0) {
 		mlog_errno(ret);
 		goto out_unlock;
@@ -695,13 +696,26 @@
 	return ret;
 }
 
+/*
+ * A tail_to_skip value > 0 indicates that we're being called from
+ * ocfs2_file_aio_write(). This has the following implications:
+ *
+ * - we don't want to update i_size
+ * - di_bh will be NULL, which is fine because it's only used in the
+ *   case where we want to update i_size.
+ * - ocfs2_zero_extend() will then only be filling the hole created
+ *   between i_size and the start of the write.
+ */
 static int ocfs2_extend_file(struct inode *inode,
 			     struct buffer_head *di_bh,
-			     u64 new_i_size)
+			     u64 new_i_size,
+			     size_t tail_to_skip)
 {
 	int ret = 0;
 	u32 clusters_to_add;
 
+	BUG_ON(!tail_to_skip && !di_bh);
+
 	/* setattr sometimes calls us like this. */
 	if (new_i_size == 0)
 		goto out;
@@ -714,27 +728,44 @@
 		OCFS2_I(inode)->ip_clusters;
 
 	if (clusters_to_add) {
+		/*
+		 * Protect the pages that ocfs2_zero_extend is going to
+		 * be pulling into the page cache.  We do this before the
+		 * metadata extend so that we don't get into the situation
+		 * where we've extended the metadata but can't get the data
+		 * lock to zero.
+		 */
+		ret = ocfs2_data_lock(inode, 1);
+		if (ret < 0) {
+			mlog_errno(ret);
+			goto out;
+		}
+
 		ret = ocfs2_extend_allocation(inode, clusters_to_add);
 		if (ret < 0) {
 			mlog_errno(ret);
-			goto out;
+			goto out_unlock;
 		}
 
-		ret = ocfs2_zero_extend(inode, new_i_size);
+		ret = ocfs2_zero_extend(inode, (u64)new_i_size - tail_to_skip);
 		if (ret < 0) {
 			mlog_errno(ret);
-			goto out;
+			goto out_unlock;
 		}
-	} 
-
-	/* No allocation required, we just use this helper to
-	 * do a trivial update of i_size. */
-	ret = ocfs2_simple_size_update(inode, di_bh, new_i_size);
-	if (ret < 0) {
-		mlog_errno(ret);
-		goto out;
 	}
 
+	if (!tail_to_skip) {
+		/* We're being called from ocfs2_setattr() which wants
+		 * us to update i_size */
+		ret = ocfs2_simple_size_update(inode, di_bh, new_i_size);
+		if (ret < 0)
+			mlog_errno(ret);
+	}
+
+out_unlock:
+	if (clusters_to_add) /* this is the only case in which we lock */
+		ocfs2_data_unlock(inode, 1);
+
 out:
 	return ret;
 }
@@ -793,7 +824,7 @@
 		if (i_size_read(inode) > attr->ia_size)
 			status = ocfs2_truncate_file(inode, bh, attr->ia_size);
 		else
-			status = ocfs2_extend_file(inode, bh, attr->ia_size);
+			status = ocfs2_extend_file(inode, bh, attr->ia_size, 0);
 		if (status < 0) {
 			if (status != -ENOSPC)
 				mlog_errno(status);
@@ -1049,21 +1080,12 @@
 		if (!clusters)
 			break;
 
-		ret = ocfs2_extend_allocation(inode, clusters);
+		ret = ocfs2_extend_file(inode, NULL, newsize, count);
 		if (ret < 0) {
 			if (ret != -ENOSPC)
 				mlog_errno(ret);
 			goto out;
 		}
-
-		/* Fill any holes which would've been created by this
-		 * write. If we're O_APPEND, this will wind up
-		 * (correctly) being a noop. */
-		ret = ocfs2_zero_extend(inode, (u64) newsize - count);
-		if (ret < 0) {
-			mlog_errno(ret);
-			goto out;
-		}
 		break;
 	}
 
@@ -1146,6 +1168,22 @@
 		ocfs2_iocb_set_rw_locked(iocb);
 	}
 
+	/*
+	 * We're fine letting folks race truncates and extending
+	 * writes with read across the cluster, just like they can
+	 * locally.  Hence no rw_lock during read.
+	 *
+	 * Take and drop the meta data lock to update inode fields
+	 * like i_size.  This gives the checks down in
+	 * generic_file_aio_read() a chance of actually working.
+	 */
+	ret = ocfs2_meta_lock(inode, NULL, NULL, 0);
+	if (ret < 0) {
+		mlog_errno(ret);
+		goto bail;
+	}
+	ocfs2_meta_unlock(inode, 0);
+
 	ret = generic_file_aio_read(iocb, buf, count, iocb->ki_pos);
 	if (ret == -EINVAL)
 		mlog(ML_ERROR, "generic_file_aio_read returned -EINVAL\n");
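
/*
 * Editor's aside: the two call sites of the reworked ocfs2_extend_file()
 * above, side by side (both taken verbatim from this patch), show the
 * tail_to_skip contract in action:
 */
	/* ocfs2_setattr(): update i_size, zero everything up to it */
	status = ocfs2_extend_file(inode, bh, attr->ia_size, 0);

	/* ocfs2_file_aio_write(): leave i_size alone (di_bh == NULL) and
	 * only zero the hole up to the start of the write */
	ret = ocfs2_extend_file(inode, NULL, newsize, count);
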
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index 6a610ae..eebc3cf 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -117,7 +117,7 @@
 {
 	struct ocfs2_journal_handle *retval = NULL;
 
-	retval = kcalloc(1, sizeof(*retval), GFP_KERNEL);
+	retval = kcalloc(1, sizeof(*retval), GFP_NOFS);
 	if (!retval) {
 		mlog(ML_ERROR, "Failed to allocate memory for journal "
 		     "handle!\n");
@@ -870,9 +870,11 @@
 		if (p_blocks > CONCURRENT_JOURNAL_FILL)
 			p_blocks = CONCURRENT_JOURNAL_FILL;
 
+		/* We are reading journal data, which should not
+		 * be put in the uptodate cache. */
 		status = ocfs2_read_blocks(OCFS2_SB(inode->i_sb),
 					   p_blkno, p_blocks, bhs, 0,
-					   inode);
+					   NULL);
 		if (status < 0) {
 			mlog_errno(status);
 			goto bail;
@@ -982,7 +984,7 @@
 {
 	struct ocfs2_la_recovery_item *item;
 
-	item = kmalloc(sizeof(struct ocfs2_la_recovery_item), GFP_KERNEL);
+	item = kmalloc(sizeof(struct ocfs2_la_recovery_item), GFP_NOFS);
 	if (!item) {
 		/* Though we wish to avoid it, we are in fact safe in
 		 * skipping local alloc cleanup as fsck.ocfs2 is more
diff --git a/fs/ocfs2/uptodate.c b/fs/ocfs2/uptodate.c
index 04a684d..b8a00a7 100644
--- a/fs/ocfs2/uptodate.c
+++ b/fs/ocfs2/uptodate.c
@@ -337,7 +337,7 @@
 	     (unsigned long long)oi->ip_blkno,
 	     (unsigned long long)block, expand_tree);
 
-	new = kmem_cache_alloc(ocfs2_uptodate_cachep, GFP_KERNEL);
+	new = kmem_cache_alloc(ocfs2_uptodate_cachep, GFP_NOFS);
 	if (!new) {
 		mlog_errno(-ENOMEM);
 		return;
@@ -349,7 +349,7 @@
 		 * has no way of tracking that. */
 		for(i = 0; i < OCFS2_INODE_MAX_CACHE_ARRAY; i++) {
 			tree[i] = kmem_cache_alloc(ocfs2_uptodate_cachep,
-						   GFP_KERNEL);
+						   GFP_NOFS);
 			if (!tree[i]) {
 				mlog_errno(-ENOMEM);
 				goto out_free;
diff --git a/fs/ocfs2/vote.c b/fs/ocfs2/vote.c
index 53049a2..ee42765 100644
--- a/fs/ocfs2/vote.c
+++ b/fs/ocfs2/vote.c
@@ -586,7 +586,7 @@
 {
 	struct ocfs2_net_wait_ctxt *w;
 
-	w = kcalloc(1, sizeof(*w), GFP_KERNEL);
+	w = kcalloc(1, sizeof(*w), GFP_NOFS);
 	if (!w) {
 		mlog_errno(-ENOMEM);
 		goto bail;
@@ -749,7 +749,7 @@
 
 	BUG_ON(!ocfs2_is_valid_vote_request(type));
 
-	request = kcalloc(1, sizeof(*request), GFP_KERNEL);
+	request = kcalloc(1, sizeof(*request), GFP_NOFS);
 	if (!request) {
 		mlog_errno(-ENOMEM);
 	} else {
@@ -1129,7 +1129,7 @@
 	struct ocfs2_super *osb = data;
 	struct ocfs2_vote_work *work;
 
-	work = kmalloc(sizeof(struct ocfs2_vote_work), GFP_KERNEL);
+	work = kmalloc(sizeof(struct ocfs2_vote_work), GFP_NOFS);
 	if (!work) {
 		status = -ENOMEM;
 		mlog_errno(status);
diff --git a/include/asm-arm/spinlock.h b/include/asm-arm/spinlock.h
index 43ad4e5..406ca97 100644
--- a/include/asm-arm/spinlock.h
+++ b/include/asm-arm/spinlock.h
@@ -142,6 +142,9 @@
 	: "cc");
 }
 
+/* write_can_lock - would write_trylock() succeed? */
+#define __raw_write_can_lock(x)		((x)->lock == 0)
+
 /*
  * Read locks are a bit more hairy:
  *  - Exclusively load the lock value.
@@ -198,4 +201,7 @@
 
 #define __raw_read_trylock(lock) generic__raw_read_trylock(lock)
 
+/* read_can_lock - would read_trylock() succeed? */
+#define __raw_read_can_lock(x)		((x)->lock < 0x80000000)
+
 #endif /* __ASM_SPINLOCK_H */
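
/*
 * Editor's aside: the lock-word encoding both macros rely on, modelled
 * in plain, runnable C.  The values follow the read-side implementation
 * above: 0 = free, 1..N = N readers, 0x80000000 = writer holds it, so
 * a writer needs the word fully free while a reader only needs the
 * absence of a writer:
 */
#include <assert.h>

#define WRITE_CAN_LOCK(v)	((v) == 0)		/* must be fully free */
#define READ_CAN_LOCK(v)	((v) < 0x80000000UL)	/* merely no writer */

int main(void)
{
	assert(WRITE_CAN_LOCK(0UL) && READ_CAN_LOCK(0UL));
	assert(!WRITE_CAN_LOCK(3UL) && READ_CAN_LOCK(3UL));	/* 3 readers */
	assert(!WRITE_CAN_LOCK(0x80000000UL) && !READ_CAN_LOCK(0x80000000UL));
	return 0;
}
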
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index bdc556d..03a14a3 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -69,6 +69,7 @@
 	unsigned int		timeout_ns;	/* data timeout (in ns, max 80ms) */
 	unsigned int		timeout_clks;	/* data timeout (in clocks) */
 	unsigned int		blksz_bits;	/* data block size */
+	unsigned int		blksz;		/* data block size */
 	unsigned int		blocks;		/* number of blocks */
 	unsigned int		error;		/* data error */
 	unsigned int		flags;
diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h
index 34a1a09..807d6f1 100644
--- a/include/net/sctp/command.h
+++ b/include/net/sctp/command.h
@@ -99,6 +99,7 @@
 	SCTP_CMD_DEL_NON_PRIMARY, /* Removes non-primary peer transports. */
 	SCTP_CMD_T3_RTX_TIMERS_STOP, /* Stops T3-rtx pending timers */
 	SCTP_CMD_FORCE_PRIM_RETRAN,  /* Forces retrans. over primary path. */
+	SCTP_CMD_SET_SK_ERR,	 /* Set sk_err */
 	SCTP_CMD_LAST
 } sctp_verb_t;
 
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index e673b2c..aa6033c 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -461,12 +461,12 @@
  * there is room for a param header too.
  */
 #define sctp_walk_params(pos, chunk, member)\
-_sctp_walk_params((pos), (chunk), WORD_ROUND(ntohs((chunk)->chunk_hdr.length)), member)
+_sctp_walk_params((pos), (chunk), ntohs((chunk)->chunk_hdr.length), member)
 
 #define _sctp_walk_params(pos, chunk, end, member)\
 for (pos.v = chunk->member;\
      pos.v <= (void *)chunk + end - sizeof(sctp_paramhdr_t) &&\
-     pos.v <= (void *)chunk + end - WORD_ROUND(ntohs(pos.p->length)) &&\
+     pos.v <= (void *)chunk + end - ntohs(pos.p->length) &&\
      ntohs(pos.p->length) >= sizeof(sctp_paramhdr_t);\
      pos.v += WORD_ROUND(ntohs(pos.p->length)))
 
@@ -477,7 +477,7 @@
 for (err = (sctp_errhdr_t *)((void *)chunk_hdr + \
 	    sizeof(sctp_chunkhdr_t));\
      (void *)err <= (void *)chunk_hdr + end - sizeof(sctp_errhdr_t) &&\
-     (void *)err <= (void *)chunk_hdr + end - WORD_ROUND(ntohs(err->length)) &&\
+     (void *)err <= (void *)chunk_hdr + end - ntohs(err->length) &&\
      ntohs(err->length) >= sizeof(sctp_errhdr_t); \
      err = (sctp_errhdr_t *)((void *)err + WORD_ROUND(ntohs(err->length))))
 
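/*
 * Editor's worked example for the WORD_ROUND removals above: take a
 * chunk whose contents end at offset 20 and a final parameter of
 * length 6 (padded length 8) starting at offset 14:
 *
 *   old bound: 14 <= 20 - WORD_ROUND(6) = 12   -> false, param skipped
 *   new bound: 14 <= 20 - 6             = 14   -> true,  param visited
 *
 * The iterator still advances by WORD_ROUND(length), so padding between
 * parameters is handled as before; only a legitimately unpadded final
 * parameter is affected.  The error-header walker below it gets the
 * same treatment.
 */
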
diff --git a/net/802/tr.c b/net/802/tr.c
index afd8385..e9dc803 100644
--- a/net/802/tr.c
+++ b/net/802/tr.c
@@ -643,6 +643,5 @@
 
 module_init(rif_init);
 
-EXPORT_SYMBOL(tr_source_route);
 EXPORT_SYMBOL(tr_type_trans);
 EXPORT_SYMBOL(alloc_trdev);
diff --git a/net/bridge/netfilter/ebt_log.c b/net/bridge/netfilter/ebt_log.c
index d159c92..466ed34 100644
--- a/net/bridge/netfilter/ebt_log.c
+++ b/net/bridge/netfilter/ebt_log.c
@@ -168,7 +168,7 @@
 
 	if (info->bitmask & EBT_LOG_NFLOG)
 		nf_log_packet(PF_BRIDGE, hooknr, skb, in, out, &li,
-		              info->prefix);
+		              "%s", info->prefix);
 	else
 		ebt_log_packet(PF_BRIDGE, hooknr, skb, in, out, &li,
 		               info->prefix);
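
/*
 * Editor's aside: the "%s" insertion above (repeated for ipt_LOG and
 * ip6t_LOG below) is the classic format-string fix -- info->prefix is
 * user-supplied and must be treated as data, never as the format.
 * Runnable userspace analogue:
 */
#include <stdio.h>

static void log_prefix(const char *user_prefix)
{
	/* printf(user_prefix);     BAD: "%n"/"%s" in the data gets parsed */
	printf("%s", user_prefix);	/* GOOD: constant format string */
}
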
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index c2d92f9..d0d1919 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -948,7 +948,7 @@
 
 	write_lock_bh(&t->lock);
 	private = t->private;
-	if (private->number != paddc->num_counters) {
+	if (private->number != tmp.num_counters) {
 		ret = -EINVAL;
 		goto unlock_up_free;
 	}
diff --git a/net/ipv4/netfilter/ip_nat_proto_gre.c b/net/ipv4/netfilter/ip_nat_proto_gre.c
index 6c4899d..96ceaba 100644
--- a/net/ipv4/netfilter/ip_nat_proto_gre.c
+++ b/net/ipv4/netfilter/ip_nat_proto_gre.c
@@ -49,15 +49,15 @@
 	     const union ip_conntrack_manip_proto *min,
 	     const union ip_conntrack_manip_proto *max)
 {
-	u_int32_t key;
+	__be16 key;
 
 	if (maniptype == IP_NAT_MANIP_SRC)
 		key = tuple->src.u.gre.key;
 	else
 		key = tuple->dst.u.gre.key;
 
-	return ntohl(key) >= ntohl(min->gre.key)
-		&& ntohl(key) <= ntohl(max->gre.key);
+	return ntohs(key) >= ntohs(min->gre.key)
+		&& ntohs(key) <= ntohs(max->gre.key);
 }
 
 /* generate unique tuple ... */
@@ -81,14 +81,14 @@
 		min = 1;
 		range_size = 0xffff;
 	} else {
-		min = ntohl(range->min.gre.key);
-		range_size = ntohl(range->max.gre.key) - min + 1;
+		min = ntohs(range->min.gre.key);
+		range_size = ntohs(range->max.gre.key) - min + 1;
 	}
 
 	DEBUGP("min = %u, range_size = %u\n", min, range_size); 
 
 	for (i = 0; i < range_size; i++, key++) {
-		*keyptr = htonl(min + key % range_size);
+		*keyptr = htons(min + key % range_size);
 		if (!ip_nat_used_tuple(tuple, conntrack))
 			return 1;
 	}
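
/*
 * Editor's aside: range checks on network-byte-order values have to be
 * done in host order; on a little-endian machine the raw __be16
 * ordering is byte-swapped.  Runnable userspace illustration with
 * made-up key values:
 */
#include <arpa/inet.h>
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint16_t lo = htons(0x0100), hi = htons(0x0200), key = htons(0x01ff);

	/* correct: convert to host order, then compare */
	assert(ntohs(key) >= ntohs(lo) && ntohs(key) <= ntohs(hi));

	/* a raw little-endian comparison would see key (0xff01) above
	 * hi (0x0002) and wrongly reject it */
	return 0;
}
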
diff --git a/net/ipv4/netfilter/ipt_LOG.c b/net/ipv4/netfilter/ipt_LOG.c
index 39fd4c2..b98f7b0 100644
--- a/net/ipv4/netfilter/ipt_LOG.c
+++ b/net/ipv4/netfilter/ipt_LOG.c
@@ -428,7 +428,7 @@
 
 	if (loginfo->logflags & IPT_LOG_NFLOG)
 		nf_log_packet(PF_INET, hooknum, *pskb, in, out, &li,
-		              loginfo->prefix);
+		              "%s", loginfo->prefix);
 	else
 		ipt_log_packet(PF_INET, hooknum, *pskb, in, out, &li,
 		               loginfo->prefix);
diff --git a/net/ipv4/netfilter/ipt_recent.c b/net/ipv4/netfilter/ipt_recent.c
index 1438432..b847ee4 100644
--- a/net/ipv4/netfilter/ipt_recent.c
+++ b/net/ipv4/netfilter/ipt_recent.c
@@ -821,6 +821,7 @@
 	/* Create our proc 'status' entry. */
 	curr_table->status_proc = create_proc_entry(curr_table->name, ip_list_perms, proc_net_ipt_recent);
 	if (!curr_table->status_proc) {
+		vfree(hold);
 		printk(KERN_INFO RECENT_NAME ": checkentry: unable to allocate for /proc entry.\n");
 		/* Destroy the created table */
 		spin_lock_bh(&recent_lock);
@@ -845,7 +846,6 @@
 		spin_unlock_bh(&recent_lock);
 		vfree(curr_table->time_info);
 		vfree(curr_table->hash_table);
-		vfree(hold);
 		vfree(curr_table->table);
 		vfree(curr_table);
 		return 0;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 9f0cca4..4a538bc 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1662,6 +1662,8 @@
 			if (!(TCP_SKB_CB(skb)->sacked&TCPCB_TAGBITS)) {
 				TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
 				tp->lost_out += tcp_skb_pcount(skb);
+				if (IsReno(tp))
+					tcp_remove_reno_sacks(sk, tp, tcp_skb_pcount(skb) + 1);
 
 				/* clear xmit_retrans hint */
 				if (tp->retransmit_skb_hint &&
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 0a67303..2e72f89 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -1103,7 +1103,7 @@
 
 	write_lock_bh(&t->lock);
 	private = t->private;
-	if (private->number != paddc->num_counters) {
+	if (private->number != tmp.num_counters) {
 		ret = -EINVAL;
 		goto unlock_up_free;
 	}
diff --git a/net/ipv6/netfilter/ip6t_LOG.c b/net/ipv6/netfilter/ip6t_LOG.c
index a96c0de..73c6300 100644
--- a/net/ipv6/netfilter/ip6t_LOG.c
+++ b/net/ipv6/netfilter/ip6t_LOG.c
@@ -439,7 +439,7 @@
 
 	if (loginfo->logflags & IP6T_LOG_NFLOG)
 		nf_log_packet(PF_INET6, hooknum, *pskb, in, out, &li,
-		              loginfo->prefix);
+		              "%s", loginfo->prefix);
 	else
 		ip6t_log_packet(PF_INET6, hooknum, *pskb, in, out, &li,
 		                loginfo->prefix);
diff --git a/net/ipv6/netfilter/ip6t_eui64.c b/net/ipv6/netfilter/ip6t_eui64.c
index 94dbdb8..4f6b84c 100644
--- a/net/ipv6/netfilter/ip6t_eui64.c
+++ b/net/ipv6/netfilter/ip6t_eui64.c
@@ -40,7 +40,7 @@
 
 	memset(eui64, 0, sizeof(eui64));
 
-	if (eth_hdr(skb)->h_proto == ntohs(ETH_P_IPV6)) {
+	if (eth_hdr(skb)->h_proto == htons(ETH_P_IPV6)) {
 		if (skb->nh.ipv6h->version == 0x6) {
 			memcpy(eui64, eth_hdr(skb)->h_source, 3);
 			memcpy(eui64 + 5, eth_hdr(skb)->h_source + 3, 3);
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index 2dbf134..811d998 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -944,9 +944,9 @@
 	return rc;
 }
 
-static int ipx_map_frame_type(unsigned char type)
+static __be16 ipx_map_frame_type(unsigned char type)
 {
-	int rc = 0;
+	__be16 rc = 0;
 
 	switch (type) {
 	case IPX_FRAME_ETHERII:	rc = htons(ETH_P_IPX);		break;
diff --git a/net/ipx/ipx_route.c b/net/ipx/ipx_route.c
index 6777444..a394c6f 100644
--- a/net/ipx/ipx_route.c
+++ b/net/ipx/ipx_route.c
@@ -119,7 +119,7 @@
 	return rc;
 }
 
-static int ipxrtr_delete(long net)
+static int ipxrtr_delete(__u32 net)
 {
 	struct ipx_route *r, *tmp;
 	int rc;
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index c60273c..61cdda4 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -321,7 +321,7 @@
 nfulnl_set_flags(struct nfulnl_instance *inst, u_int16_t flags)
 {
 	spin_lock_bh(&inst->lock);
-	inst->flags = ntohs(flags);
+	inst->flags = flags;
 	spin_unlock_bh(&inst->lock);
 
 	return 0;
@@ -902,7 +902,7 @@
 	if (nfula[NFULA_CFG_FLAGS-1]) {
 		u_int16_t flags =
 			*(u_int16_t *)NFA_DATA(nfula[NFULA_CFG_FLAGS-1]);
-		nfulnl_set_flags(inst, ntohl(flags));
+		nfulnl_set_flags(inst, ntohs(flags));
 	}
 
 out_put:
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 31eb837..138ea92 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -193,8 +193,10 @@
 		    netif_running(dev) &&
 		    netif_carrier_ok(dev)) {
 			if (netif_queue_stopped(dev) &&
-			    (jiffies - dev->trans_start) > dev->watchdog_timeo) {
-				printk(KERN_INFO "NETDEV WATCHDOG: %s: transmit timed out\n", dev->name);
+			    time_after(jiffies, dev->trans_start + dev->watchdog_timeo)) {
+
+				printk(KERN_INFO "NETDEV WATCHDOG: %s: transmit timed out\n",
+				       dev->name);
 				dev->tx_timeout(dev);
 			}
 			if (!mod_timer(&dev->watchdog_timer, jiffies + dev->watchdog_timeo))
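
/*
 * Editor's aside: time_after() is the kernel's canonical wrap-safe
 * jiffies comparison.  A runnable userspace model of the macro:
 */
#include <assert.h>

#define time_after(a, b)	((long)((b) - (a)) < 0)

int main(void)
{
	unsigned long before_wrap = (unsigned long)-10;	/* close to wrap */
	unsigned long after_wrap  = 5;			/* just past wrap */

	/* 15 ticks really elapsed, though the raw values went "backwards" */
	assert(time_after(after_wrap, before_wrap));
	return 0;
}
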
diff --git a/net/sctp/input.c b/net/sctp/input.c
index d117ebc..1662f9c 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -73,6 +73,8 @@
 					const union sctp_addr *peer,
 					struct sctp_transport **pt);
 
+static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb);
+
 
 /* Calculate the SCTP checksum of an SCTP packet.  */
 static inline int sctp_rcv_checksum(struct sk_buff *skb)
@@ -186,7 +188,6 @@
 	 */
 	if (sk->sk_bound_dev_if && (sk->sk_bound_dev_if != af->skb_iif(skb)))
 	{
-		sock_put(sk);
 		if (asoc) {
 			sctp_association_put(asoc);
 			asoc = NULL;
@@ -197,7 +198,6 @@
 		sk = sctp_get_ctl_sock();
 		ep = sctp_sk(sk)->ep;
 		sctp_endpoint_hold(ep);
-		sock_hold(sk);
 		rcvr = &ep->base;
 	}
 
@@ -253,25 +253,18 @@
 	 */
 	sctp_bh_lock_sock(sk);
 
-	/* It is possible that the association could have moved to a different
-	 * socket if it is peeled off. If so, update the sk.
-	 */ 
-	if (sk != rcvr->sk) {
-		sctp_bh_lock_sock(rcvr->sk);
-		sctp_bh_unlock_sock(sk);
-		sk = rcvr->sk;
-	}
-
 	if (sock_owned_by_user(sk))
-		sk_add_backlog(sk, skb);
+		sctp_add_backlog(sk, skb);
 	else
-		sctp_backlog_rcv(sk, skb);
+		sctp_inq_push(&chunk->rcvr->inqueue, chunk);
 
-	/* Release the sock and the sock ref we took in the lookup calls.
-	 * The asoc/ep ref will be released in sctp_backlog_rcv.
-	 */
 	sctp_bh_unlock_sock(sk);
-	sock_put(sk);
+
+	/* Release the asoc/ep ref we took in the lookup calls. */
+	if (asoc)
+		sctp_association_put(asoc);
+	else
+		sctp_endpoint_put(ep);
 
 	return 0;
 
@@ -280,8 +273,7 @@
 	return 0;
 
 discard_release:
-	/* Release any structures we may be holding. */
-	sock_put(sk);
+	/* Release the asoc/ep ref we took in the lookup calls. */
 	if (asoc)
 		sctp_association_put(asoc);
 	else
@@ -290,56 +282,87 @@
 	goto discard_it;
 }
 
-/* Handle second half of inbound skb processing.  If the sock was busy,
- * we may have need to delay processing until later when the sock is
- * released (on the backlog).   If not busy, we call this routine
- * directly from the bottom half.
+/* Process the backlog queue of the socket.  Every skb on
+ * the backlog holds a ref on an association or endpoint.
+ * We hold this ref throughout the state machine to make
+ * sure that the structure we need is still around.
  */
 int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 {
 	struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
- 	struct sctp_inq *inqueue = NULL;
+ 	struct sctp_inq *inqueue = &chunk->rcvr->inqueue;
  	struct sctp_ep_common *rcvr = NULL;
+	int backlogged = 0;
 
  	rcvr = chunk->rcvr;
 
-	BUG_TRAP(rcvr->sk == sk);
+	/* If the rcvr is dead then the association or endpoint
+	 * has been deleted and we can safely drop the chunk
+	 * and refs that we are holding.
+	 */
+	if (rcvr->dead) {
+		sctp_chunk_free(chunk);
+		goto done;
+	}
 
- 	if (rcvr->dead) {
- 		sctp_chunk_free(chunk);
- 	} else {
- 		inqueue = &chunk->rcvr->inqueue;
- 		sctp_inq_push(inqueue, chunk);
- 	}
+	if (unlikely(rcvr->sk != sk)) {
+		/* In this case, the association moved from one socket to
+		 * another.  We are currently sitting on the backlog of the
+		 * old socket, so we need to move.
+		 * However, since we are here in process context we
+		 * need to make sure that the user doesn't own
+		 * the new socket when we process the packet.
+		 * If the new socket is user-owned, queue the chunk to the
+		 * backlog of the new socket without dropping any refs.
+		 * Otherwise, we can safely push the chunk on the inqueue.
+		 */
 
-	/* Release the asoc/ep ref we took in the lookup calls in sctp_rcv. */ 
- 	if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
- 		sctp_association_put(sctp_assoc(rcvr));
- 	else
- 		sctp_endpoint_put(sctp_ep(rcvr));
-  
+		sk = rcvr->sk;
+		sctp_bh_lock_sock(sk);
+
+		if (sock_owned_by_user(sk)) {
+			sk_add_backlog(sk, skb);
+			backlogged = 1;
+		} else
+			sctp_inq_push(inqueue, chunk);
+
+		sctp_bh_unlock_sock(sk);
+
+		/* If the chunk was backlogged again, don't drop the refs */
+		if (backlogged)
+			return 0;
+	} else {
+		sctp_inq_push(inqueue, chunk);
+	}
+
+done:
+	/* Release the refs we took in sctp_add_backlog */
+	if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
+		sctp_association_put(sctp_assoc(rcvr));
+	else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
+		sctp_endpoint_put(sctp_ep(rcvr));
+	else
+		BUG();
+
         return 0;
 }
 
-void sctp_backlog_migrate(struct sctp_association *assoc, 
-			  struct sock *oldsk, struct sock *newsk)
+static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
 {
-	struct sk_buff *skb;
-	struct sctp_chunk *chunk;
+	struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
+	struct sctp_ep_common *rcvr = chunk->rcvr;
 
-	skb = oldsk->sk_backlog.head;
-	oldsk->sk_backlog.head = oldsk->sk_backlog.tail = NULL;
-	while (skb != NULL) {
-		struct sk_buff *next = skb->next;
+	/* Hold the assoc/ep while the skb sits on the backlog queue;
+	 * this way, the structures we need will not disappear from
+	 * under us. */
+	if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
+		sctp_association_hold(sctp_assoc(rcvr));
+	else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
+		sctp_endpoint_hold(sctp_ep(rcvr));
+	else
+		BUG();
 
-		chunk = SCTP_INPUT_CB(skb)->chunk;
-		skb->next = NULL;
-		if (&assoc->base == chunk->rcvr)
-			sk_add_backlog(newsk, skb);
-		else
-			sk_add_backlog(oldsk, skb);
-		skb = next;
-	}
+	sk_add_backlog(sk, skb);
 }
 
 /* Handle icmp frag needed error. */
@@ -412,7 +435,7 @@
 	union sctp_addr daddr;
 	struct sctp_af *af;
 	struct sock *sk = NULL;
-	struct sctp_association *asoc = NULL;
+	struct sctp_association *asoc;
 	struct sctp_transport *transport = NULL;
 
 	*app = NULL; *tpp = NULL;
@@ -453,7 +476,6 @@
 	return sk;
 
 out:
-	sock_put(sk);
 	if (asoc)
 		sctp_association_put(asoc);
 	return NULL;
@@ -463,7 +485,6 @@
 void sctp_err_finish(struct sock *sk, struct sctp_association *asoc)
 {
 	sctp_bh_unlock_sock(sk);
-	sock_put(sk);
 	if (asoc)
 		sctp_association_put(asoc);
 }
@@ -490,7 +511,7 @@
 	int type = skb->h.icmph->type;
 	int code = skb->h.icmph->code;
 	struct sock *sk;
-	struct sctp_association *asoc;
+	struct sctp_association *asoc = NULL;
 	struct sctp_transport *transport;
 	struct inet_sock *inet;
 	char *saveip, *savesctp;
@@ -716,7 +737,6 @@
 
 hit:
 	sctp_endpoint_hold(ep);
-	sock_hold(epb->sk);
 	read_unlock(&head->lock);
 	return ep;
 }
@@ -818,7 +838,6 @@
 hit:
 	*pt = transport;
 	sctp_association_hold(asoc);
-	sock_hold(epb->sk);
 	read_unlock(&head->lock);
 	return asoc;
 }
@@ -846,7 +865,6 @@
 	struct sctp_transport *transport;
 
 	if ((asoc = sctp_lookup_association(laddr, paddr, &transport))) {
-		sock_put(asoc->base.sk);
 		sctp_association_put(asoc);
 		return 1;
 	}
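
/*
 * Editor's summary sketch of the refcount protocol the input-path
 * rewrite above establishes (pseudocode; function names as in the
 * patch):
 *
 *   sctp_add_backlog():     hold(asoc-or-ep); sk_add_backlog(sk, skb);
 *   sctp_backlog_rcv():
 *       rcvr->dead             -> free the chunk,          put the ref
 *       rcvr->sk != sk and the
 *         new sk is user-owned -> re-backlog on new sk,    keep the ref
 *       otherwise              -> sctp_inq_push(),         put the ref
 *
 * The socket itself is no longer pinned by the lookup paths; the
 * association/endpoint reference is what keeps the needed structures
 * alive.
 */
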
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 8d1dc24..c5beb2a 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -498,10 +498,6 @@
 	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
 			SCTP_STATE(SCTP_STATE_CLOSED));
 
-	/* Set sk_err to ECONNRESET on a 1-1 style socket. */
-	if (!sctp_style(asoc->base.sk, UDP))
-		asoc->base.sk->sk_err = ECONNRESET; 
-
 	/* SEND_FAILED sent later when cleaning up the association. */
 	asoc->outqueue.error = error;
 	sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
@@ -838,6 +834,15 @@
 	return;
 }
 
+/* Helper function to set sk_err on a 1-1 style socket. */
+static void sctp_cmd_set_sk_err(struct sctp_association *asoc, int error)
+{
+	struct sock *sk = asoc->base.sk;
+
+	if (!sctp_style(sk, UDP))
+		sk->sk_err = error;
+}
+
 /* These three macros allow us to pull the debugging code out of the
  * main flow of sctp_do_sm() to keep attention focused on the real
  * functionality there.
@@ -1458,6 +1463,9 @@
 			local_cork = 0;
 			asoc->peer.retran_path = t;
 			break;
+		case SCTP_CMD_SET_SK_ERR:
+			sctp_cmd_set_sk_err(asoc, cmd->obj.error);
+			break;
 		default:
 			printk(KERN_WARNING "Impossible command: %u, %p\n",
 			       cmd->verb, cmd->obj.ptr);
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 8cdba51..8bc2792 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -93,7 +93,7 @@
 static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk);
 
 static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands,
-					   __u16 error,
+					   __u16 error, int sk_err,
 					   const struct sctp_association *asoc,
 					   struct sctp_transport *transport);
 
@@ -448,7 +448,7 @@
 	__u32 init_tag;
 	struct sctp_chunk *err_chunk;
 	struct sctp_packet *packet;
-	sctp_disposition_t ret;
+	__u16 error;
 
 	if (!sctp_vtag_verify(chunk, asoc))
 		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
@@ -480,11 +480,9 @@
 			goto nomem;
 
 		sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
-		sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
-				SCTP_STATE(SCTP_STATE_CLOSED));
-		SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
-		sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
-		return SCTP_DISPOSITION_DELETE_TCB;
+		return sctp_stop_t1_and_abort(commands, SCTP_ERROR_INV_PARAM,
+					      ECONNREFUSED, asoc,
+					      chunk->transport);
 	}
 
 	/* Verify the INIT chunk before processing it. */
@@ -511,27 +509,16 @@
 				sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
 						SCTP_PACKET(packet));
 				SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
-				sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
-						SCTP_STATE(SCTP_STATE_CLOSED));
-				sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB,
-						SCTP_NULL());
-				return SCTP_DISPOSITION_CONSUME;
+				error = SCTP_ERROR_INV_PARAM;
 			} else {
-				sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
-						SCTP_STATE(SCTP_STATE_CLOSED));
-				sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB,
-						SCTP_NULL());
-				return SCTP_DISPOSITION_NOMEM;
+				error = SCTP_ERROR_NO_RESOURCE;
 			}
 		} else {
-			ret = sctp_sf_tabort_8_4_8(ep, asoc, type, arg,
-						   commands);
-			sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
-					SCTP_STATE(SCTP_STATE_CLOSED));
-			sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB,
-					SCTP_NULL());
-			return ret;
+			sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
+			error = SCTP_ERROR_INV_PARAM;
 		}
+		return sctp_stop_t1_and_abort(commands, error, ECONNREFUSED,
+						asoc, chunk->transport);
 	}
 
 	/* Tag the variable length parameters.  Note that we never
@@ -886,6 +873,8 @@
 	struct sctp_transport *transport = (struct sctp_transport *) arg;
 
 	if (asoc->overall_error_count >= asoc->max_retrans) {
+		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+				SCTP_ERROR(ETIMEDOUT));
 		/* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
 		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
 				SCTP_U32(SCTP_ERROR_NO_ERROR));
@@ -1030,6 +1019,12 @@
 						  commands);
 
 	hbinfo = (sctp_sender_hb_info_t *) chunk->skb->data;
+	/* Make sure that the length of the parameter is what we expect */
+	if (ntohs(hbinfo->param_hdr.length) !=
+				    sizeof(sctp_sender_hb_info_t)) {
+		return SCTP_DISPOSITION_DISCARD;
+	}
+
 	from_addr = hbinfo->daddr;
 	link = sctp_assoc_lookup_paddr(asoc, &from_addr);
 
@@ -2126,6 +2121,8 @@
 	int attempts = asoc->init_err_counter + 1;
 
 	if (attempts > asoc->max_init_attempts) {
+		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+				SCTP_ERROR(ETIMEDOUT));
 		sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
 				SCTP_U32(SCTP_ERROR_STALE_COOKIE));
 		return SCTP_DISPOSITION_DELETE_TCB;
@@ -2262,6 +2259,7 @@
 	if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr))
 		error = ((sctp_errhdr_t *)chunk->skb->data)->cause;
 
+	sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNRESET));
  	/* ASSOC_FAILED will DELETE_TCB. */
 	sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_U32(error));
 	SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
@@ -2306,7 +2304,8 @@
 	if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr))
 		error = ((sctp_errhdr_t *)chunk->skb->data)->cause;
 
-	return sctp_stop_t1_and_abort(commands, error, asoc, chunk->transport);
+	return sctp_stop_t1_and_abort(commands, error, ECONNREFUSED, asoc,
+				      chunk->transport);
 }
 
 /*
@@ -2318,7 +2317,8 @@
 					void *arg,
 					sctp_cmd_seq_t *commands)
 {
-	return sctp_stop_t1_and_abort(commands, SCTP_ERROR_NO_ERROR, asoc,
+	return sctp_stop_t1_and_abort(commands, SCTP_ERROR_NO_ERROR,
+				      ENOPROTOOPT, asoc,
 				      (struct sctp_transport *)arg);
 }
 
@@ -2343,7 +2343,7 @@
  * This is common code called by several sctp_sf_*_abort() functions above.
  */
 static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands,
-					   __u16 error,
+					   __u16 error, int sk_err,
 					   const struct sctp_association *asoc,
 					   struct sctp_transport *transport)
 {
@@ -2353,6 +2353,7 @@
 	SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
 	sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
 			SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
+	sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(sk_err));
 	/* CMD_INIT_FAILED will DELETE_TCB. */
 	sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
 			SCTP_U32(error));
@@ -3336,6 +3337,8 @@
 		sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
 				SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
 		sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
+		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+			        SCTP_ERROR(ECONNABORTED));
 		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
 				SCTP_U32(SCTP_ERROR_ASCONF_ACK));
 		SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
@@ -3362,6 +3365,8 @@
 		 * processing the rest of the chunks in the packet.
 		 */
 		sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
+		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+			        SCTP_ERROR(ECONNABORTED));
 		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
 				SCTP_U32(SCTP_ERROR_ASCONF_ACK));
 		SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
@@ -3714,9 +3719,13 @@
 	if (asoc->state <= SCTP_STATE_COOKIE_ECHOED) {
 		sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
 				SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
+		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+				SCTP_ERROR(ECONNREFUSED));
 		sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
 				SCTP_U32(SCTP_ERROR_PROTO_VIOLATION));
 	} else {
+		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+			        SCTP_ERROR(ECONNABORTED));
 		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
 				SCTP_U32(SCTP_ERROR_PROTO_VIOLATION));
 		SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
@@ -4034,6 +4043,8 @@
 	 * TCB.  This is a departure from our typical NOMEM handling.
 	 */
 
+	sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+			SCTP_ERROR(ECONNABORTED));
 	/* Delete the established association. */
 	sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
 			SCTP_U32(SCTP_ERROR_USER_ABORT));
@@ -4175,6 +4186,8 @@
 	 * TCB.  This is a departure from our typical NOMEM handling.
 	 */
 
+	sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+			SCTP_ERROR(ECONNREFUSED));
 	/* Delete the established association. */
 	sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
 			SCTP_U32(SCTP_ERROR_USER_ABORT));
@@ -4543,6 +4556,8 @@
 	struct sctp_transport *transport = arg;
 
 	if (asoc->overall_error_count >= asoc->max_retrans) {
+		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+				SCTP_ERROR(ETIMEDOUT));
 		/* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
 		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
 				SCTP_U32(SCTP_ERROR_NO_ERROR));
@@ -4662,6 +4677,8 @@
 		SCTP_DEBUG_PRINTK("Giving up on INIT, attempts: %d"
 				  " max_init_attempts: %d\n",
 				  attempts, asoc->max_init_attempts);
+		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+				SCTP_ERROR(ETIMEDOUT));
 		sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
 				SCTP_U32(SCTP_ERROR_NO_ERROR));
 		return SCTP_DISPOSITION_DELETE_TCB;
@@ -4711,6 +4728,8 @@
 
 		sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
 	} else {
+		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+				SCTP_ERROR(ETIMEDOUT));
 		sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
 				SCTP_U32(SCTP_ERROR_NO_ERROR));
 		return SCTP_DISPOSITION_DELETE_TCB;
@@ -4742,6 +4761,8 @@
 
 	SCTP_DEBUG_PRINTK("Timer T2 expired.\n");
 	if (asoc->overall_error_count >= asoc->max_retrans) {
+		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+				SCTP_ERROR(ETIMEDOUT));
 		/* Note:  CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
 		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
 				SCTP_U32(SCTP_ERROR_NO_ERROR));
@@ -4817,6 +4838,8 @@
 	if (asoc->overall_error_count >= asoc->max_retrans) {
 		sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
 				SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
+		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+				SCTP_ERROR(ETIMEDOUT));
 		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
 				SCTP_U32(SCTP_ERROR_NO_ERROR));
 		SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
@@ -4870,6 +4893,8 @@
 		goto nomem;
 
 	sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
+	sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+			SCTP_ERROR(ETIMEDOUT));
 	sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
 			SCTP_U32(SCTP_ERROR_NO_ERROR));
 
@@ -5309,6 +5334,8 @@
 		 * processing the rest of the chunks in the packet.
 		 */
 		sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
+		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+				SCTP_ERROR(ECONNABORTED));
 		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
 				SCTP_U32(SCTP_ERROR_NO_DATA));
 		SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index b6e4b89..174d4d3 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -1057,6 +1057,7 @@
 	inet_sk(sk)->dport = htons(asoc->peer.port);
 	af = sctp_get_af_specific(to.sa.sa_family);
 	af->to_sk_daddr(&to, sk);
+	sk->sk_err = 0;
 
 	timeo = sock_sndtimeo(sk, sk->sk_socket->file->f_flags & O_NONBLOCK);
 	err = sctp_wait_for_connect(asoc, &timeo);
@@ -1228,7 +1229,7 @@
 
 	ep = sctp_sk(sk)->ep;
 
-	/* Walk all associations on a socket, not on an endpoint.  */
+	/* Walk all associations on an endpoint.  */
 	list_for_each_safe(pos, temp, &ep->asocs) {
 		asoc = list_entry(pos, struct sctp_association, asocs);
 
@@ -1241,13 +1242,13 @@
 			if (sctp_state(asoc, CLOSED)) {
 				sctp_unhash_established(asoc);
 				sctp_association_free(asoc);
+				continue;
+			}
+		}
 
-			} else if (sock_flag(sk, SOCK_LINGER) &&
-				   !sk->sk_lingertime)
-				sctp_primitive_ABORT(asoc, NULL);
-			else
-				sctp_primitive_SHUTDOWN(asoc, NULL);
-		} else
+		if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)
+			sctp_primitive_ABORT(asoc, NULL);
+		else
 			sctp_primitive_SHUTDOWN(asoc, NULL);
 	}
 
@@ -5317,6 +5318,7 @@
 		 */
 		sctp_release_sock(sk);
 		current_timeo = schedule_timeout(current_timeo);
+		BUG_ON(sk != asoc->base.sk);
 		sctp_lock_sock(sk);
 
 		*timeo_p = current_timeo;
@@ -5604,12 +5606,14 @@
 	 */
 	newsp->type = type;
 
-	spin_lock_bh(&oldsk->sk_lock.slock);
-	/* Migrate the backlog from oldsk to newsk. */
-	sctp_backlog_migrate(assoc, oldsk, newsk);
-	/* Migrate the association to the new socket. */
+	/* Mark the new socket "in-use" by the user so that any packets
+	 * that may arrive on the association after we've moved it are
+	 * queued to the backlog.  This prevents a potential race between
+	 * backlog processing on the old socket and new-packet processing
+	 * on the new socket.
+	 */
+	sctp_lock_sock(newsk);
 	sctp_assoc_migrate(assoc, newsk);
-	spin_unlock_bh(&oldsk->sk_lock.slock);
 
 	/* If the association on the newsk is already closed before accept()
 	 * is called, set RCV_SHUTDOWN flag.
@@ -5618,6 +5622,7 @@
 		newsk->sk_shutdown |= RCV_SHUTDOWN;
 
 	newsk->sk_state = SCTP_SS_ESTABLISHED;
+	sctp_release_sock(newsk);
 }
 
 /* This proto struct describes the ULP interface for SCTP.  */