Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6:
  firewire: Only set client->iso_context if allocation was successful.
  ieee1394: fix to ether1394_tx in ether1394.c
  firewire: fix hang after card ejection
diff --git a/CREDITS b/CREDITS
index 273d72b..79fd13d 100644
--- a/CREDITS
+++ b/CREDITS
@@ -3301,14 +3301,6 @@
 S: Beaverton, Oregon 97005
 S: USA
 
-N: Li Yang
-E: leoli@freescale.com
-D: Freescale Highspeed USB device driver
-D: Freescale QE SoC support and Ethernet driver
-S: B-1206 Jingmao Guojigongyu
-S: 16 Baliqiao Nanjie, Beijing 101100
-S: People's Repulic of China
-
 N: Marcelo Tosatti
 E: marcelo@kvack.org
 D: v2.4 kernel maintainer
@@ -3726,6 +3718,14 @@
 S: New York, New York 10025
 S: USA
 
+N: Li Yang
+E: leoli@freescale.com
+D: Freescale Highspeed USB device driver
+D: Freescale QE SoC support and Ethernet driver
+S: B-1206 Jingmao Guojigongyu
+S: 16 Baliqiao Nanjie, Beijing 101100
+S: People's Republic of China
+
 N: Victor Yodaiken
 E: yodaiken@fsmlabs.com
 D: RTLinux (RealTime Linux)
diff --git a/MAINTAINERS b/MAINTAINERS
index bef7977..4ce895a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4022,11 +4022,11 @@
 
 XFS FILESYSTEM
 P:	Silicon Graphics Inc
-P:	Tim Shimmin, David Chatterton
+P:	Tim Shimmin
 M:	xfs-masters@oss.sgi.com
 L:	xfs@oss.sgi.com
 W:	http://oss.sgi.com/projects/xfs
-T:	git git://oss.sgi.com:8090/xfs/xfs-2.6
+T:	git git://oss.sgi.com:8090/xfs/xfs-2.6.git
 S:	Supported
 
 XILINX UARTLITE SERIAL DRIVER
diff --git a/Makefile b/Makefile
index 30d685b..b76ee94 100644
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 22
-EXTRAVERSION = -rc4
-NAME = Jeff Thinks I Should Change This, But To What?
+EXTRAVERSION = -rc5
+NAME = Holy Dancing Manatees, Batman!
 
 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"
diff --git a/arch/i386/Kconfig.debug b/arch/i386/Kconfig.debug
index b31c080..6293920 100644
--- a/arch/i386/Kconfig.debug
+++ b/arch/i386/Kconfig.debug
@@ -49,6 +49,7 @@
 config DEBUG_RODATA
 	bool "Write protect kernel read-only data structures"
 	depends on DEBUG_KERNEL
+	depends on !KPROBES # temporary for 2.6.22
 	help
 	  Mark the kernel read-only data as write-protected in the pagetables,
 	  in order to catch accidental (and incorrect) writes to such const
diff --git a/arch/i386/kernel/cpu/mtrr/generic.c b/arch/i386/kernel/cpu/mtrr/generic.c
index c4ebb51..6d59378 100644
--- a/arch/i386/kernel/cpu/mtrr/generic.c
+++ b/arch/i386/kernel/cpu/mtrr/generic.c
@@ -42,7 +42,7 @@
 module_param_named(show, mtrr_show, bool, 0);
 
 /*  Get the MSR pair relating to a var range  */
-static void __init
+static void
 get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
 {
 	rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
@@ -68,7 +68,7 @@
 	get_fixed_ranges(mtrr_state.fixed_ranges);
 }
 
-static void __cpuinit print_fixed(unsigned base, unsigned step, const mtrr_type*types)
+static void print_fixed(unsigned base, unsigned step, const mtrr_type*types)
 {
 	unsigned i;
 
diff --git a/arch/i386/kernel/cpu/mtrr/main.c b/arch/i386/kernel/cpu/mtrr/main.c
index 7202b98..55b0051 100644
--- a/arch/i386/kernel/cpu/mtrr/main.c
+++ b/arch/i386/kernel/cpu/mtrr/main.c
@@ -639,7 +639,7 @@
  * initialized (i.e. before smp_init()).
  * 
  */
-void mtrr_bp_init(void)
+void __init mtrr_bp_init(void)
 {
 	init_ifs();
 
diff --git a/arch/i386/kernel/cpu/perfctr-watchdog.c b/arch/i386/kernel/cpu/perfctr-watchdog.c
index 2b04c8f..f0b6763 100644
--- a/arch/i386/kernel/cpu/perfctr-watchdog.c
+++ b/arch/i386/kernel/cpu/perfctr-watchdog.c
@@ -28,7 +28,7 @@
 	void (*unreserve)(void);
 	int (*setup)(unsigned nmi_hz);
 	void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
-	void (*stop)(void *);
+	void (*stop)(void);
 	unsigned perfctr;
 	unsigned evntsel;
 	u64 checkbit;
@@ -142,7 +142,7 @@
 	if (atomic_read(&nmi_active) <= 0)
 		return;
 
-	on_each_cpu(wd_ops->stop, NULL, 0, 1);
+	on_each_cpu(stop_apic_nmi_watchdog, NULL, 0, 1);
 	wd_ops->unreserve();
 
 	BUG_ON(atomic_read(&nmi_active) != 0);
@@ -255,7 +255,7 @@
 	return 1;
 }
 
-static void single_msr_stop_watchdog(void *arg)
+static void single_msr_stop_watchdog(void)
 {
 	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
 
@@ -276,8 +276,8 @@
 
 static void single_msr_unreserve(void)
 {
-	release_evntsel_nmi(wd_ops->perfctr);
-	release_perfctr_nmi(wd_ops->evntsel);
+	release_evntsel_nmi(wd_ops->evntsel);
+	release_perfctr_nmi(wd_ops->perfctr);
 }
 
 static void single_msr_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
@@ -442,7 +442,7 @@
 	return 1;
 }
 
-static void stop_p4_watchdog(void *arg)
+static void stop_p4_watchdog(void)
 {
 	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
 	wrmsr(wd->cccr_msr, 0, 0);
@@ -475,10 +475,10 @@
 {
 #ifdef CONFIG_SMP
 	if (smp_num_siblings > 1)
-		release_evntsel_nmi(MSR_P4_IQ_PERFCTR1);
+		release_perfctr_nmi(MSR_P4_IQ_PERFCTR1);
 #endif
-	release_evntsel_nmi(MSR_P4_IQ_PERFCTR0);
-	release_perfctr_nmi(MSR_P4_CRU_ESCR0);
+	release_evntsel_nmi(MSR_P4_CRU_ESCR0);
+	release_perfctr_nmi(MSR_P4_IQ_PERFCTR0);
 }
 
 static void p4_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
@@ -614,6 +614,12 @@
 		probe_nmi_watchdog();
 		if (!wd_ops)
 			return -1;
+
+		if (!wd_ops->reserve()) {
+			printk(KERN_ERR
+				"NMI watchdog: cannot reserve perfctrs\n");
+			return -1;
+		}
 	}
 
 	if (!(wd_ops->setup(nmi_hz))) {
@@ -628,7 +634,7 @@
 void lapic_watchdog_stop(void)
 {
 	if (wd_ops)
-		wd_ops->stop(NULL);
+		wd_ops->stop();
 }
 
 unsigned lapic_adjust_nmi_hz(unsigned hz)
diff --git a/arch/i386/kernel/pci-dma.c b/arch/i386/kernel/pci-dma.c
index 30b754f..048f09b 100644
--- a/arch/i386/kernel/pci-dma.c
+++ b/arch/i386/kernel/pci-dma.c
@@ -12,6 +12,7 @@
 #include <linux/string.h>
 #include <linux/pci.h>
 #include <linux/module.h>
+#include <linux/pci.h>
 #include <asm/io.h>
 
 struct dma_coherent_mem {
@@ -148,3 +149,29 @@
 	return mem->virt_base + (pos << PAGE_SHIFT);
 }
 EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
+
+#ifdef CONFIG_PCI
+/* Many VIA bridges seem to corrupt data for DAC. Disable it here */
+
+int forbid_dac;
+EXPORT_SYMBOL(forbid_dac);
+
+static __devinit void via_no_dac(struct pci_dev *dev)
+{
+	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
+		printk(KERN_INFO "PCI: VIA PCI bridge detected. Disabling DAC.\n");
+		forbid_dac = 1;
+	}
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
+
+static int check_iommu(char *s)
+{
+	if (!strcmp(s, "usedac")) {
+		forbid_dac = -1;
+		return 1;
+	}
+	return 0;
+}
+__setup("iommu=", check_iommu);
+#endif
diff --git a/arch/i386/mm/pageattr.c b/arch/i386/mm/pageattr.c
index 47bd477..2eb14a7 100644
--- a/arch/i386/mm/pageattr.c
+++ b/arch/i386/mm/pageattr.c
@@ -68,14 +68,23 @@
 	return base;
 } 
 
-static void flush_kernel_map(void *arg)
+static void cache_flush_page(struct page *p)
 { 
-	unsigned long adr = (unsigned long)arg;
+	unsigned long adr = (unsigned long)page_address(p);
+	int i;
+	for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
+		asm volatile("clflush (%0)" :: "r" (adr + i));
+}
 
-	if (adr && cpu_has_clflush) {
-		int i;
-		for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
-			asm volatile("clflush (%0)" :: "r" (adr + i));
+static void flush_kernel_map(void *arg)
+{
+	struct list_head *lh = (struct list_head *)arg;
+	struct page *p;
+
+	/* High level code is not ready for clflush yet */
+	if (0 && cpu_has_clflush) {
+		list_for_each_entry (p, lh, lru)
+			cache_flush_page(p);
 	} else if (boot_cpu_data.x86_model >= 4)
 		wbinvd();
 
@@ -181,9 +190,9 @@
 	return 0;
 } 
 
-static inline void flush_map(void *adr)
+static inline void flush_map(struct list_head *l)
 {
-	on_each_cpu(flush_kernel_map, adr, 1, 1);
+	on_each_cpu(flush_kernel_map, l, 1, 1);
 }
 
 /*
@@ -225,11 +234,8 @@
 	spin_lock_irq(&cpa_lock);
 	list_replace_init(&df_list, &l);
 	spin_unlock_irq(&cpa_lock);
-	if (!cpu_has_clflush)
-		flush_map(NULL);
+	flush_map(&l);
 	list_for_each_entry_safe(pg, next, &l, lru) {
-		if (cpu_has_clflush)
-			flush_map(page_address(pg));
 		__free_page(pg);
 	}
 }
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 85cdd23..a86e2e9 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -418,9 +418,6 @@
 	help
 	  Say Y here to report ST-RAM usage statistics in /proc/stram.
 
-config ATARI_KBD_CORE
-	bool
-
 config HEARTBEAT
 	bool "Use power LED as a heartbeat" if AMIGA || APOLLO || ATARI || MAC ||Q40
 	default y if !AMIGA && !APOLLO && !ATARI && !MAC && !Q40 && HP300
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 21eb599..2e01147 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -13,9 +13,9 @@
 #include <asm/system.h>
 #include <asm/hardirq.h>
 #include <asm/hazards.h>
+#include <asm/irq.h>
 #include <asm/mmu_context.h>
 #include <asm/smp.h>
-#include <asm/mips-boards/maltaint.h>
 #include <asm/mipsregs.h>
 #include <asm/cacheflush.h>
 #include <asm/time.h>
@@ -614,7 +614,7 @@
 #ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
 	unsigned int vpe = current_cpu_data.vpe_id;
 
-	vpemask[vpe][irq - MIPSCPU_INT_BASE] = 1;
+	vpemask[vpe][irq - MIPS_CPU_IRQ_BASE] = 1;
 #endif
 	irq_hwmask[irq] = hwmask;
 
@@ -822,7 +822,7 @@
 	switch (type_copy) {
 	case SMTC_CLOCK_TICK:
 		irq_enter();
-		kstat_this_cpu.irqs[MIPSCPU_INT_BASE + MIPSCPU_INT_CPUCTR]++;
+		kstat_this_cpu.irqs[MIPS_CPU_IRQ_BASE + cp0_compare_irq]++;
 		/* Invoke Clock "Interrupt" */
 		ipi_timer_latch[dest_copy] = 0;
 #ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
index 7def1ff..d48d1d5 100644
--- a/arch/mips/kernel/time.c
+++ b/arch/mips/kernel/time.c
@@ -200,10 +200,15 @@
 EXPORT_SYMBOL(perf_irq);
 
 /*
+ * Timer interrupt
+ */
+int cp0_compare_irq;
+
+/*
  * Performance counter IRQ or -1 if shared with timer
  */
-int mipsxx_perfcount_irq;
-EXPORT_SYMBOL(mipsxx_perfcount_irq);
+int cp0_perfcount_irq;
+EXPORT_SYMBOL_GPL(cp0_perfcount_irq);
 
 /*
  * Possibly handle a performance counter interrupt.
@@ -213,12 +218,12 @@
 {
 	/*
 	 * The performance counter overflow interrupt may be shared with the
-	 * timer interrupt (mipsxx_perfcount_irq < 0). If it is and a
+	 * timer interrupt (cp0_perfcount_irq < 0). If it is and a
 	 * performance counter has overflowed (perf_irq() == IRQ_HANDLED)
 	 * and we can't reliably determine if a counter interrupt has also
 	 * happened (!r2) then don't check for a timer interrupt.
 	 */
-	return (mipsxx_perfcount_irq < 0) &&
+	return (cp0_perfcount_irq < 0) &&
 		perf_irq() == IRQ_HANDLED &&
 		!r2;
 }
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index a7a17eb..b123364 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -1350,9 +1350,6 @@
 	if (!secondaryTC) {
 #endif /* CONFIG_MIPS_MT_SMTC */
 
-	/*
-	 * Interrupt handling.
-	 */
 	if (cpu_has_veic || cpu_has_vint) {
 		write_c0_ebase (ebase);
 		/* Setting vector spacing enables EI/VI mode  */
@@ -1366,6 +1363,23 @@
 		} else
 			set_c0_cause(CAUSEF_IV);
 	}
+
+	/*
+	 * Before R2 both interrupt numbers were fixed to 7, so on R2 only:
+	 *
+	 *  o read IntCtl.IPTI to determine the timer interrupt
+	 *  o read IntCtl.IPPCI to determine the performance counter interrupt
+	 */
+	if (cpu_has_mips_r2) {
+		cp0_compare_irq = (read_c0_intctl () >> 29) & 7;
+		cp0_perfcount_irq = (read_c0_intctl () >> 26) & 7;
+		if (cp0_perfcount_irq == cp0_compare_irq)
+			cp0_perfcount_irq = -1;
+	} else {
+		cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
+		cp0_perfcount_irq = -1;
+	}
+
 #ifdef CONFIG_MIPS_MT_SMTC
 	}
 #endif /* CONFIG_MIPS_MT_SMTC */
diff --git a/arch/mips/mips-boards/atlas/atlas_int.c b/arch/mips/mips-boards/atlas/atlas_int.c
index 9f49da9..6c8f025 100644
--- a/arch/mips/mips-boards/atlas/atlas_int.c
+++ b/arch/mips/mips-boards/atlas/atlas_int.c
@@ -189,7 +189,7 @@
 	if (irq == MIPSCPU_INT_ATLAS)
 		atlas_hw0_irqdispatch();
 	else if (irq >= 0)
-		do_IRQ(MIPSCPU_INT_BASE + irq);
+		do_IRQ(MIPS_CPU_IRQ_BASE + irq);
 	else
 		spurious_interrupt();
 }
@@ -261,11 +261,11 @@
 	} else if (cpu_has_vint) {
 		set_vi_handler (MIPSCPU_INT_ATLAS, atlas_hw0_irqdispatch);
 #ifdef CONFIG_MIPS_MT_SMTC
-		setup_irq_smtc (MIPSCPU_INT_BASE + MIPSCPU_INT_ATLAS,
+		setup_irq_smtc (MIPS_CPU_IRQ_BASE + MIPSCPU_INT_ATLAS,
 				&atlasirq, (0x100 << MIPSCPU_INT_ATLAS));
 #else /* Not SMTC */
-		setup_irq(MIPSCPU_INT_BASE + MIPSCPU_INT_ATLAS, &atlasirq);
+		setup_irq(MIPS_CPU_IRQ_BASE + MIPSCPU_INT_ATLAS, &atlasirq);
 #endif /* CONFIG_MIPS_MT_SMTC */
 	} else
-		setup_irq(MIPSCPU_INT_BASE + MIPSCPU_INT_ATLAS, &atlasirq);
+		setup_irq(MIPS_CPU_IRQ_BASE + MIPSCPU_INT_ATLAS, &atlasirq);
 }
diff --git a/arch/mips/mips-boards/generic/time.c b/arch/mips/mips-boards/generic/time.c
index 8f1000f..c45d556 100644
--- a/arch/mips/mips-boards/generic/time.c
+++ b/arch/mips/mips-boards/generic/time.c
@@ -54,7 +54,7 @@
 unsigned long cpu_khz;
 
 static int mips_cpu_timer_irq;
-extern int mipsxx_perfcount_irq;
+extern int cp0_perfcount_irq;
 extern void smtc_timer_broadcast(int);
 
 static void mips_timer_dispatch(void)
@@ -64,7 +64,7 @@
 
 static void mips_perf_dispatch(void)
 {
-	do_IRQ(mipsxx_perfcount_irq);
+	do_IRQ(cp0_perfcount_irq);
 }
 
 /*
@@ -82,12 +82,12 @@
 {
 	/*
 	 * The performance counter overflow interrupt may be shared with the
-	 * timer interrupt (mipsxx_perfcount_irq < 0). If it is and a
+	 * timer interrupt (cp0_perfcount_irq < 0). If it is and a
 	 * performance counter has overflowed (perf_irq() == IRQ_HANDLED)
 	 * and we can't reliably determine if a counter interrupt has also
 	 * happened (!r2) then don't check for a timer interrupt.
 	 */
-	return (mipsxx_perfcount_irq < 0) &&
+	return (cp0_perfcount_irq < 0) &&
 		perf_irq() == IRQ_HANDLED &&
 		!r2;
 }
@@ -259,42 +259,31 @@
 
 void __init plat_perf_setup(struct irqaction *irq)
 {
-	int hwint = 0;
-	mipsxx_perfcount_irq = -1;
+	cp0_perfcount_irq = -1;
 
 #ifdef MSC01E_INT_BASE
 	if (cpu_has_veic) {
 		set_vi_handler (MSC01E_INT_PERFCTR, mips_perf_dispatch);
-		mipsxx_perfcount_irq = MSC01E_INT_BASE + MSC01E_INT_PERFCTR;
+		cp0_perfcount_irq = MSC01E_INT_BASE + MSC01E_INT_PERFCTR;
 	} else
 #endif
-	if (cpu_has_mips_r2) {
-		/*
-		 * Read IntCtl.IPPCI to determine the performance
-		 * counter interrupt
-		 */
-		hwint = (read_c0_intctl () >> 26) & 7;
-		if (hwint != MIPSCPU_INT_CPUCTR) {
-			if (cpu_has_vint)
-				set_vi_handler (hwint, mips_perf_dispatch);
-			mipsxx_perfcount_irq = MIPSCPU_INT_BASE + hwint;
-		}
-	}
-	if (mipsxx_perfcount_irq >= 0) {
+	if (cp0_perfcount_irq >= 0) {
+		if (cpu_has_vint)
+			set_vi_handler(cp0_perfcount_irq, mips_perf_dispatch);
 #ifdef CONFIG_MIPS_MT_SMTC
-		setup_irq_smtc(mipsxx_perfcount_irq, irq, 0x100 << hwint);
+		setup_irq_smtc(cp0_perfcount_irq, irq,
+		               0x100 << cp0_perfcount_irq);
 #else
-		setup_irq(mipsxx_perfcount_irq, irq);
+		setup_irq(cp0_perfcount_irq, irq);
 #endif /* CONFIG_MIPS_MT_SMTC */
 #ifdef CONFIG_SMP
-		set_irq_handler(mipsxx_perfcount_irq, handle_percpu_irq);
+		set_irq_handler(cp0_perfcount_irq, handle_percpu_irq);
 #endif
 	}
 }
 
 void __init plat_timer_setup(struct irqaction *irq)
 {
-	int hwint = 0;
 #ifdef MSC01E_INT_BASE
 	if (cpu_has_veic) {
 		set_vi_handler (MSC01E_INT_CPUCTR, mips_timer_dispatch);
@@ -303,22 +292,15 @@
 	else
 #endif
 	{
-		if (cpu_has_mips_r2)
-			/*
-			 * Read IntCtl.IPTI to determine the timer interrupt
-			 */
-			hwint = (read_c0_intctl () >> 29) & 7;
-		else
-			hwint = MIPSCPU_INT_CPUCTR;
 		if (cpu_has_vint)
-			set_vi_handler (hwint, mips_timer_dispatch);
-		mips_cpu_timer_irq = MIPSCPU_INT_BASE + hwint;
+			set_vi_handler(cp0_compare_irq, mips_timer_dispatch);
+		mips_cpu_timer_irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
 	}
 
 	/* we are using the cpu counter for timer interrupts */
 	irq->handler = mips_timer_interrupt;	/* we use our own handler */
 #ifdef CONFIG_MIPS_MT_SMTC
-	setup_irq_smtc(mips_cpu_timer_irq, irq, 0x100 << hwint);
+	setup_irq_smtc(mips_cpu_timer_irq, irq, 0x100 << cp0_compare_irq);
 #else
 	setup_irq(mips_cpu_timer_irq, irq);
 #endif /* CONFIG_MIPS_MT_SMTC */
diff --git a/arch/mips/mips-boards/malta/malta_int.c b/arch/mips/mips-boards/malta/malta_int.c
index 1668cc2..c78d4834 100644
--- a/arch/mips/mips-boards/malta/malta_int.c
+++ b/arch/mips/mips-boards/malta/malta_int.c
@@ -257,7 +257,7 @@
 	if (irq == MIPSCPU_INT_I8259A)
 		malta_hw0_irqdispatch();
 	else if (irq > 0)
-		do_IRQ(MIPSCPU_INT_BASE + irq);
+		do_IRQ(MIPS_CPU_IRQ_BASE + irq);
 	else
 		spurious_interrupt();
 }
@@ -326,17 +326,17 @@
 		set_vi_handler (MIPSCPU_INT_I8259A, malta_hw0_irqdispatch);
 		set_vi_handler (MIPSCPU_INT_COREHI, corehi_irqdispatch);
 #ifdef CONFIG_MIPS_MT_SMTC
-		setup_irq_smtc (MIPSCPU_INT_BASE+MIPSCPU_INT_I8259A, &i8259irq,
+		setup_irq_smtc (MIPS_CPU_IRQ_BASE+MIPSCPU_INT_I8259A, &i8259irq,
 			(0x100 << MIPSCPU_INT_I8259A));
-		setup_irq_smtc (MIPSCPU_INT_BASE+MIPSCPU_INT_COREHI,
+		setup_irq_smtc (MIPS_CPU_IRQ_BASE+MIPSCPU_INT_COREHI,
 			&corehi_irqaction, (0x100 << MIPSCPU_INT_COREHI));
 #else /* Not SMTC */
-		setup_irq (MIPSCPU_INT_BASE+MIPSCPU_INT_I8259A, &i8259irq);
-		setup_irq (MIPSCPU_INT_BASE+MIPSCPU_INT_COREHI, &corehi_irqaction);
+		setup_irq (MIPS_CPU_IRQ_BASE+MIPSCPU_INT_I8259A, &i8259irq);
+		setup_irq (MIPS_CPU_IRQ_BASE+MIPSCPU_INT_COREHI, &corehi_irqaction);
 #endif /* CONFIG_MIPS_MT_SMTC */
 	}
 	else {
-		setup_irq (MIPSCPU_INT_BASE+MIPSCPU_INT_I8259A, &i8259irq);
-		setup_irq (MIPSCPU_INT_BASE+MIPSCPU_INT_COREHI, &corehi_irqaction);
+		setup_irq (MIPS_CPU_IRQ_BASE+MIPSCPU_INT_I8259A, &i8259irq);
+		setup_irq (MIPS_CPU_IRQ_BASE+MIPSCPU_INT_COREHI, &corehi_irqaction);
 	}
 }
diff --git a/arch/mips/mips-boards/sead/sead_int.c b/arch/mips/mips-boards/sead/sead_int.c
index c4b9de3..9ca0f82 100644
--- a/arch/mips/mips-boards/sead/sead_int.c
+++ b/arch/mips/mips-boards/sead/sead_int.c
@@ -106,7 +106,7 @@
 	irq = irq_ffs(pending);
 
 	if (irq >= 0)
-		do_IRQ(MIPSCPU_INT_BASE + irq);
+		do_IRQ(MIPS_CPU_IRQ_BASE + irq);
 	else
 		spurious_interrupt();
 }
diff --git a/arch/mips/mips-boards/sead/sead_setup.c b/arch/mips/mips-boards/sead/sead_setup.c
index 811aba1..bb80140 100644
--- a/arch/mips/mips-boards/sead/sead_setup.c
+++ b/arch/mips/mips-boards/sead/sead_setup.c
@@ -68,7 +68,7 @@
 #else
 	s.iobase = SEAD_UART0_REGS_BASE+3;
 #endif
-	s.irq = MIPSCPU_INT_BASE + MIPSCPU_INT_UART0;
+	s.irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_UART0;
 	s.uartclk = SEAD_BASE_BAUD * 16;
 	s.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_AUTO_IRQ;
 	s.iotype = UPIO_PORT;
diff --git a/arch/mips/mips-boards/sim/sim_int.c b/arch/mips/mips-boards/sim/sim_int.c
index 15ac065..766e015 100644
--- a/arch/mips/mips-boards/sim/sim_int.c
+++ b/arch/mips/mips-boards/sim/sim_int.c
@@ -77,7 +77,7 @@
 	irq = irq_ffs(pending);
 
 	if (irq > 0)
-		do_IRQ(MIPSCPU_INT_BASE + irq);
+		do_IRQ(MIPS_CPU_IRQ_BASE + irq);
 	else
 		spurious_interrupt();
 }
diff --git a/arch/mips/mips-boards/sim/sim_time.c b/arch/mips/mips-boards/sim/sim_time.c
index d3a21c7..7224ffe 100644
--- a/arch/mips/mips-boards/sim/sim_time.c
+++ b/arch/mips/mips-boards/sim/sim_time.c
@@ -71,8 +71,8 @@
 
 	int vpflags = dvpe();
 	write_c0_compare (read_c0_count() - 1);
-	clear_c0_cause(0x100 << MIPSCPU_INT_CPUCTR);
-	set_c0_status(0x100 << MIPSCPU_INT_CPUCTR);
+	clear_c0_cause(0x100 << cp0_compare_irq);
+	set_c0_status(0x100 << cp0_compare_irq);
 	irq_enable_hazard();
 	evpe(vpflags);
 
@@ -183,8 +183,8 @@
 	}
 	else {
 		if (cpu_has_vint)
-			set_vi_handler(MIPSCPU_INT_CPUCTR, mips_timer_dispatch);
-		mips_cpu_timer_irq = MIPSCPU_INT_BASE + MIPSCPU_INT_CPUCTR;
+			set_vi_handler(cp0_compare_irq, mips_timer_dispatch);
+		mips_cpu_timer_irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
 	}
 
 	/* we are using the cpu counter for timer interrupts */
diff --git a/arch/powerpc/lib/rheap.c b/arch/powerpc/lib/rheap.c
index 180ee29..2f24ea0 100644
--- a/arch/powerpc/lib/rheap.c
+++ b/arch/powerpc/lib/rheap.c
@@ -437,27 +437,26 @@
 	struct list_head *l;
 	rh_block_t *blk;
 	rh_block_t *newblk;
-	unsigned long start;
+	unsigned long start, sp_size;
 
 	/* Validate size, and alignment must be power of two */
 	if (size <= 0 || (alignment & (alignment - 1)) != 0)
 		return (unsigned long) -EINVAL;
 
-	/* given alignment larger that default rheap alignment */
-	if (alignment > info->alignment)
-		size += alignment - 1;
-
 	/* Align to configured alignment */
 	size = (size + (info->alignment - 1)) & ~(info->alignment - 1);
 
-	if (assure_empty(info, 1) < 0)
+	if (assure_empty(info, 2) < 0)
 		return (unsigned long) -ENOMEM;
 
 	blk = NULL;
 	list_for_each(l, &info->free_list) {
 		blk = list_entry(l, rh_block_t, list);
-		if (size <= blk->size)
-			break;
+		if (size <= blk->size) {
+			start = (blk->start + alignment - 1) & ~(alignment - 1);
+			if (start + size <= blk->start + blk->size)
+				break;
+		}
 		blk = NULL;
 	}
 
@@ -470,25 +469,36 @@
 		list_del(&blk->list);
 		newblk = blk;
 	} else {
+		/* Fragment caused, split if needed */
+		/* Create block for fragment in the beginning */
+		sp_size = start - blk->start;
+		if (sp_size) {
+			rh_block_t *spblk;
+
+			spblk = get_slot(info);
+			spblk->start = blk->start;
+			spblk->size = sp_size;
+			/* add before the blk */
+			list_add(&spblk->list, blk->list.prev);
+		}
 		newblk = get_slot(info);
-		newblk->start = blk->start;
+		newblk->start = start;
 		newblk->size = size;
 
-		/* blk still in free list, with updated start, size */
-		blk->start += size;
-		blk->size -= size;
+		/* blk still in free list, with updated start and size
+		 * for fragment in the end */
+		blk->start = start + size;
+		blk->size -= sp_size + size;
+		/* No fragment in the end, remove blk */
+		if (blk->size == 0) {
+			list_del(&blk->list);
+			release_slot(info, blk);
+		}
 	}
 
 	newblk->owner = owner;
-	start = newblk->start;
 	attach_taken_block(info, newblk);
 
-	/* for larger alignment return fixed up pointer  */
-	/* this is no problem with the deallocator since */
-	/* we scan for pointers that lie in the blocks   */
-	if (alignment > info->alignment)
-		start = (start + alignment - 1) & ~(alignment - 1);
-
 	return start;
 }
 
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index bfe9013..115b25f 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -279,14 +279,13 @@
 #endif /* CONFIG_8xx */
 
 	if (is_exec) {
-#ifdef CONFIG_PPC64
+#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
 		/* protection fault */
 		if (error_code & DSISR_PROTFAULT)
 			goto bad_area;
 		if (!(vma->vm_flags & VM_EXEC))
 			goto bad_area;
-#endif
-#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
+#else
 		pte_t *ptep;
 		pmd_t *pmdp;
 
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index 9565715..7ccb923 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -454,6 +454,9 @@
 
 static int pmac_late_init(void)
 {
+	if (!machine_is(powermac))
+		return -ENODEV;
+
 	initializing = 0;
 	/* this is udbg (which is __init) and we can later use it during
 	 * cpu hotplug (in smp_core99_kick_cpu) */
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index 81a2b92..6ffbab7 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -535,8 +535,7 @@
 
 /******************************* init / exit *********************************/
 
-static void
-appldata_online_cpu(int cpu)
+static void __cpuinit appldata_online_cpu(int cpu)
 {
 	init_virt_timer(&per_cpu(appldata_timer, cpu));
 	per_cpu(appldata_timer, cpu).function = appldata_timer_function;
@@ -580,7 +579,7 @@
 	return NOTIFY_OK;
 }
 
-static struct notifier_block appldata_nb = {
+static struct notifier_block __cpuinitdata appldata_nb = {
 	.notifier_call = appldata_cpu_notify,
 };
 
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index c8a2212..6234c697 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -769,10 +769,13 @@
 
 	RESTORE_ALL __LC_RETURN_MCCK_PSW,0
 
-#ifdef CONFIG_SMP
 /*
  * Restart interruption handler, kick starter for additional CPUs
  */
+#ifdef CONFIG_SMP
+#ifndef CONFIG_HOTPLUG_CPU
+	.section .init.text,"ax"
+#endif
 	.globl restart_int_handler
 restart_int_handler:
 	l	%r15,__LC_SAVE_AREA+60	# load ksp
@@ -785,6 +788,9 @@
 	br	%r14			# branch to start_secondary
 restart_addr:
 	.long	start_secondary
+#ifndef CONFIG_HOTPLUG_CPU
+	.previous
+#endif
 #else
 /*
  * If we do not run with SMP enabled, let the new CPU crash ...
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 93745fd..685f11fa 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -745,10 +745,13 @@
 #endif
 	lpswe	__LC_RETURN_MCCK_PSW	# back to caller
 
-#ifdef CONFIG_SMP
 /*
  * Restart interruption handler, kick starter for additional CPUs
  */
+#ifdef CONFIG_SMP
+#ifndef CONFIG_HOTPLUG_CPU
+	.section .init.text,"ax"
+#endif
 	.globl restart_int_handler
 restart_int_handler:
 	lg	%r15,__LC_SAVE_AREA+120 # load ksp
@@ -759,6 +762,9 @@
 	lmg	%r6,%r15,__SF_GPRS(%r15) # load registers from clone
 	stosm	__SF_EMPTY(%r15),0x04	# now we can turn dat on
 	jg	start_secondary
+#ifndef CONFIG_HOTPLUG_CPU
+	.previous
+#endif
 #else
 /*
  * If we do not run with SMP enabled, let the new CPU crash ...
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 51d6309..7e1bfb9 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -300,6 +300,7 @@
 	else
 		sprintf(str, "cio_ignore=all,!0.0.%04x",
 			ipl_info.data.fcp.dev_id.devno);
+	strcat(COMMAND_LINE, " ");
 	strcat(COMMAND_LINE, str);
 	console_loglevel = 2;
 }
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index cbfe730..ee9186f 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -253,19 +253,22 @@
 {
 	static int die_counter;
 
+	oops_enter();
 	debug_stop_all();
 	console_verbose();
 	spin_lock_irq(&die_lock);
 	bust_spinlocks(1);
 	printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
-        show_regs(regs);
+	print_modules();
+	show_regs(regs);
 	bust_spinlocks(0);
-        spin_unlock_irq(&die_lock);
+	spin_unlock_irq(&die_lock);
 	if (in_interrupt())
 		panic("Fatal exception in interrupt");
 	if (panic_on_oops)
 		panic("Fatal exception: panic_on_oops");
-        do_exit(SIGSEGV);
+	oops_exit();
+	do_exit(SIGSEGV);
 }
 
 static void inline
diff --git a/arch/sh/kernel/signal.c b/arch/sh/kernel/signal.c
index b32c35a..e323e29 100644
--- a/arch/sh/kernel/signal.c
+++ b/arch/sh/kernel/signal.c
@@ -268,7 +268,7 @@
 badframe:
 	force_sig(SIGSEGV, current);
 	return 0;
-}	
+}
 
 /*
  * Set up a signal frame.
@@ -481,7 +481,7 @@
 
 static int
 handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
-	      sigset_t *oldset, struct pt_regs *regs)
+	      sigset_t *oldset, struct pt_regs *regs, unsigned int save_r0)
 {
 	int ret;
 
@@ -489,6 +489,7 @@
 	if (regs->tra >= 0) {
 		/* If so, check system call restarting.. */
 		switch (regs->regs[0]) {
+			case -ERESTART_RESTARTBLOCK:
 			case -ERESTARTNOHAND:
 				regs->regs[0] = -EINTR;
 				break;
@@ -500,6 +501,7 @@
 				}
 			/* fallthrough */
 			case -ERESTARTNOINTR:
+				regs->regs[0] = save_r0;
 				regs->pc -= instruction_size(
 						ctrl_inw(regs->pc - 4));
 				break;
@@ -583,7 +585,8 @@
 	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 	if (signr > 0) {
 		/* Whee!  Actually deliver the signal.  */
-		if (handle_signal(signr, &ka, &info, oldset, regs) == 0) {
+		if (handle_signal(signr, &ka, &info, oldset,
+				  regs, save_r0) == 0) {
 			/* a signal was successfully delivered; the saved
 			 * sigmask will have been stored in the signal frame,
 			 * and will be restored by sigreturn, so we can simply
diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c
index 5b75cb6..8f18930 100644
--- a/arch/sh/kernel/traps.c
+++ b/arch/sh/kernel/traps.c
@@ -83,6 +83,8 @@
 {
 	static int die_counter;
 
+	oops_enter();
+
 	console_verbose();
 	spin_lock_irq(&die_lock);
 	bust_spinlocks(1);
@@ -112,6 +114,7 @@
 	if (panic_on_oops)
 		panic("Fatal exception");
 
+	oops_exit();
 	do_exit(SIGSEGV);
 }
 
diff --git a/arch/sh64/kernel/signal.c b/arch/sh64/kernel/signal.c
index c8525ad..0bb4a8f 100644
--- a/arch/sh64/kernel/signal.c
+++ b/arch/sh64/kernel/signal.c
@@ -640,6 +640,7 @@
 	if (regs->syscall_nr >= 0) {
 		/* If so, check system call restarting.. */
 		switch (regs->regs[REG_RET]) {
+			case -ERESTART_RESTARTBLOCK:
 			case -ERESTARTNOHAND:
 				regs->regs[REG_RET] = -EINTR;
 				break;
diff --git a/arch/um/include/common-offsets.h b/arch/um/include/common-offsets.h
index 541f4a8..7376ee4 100644
--- a/arch/um/include/common-offsets.h
+++ b/arch/um/include/common-offsets.h
@@ -9,6 +9,7 @@
 OFFSET(HOST_TASK_PID, task_struct, pid);
 
 DEFINE(UM_KERN_PAGE_SIZE, PAGE_SIZE);
+DEFINE(UM_KERN_PAGE_MASK, PAGE_MASK);
 DEFINE(UM_NSEC_PER_SEC, NSEC_PER_SEC);
 
 DEFINE_STR(UM_KERN_EMERG, KERN_EMERG);
diff --git a/arch/um/os-Linux/main.c b/arch/um/os-Linux/main.c
index ea9a236..fb510d4 100644
--- a/arch/um/os-Linux/main.c
+++ b/arch/um/os-Linux/main.c
@@ -24,6 +24,7 @@
 #include "uml-config.h"
 #include "os.h"
 #include "um_malloc.h"
+#include "kern_constants.h"
 
 /* Set in main, unchanged thereafter */
 char *linux_prog;
@@ -232,7 +233,8 @@
 
 	if(!CAN_KMALLOC())
 		return __real_malloc(size);
-	else if(size <= PAGE_SIZE) /* finding contiguos pages can be hard*/
+	else if(size <= UM_KERN_PAGE_SIZE)
+		/* finding contiguous pages can be hard*/
 		ret = um_kmalloc(size);
 	else ret = um_vmalloc(size);
 
diff --git a/arch/um/os-Linux/skas/mem.c b/arch/um/os-Linux/skas/mem.c
index 5c894632..0f7df4e 100644
--- a/arch/um/os-Linux/skas/mem.c
+++ b/arch/um/os-Linux/skas/mem.c
@@ -25,6 +25,7 @@
 #include "sysdep/ptrace.h"
 #include "sysdep/stub.h"
 #include "init.h"
+#include "kern_constants.h"
 
 extern unsigned long batch_syscall_stub, __syscall_stub_start;
 
@@ -149,8 +150,8 @@
 	*stack = 0;
 	multi_op_count++;
 
-	if(!done && ((((unsigned long) stack) & ~PAGE_MASK) <
-		     PAGE_SIZE - 10 * sizeof(long))){
+	if(!done && ((((unsigned long) stack) & ~UM_KERN_PAGE_MASK) <
+		     UM_KERN_PAGE_SIZE - 10 * sizeof(long))){
 		*addr = stack;
 		return 0;
 	}
@@ -168,8 +169,8 @@
 	/* If *addr still is uninitialized, it *must* contain NULL.
 	 * Thus in this case do_syscall_stub correctly won't be called.
 	 */
-	if((((unsigned long) *addr) & ~PAGE_MASK) >=
-	   PAGE_SIZE - (10 + data_count) * sizeof(long)) {
+	if((((unsigned long) *addr) & ~UM_KERN_PAGE_MASK) >=
+	   UM_KERN_PAGE_SIZE - (10 + data_count) * sizeof(long)) {
 		ret = do_syscall_stub(mm_idp, addr);
 		/* in case of error, don't overwrite data on stack */
 		if(ret)
@@ -183,8 +184,8 @@
 
 	memcpy(stack + 1, data, data_count * sizeof(long));
 
-	*stub_addr = (void *)(((unsigned long)(stack + 1) & ~PAGE_MASK) +
-			      UML_CONFIG_STUB_DATA);
+	*stub_addr = (void *)(((unsigned long)(stack + 1) &
+			       ~UM_KERN_PAGE_MASK) + UML_CONFIG_STUB_DATA);
 
 	return 0;
 }
diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c
index f9d2f85..46c00cc 100644
--- a/arch/um/os-Linux/skas/process.c
+++ b/arch/um/os-Linux/skas/process.c
@@ -252,11 +252,12 @@
 	unsigned long sp;
 	int pid, status, n, flags;
 
-	stack = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE | PROT_EXEC,
+	stack = mmap(NULL, UM_KERN_PAGE_SIZE,
+		     PROT_READ | PROT_WRITE | PROT_EXEC,
 		     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 	if(stack == MAP_FAILED)
 		panic("start_userspace : mmap failed, errno = %d", errno);
-	sp = (unsigned long) stack + PAGE_SIZE - sizeof(void *);
+	sp = (unsigned long) stack + UM_KERN_PAGE_SIZE - sizeof(void *);
 
 	flags = CLONE_FILES | SIGCHLD;
 	if(proc_mm) flags |= CLONE_VM;
@@ -279,7 +280,7 @@
 		panic("start_userspace : PTRACE_OLDSETOPTIONS failed, errno=%d\n",
 		      errno);
 
-	if(munmap(stack, PAGE_SIZE) < 0)
+	if(munmap(stack, UM_KERN_PAGE_SIZE) < 0)
 		panic("start_userspace : munmap failed, errno = %d\n", errno);
 
 	return(pid);
@@ -365,7 +366,7 @@
 	thread_regs[REGS_IP_INDEX] = UML_CONFIG_STUB_CODE +
 				(unsigned long) stub_clone_handler -
 				(unsigned long) &__syscall_stub_start;
-	thread_regs[REGS_SP_INDEX] = UML_CONFIG_STUB_DATA + PAGE_SIZE -
+	thread_regs[REGS_SP_INDEX] = UML_CONFIG_STUB_DATA + UM_KERN_PAGE_SIZE -
 		sizeof(void *);
 #ifdef __SIGNAL_FRAMESIZE
 	thread_regs[REGS_SP_INDEX] -= __SIGNAL_FRAMESIZE;
@@ -453,7 +454,7 @@
 				      .u         =
 				      { .mmap    =
 					{ .addr    = code,
-					  .len     = PAGE_SIZE,
+					  .len     = UM_KERN_PAGE_SIZE,
 					  .prot    = PROT_EXEC,
 					  .flags   = MAP_FIXED | MAP_PRIVATE,
 					  .fd      = code_fd,
@@ -476,7 +477,7 @@
 				  .u         =
 				  { .mmap    =
 				    { .addr    = data,
-				      .len     = PAGE_SIZE,
+				      .len     = UM_KERN_PAGE_SIZE,
 				      .prot    = PROT_READ | PROT_WRITE,
 				      .flags   = MAP_FIXED | MAP_SHARED,
 				      .fd      = map_fd,
diff --git a/arch/um/os-Linux/start_up.c b/arch/um/os-Linux/start_up.c
index 3fc13fa..46f6139 100644
--- a/arch/um/os-Linux/start_up.c
+++ b/arch/um/os-Linux/start_up.c
@@ -107,11 +107,12 @@
 	unsigned long sp;
 	int pid, n, status;
 
-	stack = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE | PROT_EXEC,
+	stack = mmap(NULL, UM_KERN_PAGE_SIZE,
+		     PROT_READ | PROT_WRITE | PROT_EXEC,
 		     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 	if(stack == MAP_FAILED)
 		fatal_perror("check_ptrace : mmap failed");
-	sp = (unsigned long) stack + PAGE_SIZE - sizeof(void *);
+	sp = (unsigned long) stack + UM_KERN_PAGE_SIZE - sizeof(void *);
 	pid = clone(ptrace_child, (void *) sp, SIGCHLD, NULL);
 	if(pid < 0)
 		fatal_perror("start_ptraced_child : clone failed");
@@ -153,7 +154,7 @@
 		ret = -1;
 	}
 
-	if(munmap(stack, PAGE_SIZE) < 0)
+	if(munmap(stack, UM_KERN_PAGE_SIZE) < 0)
 		fatal_perror("check_ptrace : munmap failed");
 	return ret;
 }
diff --git a/arch/x86_64/Kconfig.debug b/arch/x86_64/Kconfig.debug
index 775d211..8a86775 100644
--- a/arch/x86_64/Kconfig.debug
+++ b/arch/x86_64/Kconfig.debug
@@ -9,6 +9,7 @@
 config DEBUG_RODATA
        bool "Write protect kernel read-only data structures"
        depends on DEBUG_KERNEL
+       depends on !KPROBES # temporary for 2.6.22
        help
 	 Mark the kernel read-only data as write-protected in the pagetables,
 	 in order to catch accidental (and incorrect) writes to such const data.
diff --git a/arch/x86_64/ia32/ia32entry.S b/arch/x86_64/ia32/ia32entry.S
index 21868f9..47565c3 100644
--- a/arch/x86_64/ia32/ia32entry.S
+++ b/arch/x86_64/ia32/ia32entry.S
@@ -620,7 +620,7 @@
 	.quad quiet_ni_syscall		/* tux */
 	.quad quiet_ni_syscall    	/* security */
 	.quad sys_gettid	
-	.quad sys_readahead	/* 225 */
+	.quad sys32_readahead	/* 225 */
 	.quad sys_setxattr
 	.quad sys_lsetxattr
 	.quad sys_fsetxattr
@@ -645,7 +645,7 @@
 	.quad compat_sys_io_getevents
 	.quad compat_sys_io_submit
 	.quad sys_io_cancel
-	.quad sys_fadvise64		/* 250 */
+	.quad sys32_fadvise64		/* 250 */
 	.quad quiet_ni_syscall 	/* free_huge_pages */
 	.quad sys_exit_group
 	.quad sys32_lookup_dcookie
@@ -709,7 +709,7 @@
 	.quad compat_sys_set_robust_list
 	.quad compat_sys_get_robust_list
 	.quad sys_splice
-	.quad sys_sync_file_range
+	.quad sys32_sync_file_range
 	.quad sys_tee			/* 315 */
 	.quad compat_sys_vmsplice
 	.quad compat_sys_move_pages
diff --git a/arch/x86_64/ia32/sys_ia32.c b/arch/x86_64/ia32/sys_ia32.c
index 200fdde..99a78a3 100644
--- a/arch/x86_64/ia32/sys_ia32.c
+++ b/arch/x86_64/ia32/sys_ia32.c
@@ -860,3 +860,22 @@
 	return sys_lookup_dcookie(((u64)addr_high << 32) | addr_low, buf, len);
 }
 
+asmlinkage ssize_t sys32_readahead(int fd, unsigned off_lo, unsigned off_hi, size_t count)
+{
+	return sys_readahead(fd, ((u64)off_hi << 32) | off_lo, count);
+}
+
+asmlinkage long sys32_sync_file_range(int fd, unsigned off_low, unsigned off_hi,
+			   unsigned n_low, unsigned n_hi,  int flags)
+{
+	return sys_sync_file_range(fd,
+				   ((u64)off_hi << 32) | off_low,
+				   ((u64)n_hi << 32) | n_low, flags);
+}
+
+asmlinkage long sys32_fadvise64(int fd, unsigned offset_lo, unsigned offset_hi, size_t len,
+		     int advice)
+{
+	return sys_fadvise64_64(fd, ((u64)offset_hi << 32) | offset_lo,
+				len, advice);
+}
diff --git a/arch/x86_64/kernel/pci-dma.c b/arch/x86_64/kernel/pci-dma.c
index 651ccfb..9f80aad 100644
--- a/arch/x86_64/kernel/pci-dma.c
+++ b/arch/x86_64/kernel/pci-dma.c
@@ -322,5 +322,17 @@
 	return 0;
 }
 
+#ifdef CONFIG_PCI
+/* Many VIA bridges seem to corrupt data for DAC. Disable it here */
+
+static __devinit void via_no_dac(struct pci_dev *dev)
+{
+	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
+		printk(KERN_INFO "PCI: VIA PCI bridge detected. Disabling DAC.\n");
+		forbid_dac = 1;
+	}
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
+#endif
 /* Must execute after PCI subsystem */
 fs_initcall(pci_iommu_init);
diff --git a/arch/x86_64/mm/pageattr.c b/arch/x86_64/mm/pageattr.c
index d653d0b..9148f4a 100644
--- a/arch/x86_64/mm/pageattr.c
+++ b/arch/x86_64/mm/pageattr.c
@@ -74,10 +74,11 @@
 	struct page *pg;
 
 	/* When clflush is available always use it because it is
-	   much cheaper than WBINVD */
-	if (!cpu_has_clflush)
+	   much cheaper than WBINVD. Disable clflush for now because
+	   the high level code is not ready yet */
+	if (1 || !cpu_has_clflush)
 		asm volatile("wbinvd" ::: "memory");
-	list_for_each_entry(pg, l, lru) {
+	else list_for_each_entry(pg, l, lru) {
 		void *adr = page_address(pg);
 		if (cpu_has_clflush)
 			cache_flush_page(adr);
diff --git a/drivers/acpi/toshiba_acpi.c b/drivers/acpi/toshiba_acpi.c
index 1cfbecb..13369b4 100644
--- a/drivers/acpi/toshiba_acpi.c
+++ b/drivers/acpi/toshiba_acpi.c
@@ -524,7 +524,7 @@
 	return AE_OK;
 }
 
-static acpi_status __exit remove_device(void)
+static acpi_status remove_device(void)
 {
 	ProcItem *item;
 
diff --git a/drivers/char/drm/radeon_ioc32.c b/drivers/char/drm/radeon_ioc32.c
index 04126c2..56decda 100644
--- a/drivers/char/drm/radeon_ioc32.c
+++ b/drivers/char/drm/radeon_ioc32.c
@@ -349,6 +349,8 @@
 			 DRM_IOCTL_RADEON_IRQ_EMIT, (unsigned long)request);
 }
 
+/* The two 64-bit arches where alignof(u64)==4 in 32-bit code */
+#if defined (CONFIG_X86_64) || defined(CONFIG_IA64)
 typedef struct drm_radeon_setparam32 {
 	int param;
 	u64 value;
@@ -373,6 +375,9 @@
 	return drm_ioctl(file->f_dentry->d_inode, file,
 			 DRM_IOCTL_RADEON_SETPARAM, (unsigned long) request);
 }
+#else
+#define compat_radeon_cp_setparam NULL
+#endif /* X86_64 || IA64 */
 
 drm_ioctl_compat_t *radeon_compat_ioctls[] = {
 	[DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 0474cac..7f52712 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -794,7 +794,7 @@
 
 	buf[0] ^= buf[3];
 	buf[1] ^= buf[4];
-	buf[0] ^= rol32(buf[3], 16);
+	buf[2] ^= rol32(buf[2], 16);
 	memcpy(out, buf, EXTRACT_SIZE);
 	memset(buf, 0, sizeof(buf));
 }
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index 3752edc..a96f26a 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -1173,8 +1173,14 @@
 	return POLLIN | POLLOUT | POLLERR | POLLHUP | POLLRDNORM | POLLWRNORM;
 }
 
-static long hung_up_tty_ioctl(struct file * file,
-			      unsigned int cmd, unsigned long arg)
+static int hung_up_tty_ioctl(struct inode * inode, struct file * file,
+			     unsigned int cmd, unsigned long arg)
+{
+	return cmd == TIOCSPGRP ? -ENOTTY : -EIO;
+}
+
+static long hung_up_tty_compat_ioctl(struct file * file,
+				     unsigned int cmd, unsigned long arg)
 {
 	return cmd == TIOCSPGRP ? -ENOTTY : -EIO;
 }
@@ -1222,8 +1228,8 @@
 	.read		= hung_up_tty_read,
 	.write		= hung_up_tty_write,
 	.poll		= hung_up_tty_poll,
-	.unlocked_ioctl = hung_up_tty_ioctl,
-	.compat_ioctl	= hung_up_tty_ioctl,
+	.ioctl		= hung_up_tty_ioctl,
+	.compat_ioctl	= hung_up_tty_compat_ioctl,
 	.release	= tty_release,
 };
 
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index b2a290c..660b27a 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -354,8 +354,8 @@
 	if (is_send) {
 		wq = &(*cur_qp)->sq;
 		wqe_ctr = be16_to_cpu(cqe->wqe_index);
-		wq->tail += wqe_ctr - (u16) wq->tail;
-		wc->wr_id = wq->wrid[wq->tail & (wq->max - 1)];
+		wq->tail += (u16) (wqe_ctr - (u16) wq->tail);
+		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
 		++wq->tail;
 	} else if ((*cur_qp)->ibqp.srq) {
 		srq = to_msrq((*cur_qp)->ibqp.srq);
@@ -364,7 +364,7 @@
 		mlx4_ib_free_srq_wqe(srq, wqe_ctr);
 	} else {
 		wq	  = &(*cur_qp)->rq;
-		wc->wr_id = wq->wrid[wq->tail & (wq->max - 1)];
+		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
 		++wq->tail;
 	}
 
@@ -478,7 +478,8 @@
 {
 	u32 prod_index;
 	int nfreed = 0;
-	struct mlx4_cqe *cqe;
+	struct mlx4_cqe *cqe, *dest;
+	u8 owner_bit;
 
 	/*
 	 * First we need to find the current producer index, so we
@@ -501,9 +502,13 @@
 			if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK))
 				mlx4_ib_free_srq_wqe(srq, be16_to_cpu(cqe->wqe_index));
 			++nfreed;
-		} else if (nfreed)
-			memcpy(get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe),
-			       cqe, sizeof *cqe);
+		} else if (nfreed) {
+			dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
+			owner_bit = dest->owner_sr_opcode & MLX4_CQE_OWNER_MASK;
+			memcpy(dest, cqe, sizeof *cqe);
+			dest->owner_sr_opcode = owner_bit |
+				(dest->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
+		}
 	}
 
 	if (nfreed) {
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 402f3a2..1095c82 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -125,7 +125,7 @@
 	props->local_ca_ack_delay  = dev->dev->caps.local_ca_ack_delay;
 	props->atomic_cap	   = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
 		IB_ATOMIC_HCA : IB_ATOMIC_NONE;
-	props->max_pkeys	   = dev->dev->caps.pkey_table_len;
+	props->max_pkeys	   = dev->dev->caps.pkey_table_len[1];
 	props->max_mcast_grp	   = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
 	props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
 	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
@@ -168,9 +168,9 @@
 	props->state		= out_mad->data[32] & 0xf;
 	props->phys_state	= out_mad->data[33] >> 4;
 	props->port_cap_flags	= be32_to_cpup((__be32 *) (out_mad->data + 20));
-	props->gid_tbl_len	= to_mdev(ibdev)->dev->caps.gid_table_len;
+	props->gid_tbl_len	= to_mdev(ibdev)->dev->caps.gid_table_len[port];
 	props->max_msg_sz	= 0x80000000;
-	props->pkey_tbl_len	= to_mdev(ibdev)->dev->caps.pkey_table_len;
+	props->pkey_tbl_len	= to_mdev(ibdev)->dev->caps.pkey_table_len[port];
 	props->bad_pkey_cntr	= be16_to_cpup((__be16 *) (out_mad->data + 46));
 	props->qkey_viol_cntr	= be16_to_cpup((__be16 *) (out_mad->data + 48));
 	props->active_width	= out_mad->data[31] & 0xf;
@@ -280,8 +280,14 @@
 		return PTR_ERR(mailbox);
 
 	memset(mailbox->buf, 0, 256);
-	*(u8 *) mailbox->buf	     = !!reset_qkey_viols << 6;
-	((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
+
+	if (dev->dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
+		*(u8 *) mailbox->buf	     = !!reset_qkey_viols << 6;
+		((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
+	} else {
+		((u8 *) mailbox->buf)[3]     = !!reset_qkey_viols;
+		((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
+	}
 
 	err = mlx4_cmd(dev->dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT,
 		       MLX4_CMD_TIME_CLASS_B);
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 93dac71..24ccadd 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -95,7 +95,8 @@
 struct mlx4_ib_wq {
 	u64		       *wrid;
 	spinlock_t		lock;
-	int			max;
+	int			wqe_cnt;
+	int			max_post;
 	int			max_gs;
 	int			offset;
 	int			wqe_shift;
@@ -113,6 +114,7 @@
 
 	u32			doorbell_qpn;
 	__be32			sq_signal_bits;
+	int			sq_spare_wqes;
 	struct mlx4_ib_wq	sq;
 
 	struct ib_umem	       *umem;
@@ -123,6 +125,7 @@
 	u8			alt_port;
 	u8			atomic_rd_en;
 	u8			resp_depth;
+	u8			sq_no_prefetch;
 	u8			state;
 };
 
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 5c6d054..28a08bd 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -109,6 +109,20 @@
 	return get_wqe(qp, qp->sq.offset + (n << qp->sq.wqe_shift));
 }
 
+/*
+ * Stamp a SQ WQE so that it is invalid if prefetched by marking the
+ * first four bytes of every 64 byte chunk with 0xffffffff, except for
+ * the very first chunk of the WQE.
+ */
+static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n)
+{
+	u32 *wqe = get_send_wqe(qp, n);
+	int i;
+
+	for (i = 16; i < 1 << (qp->sq.wqe_shift - 2); i += 16)
+		wqe[i] = 0xffffffff;
+}
+
 static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type)
 {
 	struct ib_event event;
@@ -178,6 +192,8 @@
 	case IB_QPT_GSI:
 		return sizeof (struct mlx4_wqe_ctrl_seg) +
 			ALIGN(MLX4_IB_UD_HEADER_SIZE +
+			      DIV_ROUND_UP(MLX4_IB_UD_HEADER_SIZE,
+					   MLX4_INLINE_ALIGN) *
 			      sizeof (struct mlx4_wqe_inline_seg),
 			      sizeof (struct mlx4_wqe_data_seg)) +
 			ALIGN(4 +
@@ -201,18 +217,18 @@
 		if (cap->max_recv_wr)
 			return -EINVAL;
 
-		qp->rq.max = qp->rq.max_gs = 0;
+		qp->rq.wqe_cnt = qp->rq.max_gs = 0;
 	} else {
 		/* HW requires >= 1 RQ entry with >= 1 gather entry */
 		if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge))
 			return -EINVAL;
 
-		qp->rq.max	 = roundup_pow_of_two(max(1, cap->max_recv_wr));
-		qp->rq.max_gs	 = roundup_pow_of_two(max(1, cap->max_recv_sge));
+		qp->rq.wqe_cnt	 = roundup_pow_of_two(max(1U, cap->max_recv_wr));
+		qp->rq.max_gs	 = roundup_pow_of_two(max(1U, cap->max_recv_sge));
 		qp->rq.wqe_shift = ilog2(qp->rq.max_gs * sizeof (struct mlx4_wqe_data_seg));
 	}
 
-	cap->max_recv_wr  = qp->rq.max;
+	cap->max_recv_wr  = qp->rq.max_post = qp->rq.wqe_cnt;
 	cap->max_recv_sge = qp->rq.max_gs;
 
 	return 0;
@@ -236,8 +252,6 @@
 	    cap->max_send_sge + 2 > dev->dev->caps.max_sq_sg)
 		return -EINVAL;
 
-	qp->sq.max = cap->max_send_wr ? roundup_pow_of_two(cap->max_send_wr) : 1;
-
 	qp->sq.wqe_shift = ilog2(roundup_pow_of_two(max(cap->max_send_sge *
 							sizeof (struct mlx4_wqe_data_seg),
 							cap->max_inline_data +
@@ -246,20 +260,27 @@
 	qp->sq.max_gs    = ((1 << qp->sq.wqe_shift) - send_wqe_overhead(type)) /
 		sizeof (struct mlx4_wqe_data_seg);
 
-	qp->buf_size = (qp->rq.max << qp->rq.wqe_shift) +
-		(qp->sq.max << qp->sq.wqe_shift);
+	/*
+	 * We need to leave 2 KB + 1 WQE of headroom in the SQ to
+	 * allow HW to prefetch.
+	 */
+	qp->sq_spare_wqes = (2048 >> qp->sq.wqe_shift) + 1;
+	qp->sq.wqe_cnt = roundup_pow_of_two(cap->max_send_wr + qp->sq_spare_wqes);
+
+	qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
+		(qp->sq.wqe_cnt << qp->sq.wqe_shift);
 	if (qp->rq.wqe_shift > qp->sq.wqe_shift) {
 		qp->rq.offset = 0;
-		qp->sq.offset = qp->rq.max << qp->rq.wqe_shift;
+		qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
 	} else {
-		qp->rq.offset = qp->sq.max << qp->sq.wqe_shift;
+		qp->rq.offset = qp->sq.wqe_cnt << qp->sq.wqe_shift;
 		qp->sq.offset = 0;
 	}
 
-	cap->max_send_wr     = qp->sq.max;
-	cap->max_send_sge    = qp->sq.max_gs;
-	cap->max_inline_data = (1 << qp->sq.wqe_shift) - send_wqe_overhead(type) -
-		sizeof (struct mlx4_wqe_inline_seg);
+	cap->max_send_wr  = qp->sq.max_post = qp->sq.wqe_cnt - qp->sq_spare_wqes;
+	cap->max_send_sge = qp->sq.max_gs;
+	/* We don't support inline sends for kernel QPs (yet) */
+	cap->max_inline_data = 0;
 
 	return 0;
 }
@@ -267,11 +288,11 @@
 static int set_user_sq_size(struct mlx4_ib_qp *qp,
 			    struct mlx4_ib_create_qp *ucmd)
 {
-	qp->sq.max       = 1 << ucmd->log_sq_bb_count;
+	qp->sq.wqe_cnt   = 1 << ucmd->log_sq_bb_count;
 	qp->sq.wqe_shift = ucmd->log_sq_stride;
 
-	qp->buf_size = (qp->rq.max << qp->rq.wqe_shift) +
-		(qp->sq.max << qp->sq.wqe_shift);
+	qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
+		(qp->sq.wqe_cnt << qp->sq.wqe_shift);
 
 	return 0;
 }
@@ -307,6 +328,8 @@
 			goto err;
 		}
 
+		qp->sq_no_prefetch = ucmd.sq_no_prefetch;
+
 		err = set_user_sq_size(qp, &ucmd);
 		if (err)
 			goto err;
@@ -334,6 +357,8 @@
 				goto err_mtt;
 		}
 	} else {
+		qp->sq_no_prefetch = 0;
+
 		err = set_kernel_sq_size(dev, &init_attr->cap, init_attr->qp_type, qp);
 		if (err)
 			goto err;
@@ -360,16 +385,13 @@
 		if (err)
 			goto err_mtt;
 
-		qp->sq.wrid  = kmalloc(qp->sq.max * sizeof (u64), GFP_KERNEL);
-		qp->rq.wrid  = kmalloc(qp->rq.max * sizeof (u64), GFP_KERNEL);
+		qp->sq.wrid  = kmalloc(qp->sq.wqe_cnt * sizeof (u64), GFP_KERNEL);
+		qp->rq.wrid  = kmalloc(qp->rq.wqe_cnt * sizeof (u64), GFP_KERNEL);
 
 		if (!qp->sq.wrid || !qp->rq.wrid) {
 			err = -ENOMEM;
 			goto err_wrid;
 		}
-
-		/* We don't support inline sends for kernel QPs (yet) */
-		init_attr->cap.max_inline_data = 0;
 	}
 
 	err = mlx4_qp_alloc(dev->dev, sqpn, &qp->mqp);
@@ -583,24 +605,6 @@
 	return 0;
 }
 
-static void init_port(struct mlx4_ib_dev *dev, int port)
-{
-	struct mlx4_init_port_param param;
-	int err;
-
-	memset(&param, 0, sizeof param);
-
-	param.port_width_cap = dev->dev->caps.port_width_cap;
-	param.vl_cap	     = dev->dev->caps.vl_cap;
-	param.mtu	     = ib_mtu_enum_to_int(dev->dev->caps.mtu_cap);
-	param.max_gid	     = dev->dev->caps.gid_table_len;
-	param.max_pkey	     = dev->dev->caps.pkey_table_len;
-
-	err = mlx4_INIT_PORT(dev->dev, &param, port);
-	if (err)
-		printk(KERN_WARNING "INIT_PORT failed, return code %d.\n", err);
-}
-
 static int to_mlx4_st(enum ib_qp_type type)
 {
 	switch (type) {
@@ -674,9 +678,9 @@
 	path->counter_index = 0xff;
 
 	if (ah->ah_flags & IB_AH_GRH) {
-		if (ah->grh.sgid_index >= dev->dev->caps.gid_table_len) {
+		if (ah->grh.sgid_index >= dev->dev->caps.gid_table_len[port]) {
 			printk(KERN_ERR "sgid_index (%u) too large. max is %d\n",
-			       ah->grh.sgid_index, dev->dev->caps.gid_table_len - 1);
+			       ah->grh.sgid_index, dev->dev->caps.gid_table_len[port] - 1);
 			return -1;
 		}
 
@@ -743,14 +747,17 @@
 		context->mtu_msgmax = (attr->path_mtu << 5) | 31;
 	}
 
-	if (qp->rq.max)
-		context->rq_size_stride = ilog2(qp->rq.max) << 3;
+	if (qp->rq.wqe_cnt)
+		context->rq_size_stride = ilog2(qp->rq.wqe_cnt) << 3;
 	context->rq_size_stride |= qp->rq.wqe_shift - 4;
 
-	if (qp->sq.max)
-		context->sq_size_stride = ilog2(qp->sq.max) << 3;
+	if (qp->sq.wqe_cnt)
+		context->sq_size_stride = ilog2(qp->sq.wqe_cnt) << 3;
 	context->sq_size_stride |= qp->sq.wqe_shift - 4;
 
+	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
+		context->sq_size_stride |= !!qp->sq_no_prefetch << 7;
+
 	if (qp->ibqp.uobject)
 		context->usr_page = cpu_to_be32(to_mucontext(ibqp->uobject->context)->uar.index);
 	else
@@ -789,13 +796,14 @@
 	}
 
 	if (attr_mask & IB_QP_ALT_PATH) {
-		if (attr->alt_pkey_index >= dev->dev->caps.pkey_table_len)
-			return -EINVAL;
-
 		if (attr->alt_port_num == 0 ||
 		    attr->alt_port_num > dev->dev->caps.num_ports)
 			return -EINVAL;
 
+		if (attr->alt_pkey_index >=
+		    dev->dev->caps.pkey_table_len[attr->alt_port_num])
+			return -EINVAL;
+
 		if (mlx4_set_path(dev, &attr->alt_ah_attr, &context->alt_path,
 				  attr->alt_port_num))
 			return -EINVAL;
@@ -884,16 +892,19 @@
 
 	/*
 	 * Before passing a kernel QP to the HW, make sure that the
-	 * ownership bits of the send queue are set so that the
-	 * hardware doesn't start processing stale work requests.
+	 * ownership bits of the send queue are set and the SQ
+	 * headroom is stamped so that the hardware doesn't start
+	 * processing stale work requests.
 	 */
 	if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
 		struct mlx4_wqe_ctrl_seg *ctrl;
 		int i;
 
-		for (i = 0; i < qp->sq.max; ++i) {
+		for (i = 0; i < qp->sq.wqe_cnt; ++i) {
 			ctrl = get_send_wqe(qp, i);
 			ctrl->owner_opcode = cpu_to_be32(1 << 31);
+
+			stamp_send_wqe(qp, i);
 		}
 	}
 
@@ -923,7 +934,9 @@
 	 */
 	if (is_qp0(dev, qp)) {
 		if (cur_state != IB_QPS_RTR && new_state == IB_QPS_RTR)
-			init_port(dev, qp->port);
+			if (mlx4_INIT_PORT(dev->dev, qp->port))
+				printk(KERN_WARNING "INIT_PORT failed for port %d\n",
+				       qp->port);
 
 		if (cur_state != IB_QPS_RESET && cur_state != IB_QPS_ERR &&
 		    (new_state == IB_QPS_RESET || new_state == IB_QPS_ERR))
@@ -986,16 +999,17 @@
 	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask))
 		goto out;
 
-	if ((attr_mask & IB_QP_PKEY_INDEX) &&
-	     attr->pkey_index >= dev->dev->caps.pkey_table_len) {
-		goto out;
-	}
-
 	if ((attr_mask & IB_QP_PORT) &&
 	    (attr->port_num == 0 || attr->port_num > dev->dev->caps.num_ports)) {
 		goto out;
 	}
 
+	if (attr_mask & IB_QP_PKEY_INDEX) {
+		int p = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
+		if (attr->pkey_index >= dev->dev->caps.pkey_table_len[p])
+			goto out;
+	}
+
 	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
 	    attr->max_rd_atomic > dev->dev->caps.max_qp_init_rdma) {
 		goto out;
@@ -1037,6 +1051,7 @@
 	u16 pkey;
 	int send_size;
 	int header_size;
+	int spc;
 	int i;
 
 	send_size = 0;
@@ -1112,10 +1127,43 @@
 		printk("\n");
 	}
 
-	inl->byte_count = cpu_to_be32(1 << 31 | header_size);
-	memcpy(inl + 1, sqp->header_buf, header_size);
+	/*
+	 * Inline data segments may not cross a 64 byte boundary.  If
+	 * our UD header is bigger than the space available up to the
+	 * next 64 byte boundary in the WQE, use two inline data
+	 * segments to hold the UD header.
+	 */
+	spc = MLX4_INLINE_ALIGN -
+		((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1));
+	if (header_size <= spc) {
+		inl->byte_count = cpu_to_be32(1 << 31 | header_size);
+		memcpy(inl + 1, sqp->header_buf, header_size);
+		i = 1;
+	} else {
+		inl->byte_count = cpu_to_be32(1 << 31 | spc);
+		memcpy(inl + 1, sqp->header_buf, spc);
 
-	return ALIGN(sizeof (struct mlx4_wqe_inline_seg) + header_size, 16);
+		inl = (void *) (inl + 1) + spc;
+		memcpy(inl + 1, sqp->header_buf + spc, header_size - spc);
+		/*
+		 * Need a barrier here to make sure all the data is
+		 * visible before the byte_count field is set.
+		 * Otherwise the HCA prefetcher could grab the 64-byte
+		 * chunk with this inline segment and get a valid (!=
+		 * 0xffffffff) byte count but stale data, and end up
+		 * generating a packet with bad headers.
+		 *
+		 * The first inline segment's byte_count field doesn't
+		 * need a barrier, because it comes after a
+		 * control/MLX segment and therefore is at an offset
+		 * of 16 mod 64.
+		 */
+		wmb();
+		inl->byte_count = cpu_to_be32(1 << 31 | (header_size - spc));
+		i = 2;
+	}
+
+	return ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + header_size, 16);
 }
 
 static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
@@ -1124,7 +1172,7 @@
 	struct mlx4_ib_cq *cq;
 
 	cur = wq->head - wq->tail;
-	if (likely(cur + nreq < wq->max))
+	if (likely(cur + nreq < wq->max_post))
 		return 0;
 
 	cq = to_mcq(ib_cq);
@@ -1132,7 +1180,7 @@
 	cur = wq->head - wq->tail;
 	spin_unlock(&cq->lock);
 
-	return cur + nreq >= wq->max;
+	return cur + nreq >= wq->max_post;
 }
 
 int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
@@ -1165,8 +1213,8 @@
 			goto out;
 		}
 
-		ctrl = wqe = get_send_wqe(qp, ind & (qp->sq.max - 1));
-		qp->sq.wrid[ind & (qp->sq.max - 1)] = wr->wr_id;
+		ctrl = wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
+		qp->sq.wrid[ind & (qp->sq.wqe_cnt - 1)] = wr->wr_id;
 
 		ctrl->srcrb_flags =
 			(wr->send_flags & IB_SEND_SIGNALED ?
@@ -1301,7 +1349,16 @@
 		}
 
 		ctrl->owner_opcode = mlx4_ib_opcode[wr->opcode] |
-			(ind & qp->sq.max ? cpu_to_be32(1 << 31) : 0);
+			(ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0);
+
+		/*
+		 * We can improve latency by not stamping the last
+		 * send queue WQE until after ringing the doorbell, so
+		 * only stamp here if there are still more WQEs to post.
+		 */
+		if (wr->next)
+			stamp_send_wqe(qp, (ind + qp->sq_spare_wqes) &
+				       (qp->sq.wqe_cnt - 1));
 
 		++ind;
 	}
@@ -1324,6 +1381,9 @@
 		 * and reach the HCA out of order.
 		 */
 		mmiowb();
+
+		stamp_send_wqe(qp, (ind + qp->sq_spare_wqes - 1) &
+			       (qp->sq.wqe_cnt - 1));
 	}
 
 	spin_unlock_irqrestore(&qp->rq.lock, flags);
@@ -1344,7 +1404,7 @@
 
 	spin_lock_irqsave(&qp->rq.lock, flags);
 
-	ind = qp->rq.head & (qp->rq.max - 1);
+	ind = qp->rq.head & (qp->rq.wqe_cnt - 1);
 
 	for (nreq = 0; wr; ++nreq, wr = wr->next) {
 		if (mlx4_wq_overflow(&qp->rq, nreq, qp->ibqp.send_cq)) {
@@ -1375,7 +1435,7 @@
 
 		qp->rq.wrid[ind] = wr->wr_id;
 
-		ind = (ind + 1) & (qp->rq.max - 1);
+		ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
 	}
 
 out:
diff --git a/drivers/infiniband/hw/mlx4/user.h b/drivers/infiniband/hw/mlx4/user.h
index 88c72d5..e2d11be 100644
--- a/drivers/infiniband/hw/mlx4/user.h
+++ b/drivers/infiniband/hw/mlx4/user.h
@@ -39,7 +39,7 @@
  * Increment this value if any changes that break userspace ABI
  * compatibility are made.
  */
-#define MLX4_IB_UVERBS_ABI_VERSION	2
+#define MLX4_IB_UVERBS_ABI_VERSION	3
 
 /*
  * Make sure that all structs defined in this file remain laid out so
@@ -87,9 +87,10 @@
 struct mlx4_ib_create_qp {
 	__u64	buf_addr;
 	__u64	db_addr;
-        __u8	log_sq_bb_count;
-        __u8	log_sq_stride;
-        __u8	reserved[6];
+	__u8	log_sq_bb_count;
+	__u8	log_sq_stride;
+	__u8	sq_no_prefetch;
+	__u8	reserved[5];
 };
 
 #endif /* MLX4_IB_USER_H */
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index bd707b8..c97d5eb 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -164,6 +164,9 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called amikbd.
 
+config ATARI_KBD_CORE
+	bool
+
 config KEYBOARD_ATARI
 	tristate "Atari keyboard"
 	depends on ATARI
diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig
index ee699a7..0852d33 100644
--- a/drivers/macintosh/Kconfig
+++ b/drivers/macintosh/Kconfig
@@ -2,7 +2,7 @@
 menuconfig MACINTOSH_DRIVERS
 	bool "Macintosh device drivers"
 	depends on PPC || MAC || X86
-	default y
+	default y if MAC
 
 if MACINTOSH_DRIVERS
 
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 3a95cc5..46677d7 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1240,17 +1240,24 @@
 			}
 		r1_bio->read_disk = primary;
 		for (i=0; i<mddev->raid_disks; i++)
-			if (r1_bio->bios[i]->bi_end_io == end_sync_read &&
-			    test_bit(BIO_UPTODATE, &r1_bio->bios[i]->bi_flags)) {
+			if (r1_bio->bios[i]->bi_end_io == end_sync_read) {
 				int j;
 				int vcnt = r1_bio->sectors >> (PAGE_SHIFT- 9);
 				struct bio *pbio = r1_bio->bios[primary];
 				struct bio *sbio = r1_bio->bios[i];
-				for (j = vcnt; j-- ; )
-					if (memcmp(page_address(pbio->bi_io_vec[j].bv_page),
-						   page_address(sbio->bi_io_vec[j].bv_page),
-						   PAGE_SIZE))
-						break;
+
+				if (test_bit(BIO_UPTODATE, &sbio->bi_flags)) {
+					for (j = vcnt; j-- ; ) {
+						struct page *p, *s;
+						p = pbio->bi_io_vec[j].bv_page;
+						s = sbio->bi_io_vec[j].bv_page;
+						if (memcmp(page_address(p),
+							   page_address(s),
+							   PAGE_SIZE))
+							break;
+					}
+				} else
+					j = 0;
 				if (j >= 0)
 					mddev->resync_mismatches += r1_bio->sectors;
 				if (j < 0 || test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 82249a6..9eb66c1 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1867,6 +1867,7 @@
 			int d = r10_bio->devs[i].devnum;
 			bio = r10_bio->devs[i].bio;
 			bio->bi_end_io = NULL;
+			clear_bit(BIO_UPTODATE, &bio->bi_flags);
 			if (conf->mirrors[d].rdev == NULL ||
 			    test_bit(Faulty, &conf->mirrors[d].rdev->flags))
 				continue;
@@ -2037,6 +2038,11 @@
 	/* 'size' is now the number of chunks in the array */
 	/* calculate "used chunks per device" in 'stride' */
 	stride = size * conf->copies;
+
+	/* We need to round up when dividing by raid_disks to
+	 * get the stride size.
+	 */
+	stride += conf->raid_disks - 1;
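+	/* e.g. 20 used chunks on 3 disks: stride = (20 + 2) / 3 = 7 per device */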
 	sector_div(stride, conf->raid_disks);
 	mddev->size = stride  << (conf->chunk_shift-1);
 
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index d75f7ff..37bf653 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -727,13 +727,15 @@
 	struct _MPT_SCSI_HOST *hd =
 		(struct _MPT_SCSI_HOST *)sdev->host->hostdata;
 	VirtTarget *vtarget = scsi_target(sdev)->hostdata;
-	int ret = mptscsih_slave_configure(sdev);
+	int ret;
+
+	mptspi_initTarget(hd, vtarget, sdev);
+
+	ret = mptscsih_slave_configure(sdev);
 
 	if (ret)
 		return ret;
 
-	mptspi_initTarget(hd, vtarget, sdev);
-
 	ddvprintk((MYIOC_s_INFO_FMT "id=%d min_period=0x%02x"
 		" max_offset=0x%02x max_width=%d\n", hd->ioc->name,
 		sdev->id, spi_min_period(scsi_target(sdev)),
diff --git a/drivers/net/mipsnet.c b/drivers/net/mipsnet.c
index 638a279..9853c74 100644
--- a/drivers/net/mipsnet.c
+++ b/drivers/net/mipsnet.c
@@ -240,7 +240,7 @@
 	 * TODO: probe for these or load them from PARAM
 	 */
 	netdev->base_addr = 0x4200;
-	netdev->irq = MIPSCPU_INT_BASE + MIPSCPU_INT_MB0 +
+	netdev->irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_MB0 +
 	              inl(mipsnet_reg_address(netdev, interruptInfo));
 
 	// Get the io region now, get irq on open()
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index e7ca118..d2b0653 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -38,7 +38,9 @@
 #include "icm.h"
 
 enum {
-	MLX4_COMMAND_INTERFACE_REV	= 1
+	MLX4_COMMAND_INTERFACE_MIN_REV		= 2,
+	MLX4_COMMAND_INTERFACE_MAX_REV		= 3,
+	MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS	= 3,
 };
 
 extern void __buggy_use_of_MLX4_GET(void);
@@ -107,6 +109,7 @@
 	u16 size;
 	u16 stat_rate;
 	int err;
+	int i;
 
 #define QUERY_DEV_CAP_OUT_SIZE		       0x100
 #define QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET		0x10
@@ -176,7 +179,6 @@
 
 	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
 			   MLX4_CMD_TIME_CLASS_A);
-
 	if (err)
 		goto out;
 
@@ -216,18 +218,10 @@
 	dev_cap->max_rdma_global = 1 << (field & 0x3f);
 	MLX4_GET(field, outbox, QUERY_DEV_CAP_ACK_DELAY_OFFSET);
 	dev_cap->local_ca_ack_delay = field & 0x1f;
-	MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET);
-	dev_cap->max_mtu	= field >> 4;
-	dev_cap->max_port_width = field & 0xf;
 	MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
-	dev_cap->max_vl    = field >> 4;
 	dev_cap->num_ports = field & 0xf;
-	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET);
-	dev_cap->max_gids = 1 << (field & 0xf);
 	MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
 	dev_cap->stat_rate_support = stat_rate;
-	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PKEY_OFFSET);
-	dev_cap->max_pkeys = 1 << (field & 0xf);
 	MLX4_GET(dev_cap->flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
 	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET);
 	dev_cap->reserved_uars = field >> 4;
@@ -304,6 +298,42 @@
 	MLX4_GET(dev_cap->max_icm_sz, outbox,
 		 QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET);
 
+	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
+		for (i = 1; i <= dev_cap->num_ports; ++i) {
+			MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
+			dev_cap->max_vl[i]	   = field >> 4;
+			MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET);
+			dev_cap->max_mtu[i]	   = field >> 4;
+			dev_cap->max_port_width[i] = field & 0xf;
+			MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET);
+			dev_cap->max_gids[i]	   = 1 << (field & 0xf);
+			MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PKEY_OFFSET);
+			dev_cap->max_pkeys[i]	   = 1 << (field & 0xf);
+		}
+	} else {
+#define QUERY_PORT_MTU_OFFSET			0x01
+#define QUERY_PORT_WIDTH_OFFSET			0x06
+#define QUERY_PORT_MAX_GID_PKEY_OFFSET		0x07
+#define QUERY_PORT_MAX_VL_OFFSET		0x0b
+
+		for (i = 1; i <= dev_cap->num_ports; ++i) {
+			err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT,
+					   MLX4_CMD_TIME_CLASS_B);
+			if (err)
+				goto out;
+
+			MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET);
+			dev_cap->max_mtu[i]	   = field & 0xf;
+			MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET);
+			dev_cap->max_port_width[i] = field & 0xf;
+			MLX4_GET(field, outbox, QUERY_PORT_MAX_GID_PKEY_OFFSET);
+			dev_cap->max_gids[i]	   = 1 << (field >> 4);
+			dev_cap->max_pkeys[i]	   = 1 << (field & 0xf);
+			MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET);
+			dev_cap->max_vl[i]	   = field & 0xf;
+		}
+	}
+
 	if (dev_cap->bmme_flags & 1)
 		mlx4_dbg(dev, "Base MM extensions: yes "
 			 "(flags %d, rsvd L_Key %08x)\n",
@@ -338,8 +368,8 @@
 	mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n",
 		 dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz);
 	mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap: %d\n",
-		 dev_cap->local_ca_ack_delay, 128 << dev_cap->max_mtu,
-		 dev_cap->max_port_width);
+		 dev_cap->local_ca_ack_delay, 128 << dev_cap->max_mtu[1],
+		 dev_cap->max_port_width[1]);
 	mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n",
 		 dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg);
 	mlx4_dbg(dev, "Max RQ desc size: %d, max RQ S/G: %d\n",
@@ -491,7 +521,8 @@
 		((fw_ver & 0x0000ffffull) << 16);
 
 	MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET);
-	if (cmd_if_rev != MLX4_COMMAND_INTERFACE_REV) {
+	if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV ||
+	    cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) {
 		mlx4_err(dev, "Installed FW has unsupported "
 			 "command interface revision %d.\n",
 			 cmd_if_rev);
@@ -499,12 +530,15 @@
 			 (int) (dev->caps.fw_ver >> 32),
 			 (int) (dev->caps.fw_ver >> 16) & 0xffff,
 			 (int) dev->caps.fw_ver & 0xffff);
-		mlx4_err(dev, "This driver version supports only revision %d.\n",
-			 MLX4_COMMAND_INTERFACE_REV);
+		mlx4_err(dev, "This driver version supports only revisions %d to %d.\n",
+			 MLX4_COMMAND_INTERFACE_MIN_REV, MLX4_COMMAND_INTERFACE_MAX_REV);
 		err = -ENODEV;
 		goto out;
 	}
 
+	if (cmd_if_rev < MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS)
+		dev->flags |= MLX4_FLAG_OLD_PORT_CMDS;
+
 	MLX4_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET);
 	cmd->max_cmds = 1 << lg;
 
@@ -708,13 +742,15 @@
 	return err;
 }
 
-int mlx4_INIT_PORT(struct mlx4_dev *dev, struct mlx4_init_port_param *param, int port)
+int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
 {
 	struct mlx4_cmd_mailbox *mailbox;
 	u32 *inbox;
 	int err;
 	u32 flags;
+	u16 field;
 
+	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
 #define INIT_PORT_IN_SIZE          256
 #define INIT_PORT_FLAGS_OFFSET     0x00
 #define INIT_PORT_FLAG_SIG         (1 << 18)
@@ -729,32 +765,32 @@
 #define INIT_PORT_NODE_GUID_OFFSET 0x18
 #define INIT_PORT_SI_GUID_OFFSET   0x20
 
-	mailbox = mlx4_alloc_cmd_mailbox(dev);
-	if (IS_ERR(mailbox))
-		return PTR_ERR(mailbox);
-	inbox = mailbox->buf;
+		mailbox = mlx4_alloc_cmd_mailbox(dev);
+		if (IS_ERR(mailbox))
+			return PTR_ERR(mailbox);
+		inbox = mailbox->buf;
 
-	memset(inbox, 0, INIT_PORT_IN_SIZE);
+		memset(inbox, 0, INIT_PORT_IN_SIZE);
 
-	flags = 0;
-	flags |= param->set_guid0     ? INIT_PORT_FLAG_G0  : 0;
-	flags |= param->set_node_guid ? INIT_PORT_FLAG_NG  : 0;
-	flags |= param->set_si_guid   ? INIT_PORT_FLAG_SIG : 0;
-	flags |= (param->vl_cap & 0xf) << INIT_PORT_VL_SHIFT;
-	flags |= (param->port_width_cap & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT;
-	MLX4_PUT(inbox, flags,            INIT_PORT_FLAGS_OFFSET);
+		flags = 0;
+		flags |= (dev->caps.vl_cap[port] & 0xf) << INIT_PORT_VL_SHIFT;
+		flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT;
+		MLX4_PUT(inbox, flags,		  INIT_PORT_FLAGS_OFFSET);
 
-	MLX4_PUT(inbox, param->mtu,       INIT_PORT_MTU_OFFSET);
-	MLX4_PUT(inbox, param->max_gid,   INIT_PORT_MAX_GID_OFFSET);
-	MLX4_PUT(inbox, param->max_pkey,  INIT_PORT_MAX_PKEY_OFFSET);
-	MLX4_PUT(inbox, param->guid0,     INIT_PORT_GUID0_OFFSET);
-	MLX4_PUT(inbox, param->node_guid, INIT_PORT_NODE_GUID_OFFSET);
-	MLX4_PUT(inbox, param->si_guid,   INIT_PORT_SI_GUID_OFFSET);
+		field = 128 << dev->caps.mtu_cap[port];
+		MLX4_PUT(inbox, field, INIT_PORT_MTU_OFFSET);
+		field = dev->caps.gid_table_len[port];
+		MLX4_PUT(inbox, field, INIT_PORT_MAX_GID_OFFSET);
+		field = dev->caps.pkey_table_len[port];
+		MLX4_PUT(inbox, field, INIT_PORT_MAX_PKEY_OFFSET);
 
-	err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_INIT_PORT,
-		       MLX4_CMD_TIME_CLASS_A);
+		err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_INIT_PORT,
+			       MLX4_CMD_TIME_CLASS_A);
 
-	mlx4_free_cmd_mailbox(dev, mailbox);
+		mlx4_free_cmd_mailbox(dev, mailbox);
+	} else
+		err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
+			       MLX4_CMD_TIME_CLASS_A);
 
 	return err;
 }
diff --git a/drivers/net/mlx4/fw.h b/drivers/net/mlx4/fw.h
index 2616fa5..296254a 100644
--- a/drivers/net/mlx4/fw.h
+++ b/drivers/net/mlx4/fw.h
@@ -59,13 +59,13 @@
 	int max_responder_per_qp;
 	int max_rdma_global;
 	int local_ca_ack_delay;
-	int max_mtu;
-	int max_port_width;
-	int max_vl;
 	int num_ports;
-	int max_gids;
+	int max_mtu[MLX4_MAX_PORTS + 1];
+	int max_port_width[MLX4_MAX_PORTS + 1];
+	int max_vl[MLX4_MAX_PORTS + 1];
+	int max_gids[MLX4_MAX_PORTS + 1];
+	int max_pkeys[MLX4_MAX_PORTS + 1];
 	u16 stat_rate_support;
-	int max_pkeys;
 	u32 flags;
 	int reserved_uars;
 	int uar_size;
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index d4172937..41eafeb 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -88,6 +88,7 @@
 static int __devinit mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 {
 	int err;
+	int i;
 
 	err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
 	if (err) {
@@ -117,11 +118,15 @@
 	}
 
 	dev->caps.num_ports	     = dev_cap->num_ports;
+	for (i = 1; i <= dev->caps.num_ports; ++i) {
+		dev->caps.vl_cap[i]	    = dev_cap->max_vl[i];
+		dev->caps.mtu_cap[i]	    = dev_cap->max_mtu[i];
+		dev->caps.gid_table_len[i]  = dev_cap->max_gids[i];
+		dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i];
+		dev->caps.port_width_cap[i] = dev_cap->max_port_width[i];
+	}
+
 	dev->caps.num_uars	     = dev_cap->uar_size / PAGE_SIZE;
-	dev->caps.vl_cap	     = dev_cap->max_vl;
-	dev->caps.mtu_cap	     = dev_cap->max_mtu;
-	dev->caps.gid_table_len	     = dev_cap->max_gids;
-	dev->caps.pkey_table_len     = dev_cap->max_pkeys;
 	dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
 	dev->caps.bf_reg_size	     = dev_cap->bf_reg_size;
 	dev->caps.bf_regs_per_page   = dev_cap->bf_regs_per_page;
@@ -148,7 +153,6 @@
 	dev->caps.reserved_mrws	     = dev_cap->reserved_mrws;
 	dev->caps.reserved_uars	     = dev_cap->reserved_uars;
 	dev->caps.reserved_pds	     = dev_cap->reserved_pds;
-	dev->caps.port_width_cap     = dev_cap->max_port_width;
 	dev->caps.mtt_entry_sz	     = MLX4_MTT_ENTRY_PER_SEG * dev_cap->mtt_entry_sz;
 	dev->caps.page_size_cap	     = ~(u32) (dev_cap->min_page_sz - 1);
 	dev->caps.flags		     = dev_cap->flags;
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 66eb068..4e711a9 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -267,7 +267,9 @@
 	u64 tod;
 	cpuid_t cpu_id;
 	u32 arch_id;
+	u32 volnr;
 	u32 build_arch;
+	u64 rmem_size;
 	char pad2[4016];
 } __attribute__((packed,__aligned__(16)));
 
@@ -559,6 +561,7 @@
 	else
 		hdr->arch_id = DUMP_ARCH_S390;
 	hdr->mem_size = sys_info.mem_size;
+	hdr->rmem_size = sys_info.mem_size;
 	hdr->mem_end = sys_info.mem_size;
 	hdr->num_pages = sys_info.mem_size / PAGE_SIZE;
 	hdr->tod = get_clock();
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index ec71061..71caf2d 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -2033,6 +2033,7 @@
 			starget_for_each_device(tp->starget, NULL,
 						esp_clear_hold);
 	}
+	esp->flags &= ~ESP_FLAG_RESETTING;
 }
 
 /* Runs under host->lock */
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 9804c0c..cc5efc1 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -655,10 +655,9 @@
 static struct file_system_type fuseblk_fs_type = {
 	.owner		= THIS_MODULE,
 	.name		= "fuseblk",
-	.fs_flags	= FS_HAS_SUBTYPE,
 	.get_sb		= fuse_get_sb_blk,
 	.kill_sb	= kill_block_super,
-	.fs_flags	= FS_REQUIRES_DEV,
+	.fs_flags	= FS_REQUIRES_DEV | FS_HAS_SUBTYPE,
 };
 
 static inline int register_fuseblk(void)
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index aa083dd..e6b46b3 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -736,15 +736,13 @@
 			can_do_mlock());
 }
 
-struct file *hugetlb_zero_setup(size_t size)
+struct file *hugetlb_file_setup(const char *name, size_t size)
 {
 	int error = -ENOMEM;
 	struct file *file;
 	struct inode *inode;
 	struct dentry *dentry, *root;
 	struct qstr quick_string;
-	char buf[16];
-	static atomic_t counter;
 
 	if (!hugetlbfs_vfsmount)
 		return ERR_PTR(-ENOENT);
@@ -756,8 +754,7 @@
 		return ERR_PTR(-ENOMEM);
 
 	root = hugetlbfs_vfsmount->mnt_root;
-	snprintf(buf, 16, "%u", atomic_inc_return(&counter));
-	quick_string.name = buf;
+	quick_string.name = name;
 	quick_string.len = strlen(quick_string.name);
 	quick_string.hash = 0;
 	dentry = d_alloc(root, &quick_string);
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 1f01294..bf7de0b 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -100,14 +100,23 @@
 	clear_inode(inode);
 }
 
+/*
+ * If we are going to release an inode from memory, we discard preallocation
+ * and truncate the last inode extent to the proper length. We could use
+ * drop_inode(), but it's called under inode_lock and thus we cannot mark the
+ * inode dirty there. We use clear_inode(), but then we have to make sure the
+ * inode is written out, as that does not happen automatically.
+ */
 void udf_clear_inode(struct inode *inode)
 {
 	if (!(inode->i_sb->s_flags & MS_RDONLY)) {
 		lock_kernel();
+		/* Discard preallocation for directories, symlinks, etc. */
 		udf_discard_prealloc(inode);
+		udf_truncate_tail_extent(inode);
 		unlock_kernel();
+		write_inode_now(inode, 1);
 	}
-
 	kfree(UDF_I_DATA(inode));
 	UDF_I_DATA(inode) = NULL;
 }
diff --git a/fs/udf/truncate.c b/fs/udf/truncate.c
index 77975ae..60d2776 100644
--- a/fs/udf/truncate.c
+++ b/fs/udf/truncate.c
@@ -61,7 +61,11 @@
 	}
 }
 
-void udf_discard_prealloc(struct inode * inode)
+/*
+ * Truncate the last extent to match i_size. This function assumes
+ * that the preallocation extent has already been truncated.
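+ * E.g. with 2048-byte extents and i_size == 3000, the second extent
+ * pushes lbcount to 4096 > i_size and is cut back to
+ * nelen = 2048 - (4096 - 3000) = 952 bytes.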
+ */
+void udf_truncate_tail_extent(struct inode *inode)
 {
 	struct extent_position epos = { NULL, 0, {0, 0}};
 	kernel_lb_addr eloc;
@@ -71,6 +75,62 @@
 	int adsize;
 
 	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB ||
+	    inode->i_size == UDF_I_LENEXTENTS(inode))
+		return;
+	/* Are we going to delete the file anyway? */
+	if (inode->i_nlink == 0)
+		return;
+
+	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
+		adsize = sizeof(short_ad);
+	else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
+		adsize = sizeof(long_ad);
+	else
+		BUG();
+
+	/* Find the last extent in the file */
+	while ((netype = udf_next_aext(inode, &epos, &eloc, &elen, 1)) != -1) {
+		etype = netype;
+		lbcount += elen;
+		if (lbcount > inode->i_size) {
+			if (lbcount - inode->i_size >= inode->i_sb->s_blocksize)
+				printk(KERN_WARNING
+				       "udf_truncate_tail_extent(): Too long "
+				       "extent after EOF in inode %u: i_size: "
+				       "%Ld lbcount: %Ld extent %u+%u\n",
+				       (unsigned)inode->i_ino,
+				       (long long)inode->i_size,
+				       (long long)lbcount,
+				       (unsigned)eloc.logicalBlockNum,
+				       (unsigned)elen);
+			nelen = elen - (lbcount - inode->i_size);
+			epos.offset -= adsize;
+			extent_trunc(inode, &epos, eloc, etype, elen, nelen);
+			epos.offset += adsize;
+			if (udf_next_aext(inode, &epos, &eloc, &elen, 1) != -1)
+				printk(KERN_ERR "udf_truncate_tail_extent(): "
+				       "Extent after EOF in inode %u.\n",
+				       (unsigned)inode->i_ino);
+			break;
+		}
+	}
+	/* This inode entry is in-memory only and thus we don't have to mark
+	 * the inode dirty */
+	UDF_I_LENEXTENTS(inode) = inode->i_size;
+	brelse(epos.bh);
+}
+
+void udf_discard_prealloc(struct inode *inode)
+{
+	struct extent_position epos = { NULL, 0, {0, 0}};
+	kernel_lb_addr eloc;
+	uint32_t elen;
+	uint64_t lbcount = 0;
+	int8_t etype = -1, netype;
+	int adsize;
+
+	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB ||
 		inode->i_size == UDF_I_LENEXTENTS(inode))
 		return;
 
@@ -84,31 +144,18 @@
 	epos.block = UDF_I_LOCATION(inode);
 
 	/* Find the last extent in the file */
-	while ((netype = udf_next_aext(inode, &epos, &eloc, &elen, 1)) != -1)
-	{
+	while ((netype = udf_next_aext(inode, &epos, &eloc, &elen, 1)) != -1) {
 		etype = netype;
 		lbcount += elen;
-		if (lbcount > inode->i_size && lbcount - elen < inode->i_size)
-		{
-			WARN_ON(lbcount - inode->i_size >= inode->i_sb->s_blocksize);
-			nelen = elen - (lbcount - inode->i_size);
-			epos.offset -= adsize;
-			extent_trunc(inode, &epos, eloc, etype, elen, nelen);
-			epos.offset += adsize;
-			lbcount = inode->i_size;
-		}
 	}
 	if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
 		epos.offset -= adsize;
 		lbcount -= elen;
 		extent_trunc(inode, &epos, eloc, etype, elen, 0);
-		if (!epos.bh)
-		{
+		if (!epos.bh) {
 			UDF_I_LENALLOC(inode) = epos.offset - udf_file_entry_alloc_offset(inode);
 			mark_inode_dirty(inode);
-		}
-		else
-		{
+		} else {
 			struct allocExtDesc *aed = (struct allocExtDesc *)(epos.bh->b_data);
 			aed->lengthAllocDescs = cpu_to_le32(epos.offset - sizeof(struct allocExtDesc));
 			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
@@ -118,9 +165,9 @@
 			mark_buffer_dirty_inode(epos.bh, inode);
 		}
 	}
+	/* This inode entry is in-memory only and thus we don't have to mark
+	 * the inode dirty */
 	UDF_I_LENEXTENTS(inode) = lbcount;
-
-	WARN_ON(lbcount != inode->i_size);
 	brelse(epos.bh);
 }
 
diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h
index 67ded28..f581f2f 100644
--- a/fs/udf/udfdecl.h
+++ b/fs/udf/udfdecl.h
@@ -146,6 +146,7 @@
 extern struct inode * udf_new_inode (struct inode *, int, int *);
 
 /* truncate.c */
+extern void udf_truncate_tail_extent(struct inode *);
 extern void udf_discard_prealloc(struct inode *);
 extern void udf_truncate_extents(struct inode *);
 
diff --git a/fs/xfs/linux-2.6/xfs_lrw.c b/fs/xfs/linux-2.6/xfs_lrw.c
index 86fb671..ed90403 100644
--- a/fs/xfs/linux-2.6/xfs_lrw.c
+++ b/fs/xfs/linux-2.6/xfs_lrw.c
@@ -159,7 +159,7 @@
 		if (status)
 			goto unlock;
 
-		memclear_highpage_flush(page, offset, bytes);
+		zero_user_page(page, offset, bytes, KM_USER0);
 
 		status = mapping->a_ops->commit_write(NULL, page, offset,
 							offset + bytes);
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index dc8f99e..7d7bcf9 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -27,13 +27,20 @@
  * Largely same as above, but only sets the access flags (dirty,
  * accessed, and writable). Furthermore, we know it always gets set
  * to a "more permissive" setting, which allows most architectures
- * to optimize this.
+ * to optimize this. We return whether the PTE actually changed, which
+ * in turn instructs the caller to do things like update_mmu_cache().
+ * This used to be done in the caller, but sparc needs minor faults to
+ * force that call on sun4c, so we changed this macro slightly.
  */
 #define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
-do {				  					  \
-	set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry);	  \
-	flush_tlb_page(__vma, __address);				  \
-} while (0)
+({									  \
+	int __changed = !pte_same(*(__ptep), __entry);			  \
+	if (__changed) {						  \
+		set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
+		flush_tlb_page(__vma, __address);			  \
+	}								  \
+	__changed;							  \
+})
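+/*
+ * Caller sketch (illustrative only): generic mm code can now do
+ *
+ *	if (ptep_set_access_flags(vma, address, ptep, entry, dirty))
+ *		update_mmu_cache(vma, address, entry);
+ *
+ * and skip the MMU-cache update entirely when nothing changed.
+ */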
 #endif
 
 #ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
diff --git a/include/asm-i386/dma-mapping.h b/include/asm-i386/dma-mapping.h
index 183eebe..f1d72d1 100644
--- a/include/asm-i386/dma-mapping.h
+++ b/include/asm-i386/dma-mapping.h
@@ -123,6 +123,8 @@
 	return 0;
 }
 
+extern int forbid_dac;
+
 static inline int
 dma_supported(struct device *dev, u64 mask)
 {
@@ -134,6 +136,10 @@
         if(mask < 0x00ffffff)
                 return 0;
 
+	/* Work around chipset bugs */
+	if (forbid_dac > 0 && mask > 0xffffffffULL)
+		return 0;
+
 	return 1;
 }
 
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index 2394589..628fa77 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -285,32 +285,36 @@
  */
 #define  __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
 #define ptep_set_access_flags(vma, address, ptep, entry, dirty)		\
-do {									\
-	if (dirty) {							\
+({									\
+	int __changed = !pte_same(*(ptep), entry);			\
+	if (__changed && dirty) {					\
 		(ptep)->pte_low = (entry).pte_low;			\
 		pte_update_defer((vma)->vm_mm, (address), (ptep));	\
 		flush_tlb_page(vma, address);				\
 	}								\
-} while (0)
+	__changed;							\
+})
 
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
 #define ptep_test_and_clear_dirty(vma, addr, ptep) ({			\
-	int ret = 0;							\
-	if (pte_dirty(*ptep))						\
-		ret = test_and_clear_bit(_PAGE_BIT_DIRTY, &ptep->pte_low); \
-	if (ret)							\
-		pte_update_defer(vma->vm_mm, addr, ptep);		\
-	ret;								\
+	int __ret = 0;							\
+	if (pte_dirty(*(ptep)))						\
+		__ret = test_and_clear_bit(_PAGE_BIT_DIRTY,		\
+						&(ptep)->pte_low);	\
+	if (__ret)							\
+		pte_update((vma)->vm_mm, addr, ptep);			\
+	__ret;								\
 })
 
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
 #define ptep_test_and_clear_young(vma, addr, ptep) ({			\
-	int ret = 0;							\
-	if (pte_young(*ptep))						\
-		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED, &ptep->pte_low); \
-	if (ret)							\
-		pte_update_defer(vma->vm_mm, addr, ptep);		\
-	ret;								\
+	int __ret = 0;							\
+	if (pte_young(*(ptep)))						\
+		__ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,		\
+						&(ptep)->pte_low);	\
+	if (__ret)							\
+		pte_update((vma)->vm_mm, addr, ptep);			\
+	__ret;								\
 })
 
 /*
diff --git a/include/asm-ia64/pgtable.h b/include/asm-ia64/pgtable.h
index 670b706..6580f31 100644
--- a/include/asm-ia64/pgtable.h
+++ b/include/asm-ia64/pgtable.h
@@ -533,16 +533,23 @@
  * daccess_bit in ivt.S).
  */
 #ifdef CONFIG_SMP
-# define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable)	\
-do {											\
-	if (__safely_writable) {							\
-		set_pte(__ptep, __entry);						\
-		flush_tlb_page(__vma, __addr);						\
-	}										\
-} while (0)
+# define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable) \
+({									\
+	int __changed = !pte_same(*(__ptep), __entry);			\
+	if (__changed && __safely_writable) {				\
+		set_pte(__ptep, __entry);				\
+		flush_tlb_page(__vma, __addr);				\
+	}								\
+	__changed;							\
+})
 #else
-# define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable)	\
-	ptep_establish(__vma, __addr, __ptep, __entry)
+# define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable) \
+({									\
+	int __changed = !pte_same(*(__ptep), __entry);			\
+	if (__changed)							\
+		ptep_establish(__vma, __addr, __ptep, __entry);		\
+	__changed;							\
+})
 #endif
 
 #  ifdef CONFIG_VIRTUAL_MEM_MAP
diff --git a/include/asm-mips/irq.h b/include/asm-mips/irq.h
index 91803ba..3ca6a07 100644
--- a/include/asm-mips/irq.h
+++ b/include/asm-mips/irq.h
@@ -72,4 +72,13 @@
 extern void alloc_legacy_irqno(void);
 extern void free_irqno(unsigned int irq);
 
+/*
+ * Before R2 the timer and performance counter interrupts were both fixed to
+ * IE7.  Since R2 their number has to be read from the c0_intctl register.
+ */
+#define CP0_LEGACY_COMPARE_IRQ 7
+
+extern int cp0_compare_irq;
+extern int cp0_perfcount_irq;
+
 #endif /* _ASM_IRQ_H */
diff --git a/include/asm-mips/mips-boards/atlasint.h b/include/asm-mips/mips-boards/atlasint.h
index 76add42..93ba1c1 100644
--- a/include/asm-mips/mips-boards/atlasint.h
+++ b/include/asm-mips/mips-boards/atlasint.h
@@ -28,11 +28,6 @@
 
 #include <irq.h>
 
-/*
- * Interrupts 0..7 are used for Atlas CPU interrupts (nonEIC mode)
- */
-#define MIPSCPU_INT_BASE	MIPS_CPU_IRQ_BASE
-
 /* CPU interrupt offsets */
 #define MIPSCPU_INT_SW0		0
 #define MIPSCPU_INT_SW1		1
@@ -42,7 +37,6 @@
 #define MIPSCPU_INT_MB2		4
 #define MIPSCPU_INT_MB3		5
 #define MIPSCPU_INT_MB4		6
-#define MIPSCPU_INT_CPUCTR	7
 
 /*
  * Interrupts 8..39 are used for Atlas interrupt controller interrupts
diff --git a/include/asm-mips/mips-boards/maltaint.h b/include/asm-mips/mips-boards/maltaint.h
index 9180d64..7461318 100644
--- a/include/asm-mips/mips-boards/maltaint.h
+++ b/include/asm-mips/mips-boards/maltaint.h
@@ -32,11 +32,6 @@
  */
 #define MALTA_INT_BASE		0
 
-/*
- * Interrupts 16..23 are used for Malta CPU interrupts (nonEIC mode)
- */
-#define MIPSCPU_INT_BASE	MIPS_CPU_IRQ_BASE
-
 /* CPU interrupt offsets */
 #define MIPSCPU_INT_SW0		0
 #define MIPSCPU_INT_SW1		1
@@ -49,7 +44,6 @@
 #define MIPSCPU_INT_COREHI	MIPSCPU_INT_MB3
 #define MIPSCPU_INT_MB4		6
 #define MIPSCPU_INT_CORELO	MIPSCPU_INT_MB4
-#define MIPSCPU_INT_CPUCTR	7
 
 /*
  * Interrupts 64..127 are used for Soc-it Classic interrupts
diff --git a/include/asm-mips/mips-boards/seadint.h b/include/asm-mips/mips-boards/seadint.h
index 4f6a393..e710bae 100644
--- a/include/asm-mips/mips-boards/seadint.h
+++ b/include/asm-mips/mips-boards/seadint.h
@@ -22,14 +22,7 @@
 
 #include <irq.h>
 
-/*
- * Interrupts 0..7 are used for SEAD CPU interrupts
- */
-#define MIPSCPU_INT_BASE	MIPS_CPU_IRQ_BASE
-
 #define MIPSCPU_INT_UART0	2
 #define MIPSCPU_INT_UART1	3
 
-#define MIPSCPU_INT_CPUCTR	7
-
 #endif /* !(_MIPS_SEADINT_H) */
diff --git a/include/asm-mips/mips-boards/simint.h b/include/asm-mips/mips-boards/simint.h
index 54f2fe6..8ef6db7 100644
--- a/include/asm-mips/mips-boards/simint.h
+++ b/include/asm-mips/mips-boards/simint.h
@@ -21,15 +21,11 @@
 
 #define SIM_INT_BASE		0
 #define MIPSCPU_INT_MB0		2
-#define MIPSCPU_INT_BASE	MIPS_CPU_IRQ_BASE
 #define MIPS_CPU_TIMER_IRQ	7
 
 
-#define MIPSCPU_INT_CPUCTR	7
-
 #define MSC01E_INT_BASE		64
 
-#define MIPSCPU_INT_CPUCTR	7
 #define MSC01E_INT_CPUCTR	11
 
 #endif
diff --git a/include/asm-powerpc/pgtable-ppc32.h b/include/asm-powerpc/pgtable-ppc32.h
index c863bdb..7fb730c 100644
--- a/include/asm-powerpc/pgtable-ppc32.h
+++ b/include/asm-powerpc/pgtable-ppc32.h
@@ -673,10 +673,14 @@
 }
 
 #define  ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
-	do {								   \
-		__ptep_set_access_flags(__ptep, __entry, __dirty);	   \
-		flush_tlb_page_nohash(__vma, __address);	       	   \
-	} while(0)
+({									   \
+	int __changed = !pte_same(*(__ptep), __entry);			   \
+	if (__changed) {						   \
+		__ptep_set_access_flags(__ptep, __entry, __dirty);    	   \
+		flush_tlb_page_nohash(__vma, __address);		   \
+	}								   \
+	__changed;							   \
+})
 
 /*
  * Macro to mark a page protection value as "uncacheable".
diff --git a/include/asm-powerpc/pgtable-ppc64.h b/include/asm-powerpc/pgtable-ppc64.h
index 704c4e6..3cfd98f 100644
--- a/include/asm-powerpc/pgtable-ppc64.h
+++ b/include/asm-powerpc/pgtable-ppc64.h
@@ -413,10 +413,14 @@
 	:"cc");
 }
 #define  ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
-	do {								   \
-		__ptep_set_access_flags(__ptep, __entry, __dirty);	   \
-		flush_tlb_page_nohash(__vma, __address);	       	   \
-	} while(0)
+({									   \
+	int __changed = !pte_same(*(__ptep), __entry);			   \
+	if (__changed) {						   \
+		__ptep_set_access_flags(__ptep, __entry, __dirty);    	   \
+		flush_tlb_page_nohash(__vma, __address);		   \
+	}								   \
+	__changed;							   \
+})
 
 /*
  * Macro to mark a page protection value as "uncacheable".
diff --git a/include/asm-ppc/pgtable.h b/include/asm-ppc/pgtable.h
index bed452d..9d0ce9f 100644
--- a/include/asm-ppc/pgtable.h
+++ b/include/asm-ppc/pgtable.h
@@ -694,10 +694,14 @@
 }
 
 #define  ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
-	do {								   \
-		__ptep_set_access_flags(__ptep, __entry, __dirty);	   \
-		flush_tlb_page_nohash(__vma, __address);	       	   \
-	} while(0)
+({									   \
+	int __changed = !pte_same(*(__ptep), __entry);			   \
+	if (__changed) {						   \
+		__ptep_set_access_flags(__ptep, __entry, __dirty);    	   \
+		flush_tlb_page_nohash(__vma, __address);		   \
+	}								   \
+	__changed;							   \
+})
 
 /*
  * Macro to mark a page protection value as "uncacheable".
diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h
index 8fe8d42..0a307bb 100644
--- a/include/asm-s390/pgtable.h
+++ b/include/asm-s390/pgtable.h
@@ -744,7 +744,12 @@
 }
 
 #define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
-	ptep_establish(__vma, __address, __ptep, __entry)
+({									  \
+	int __changed = !pte_same(*(__ptep), __entry);			  \
+	if (__changed)							  \
+		ptep_establish(__vma, __address, __ptep, __entry);	  \
+	__changed;							  \
+})
 
 /*
  * Test and clear dirty bit in storage key.
diff --git a/include/asm-s390/processor.h b/include/asm-s390/processor.h
index e0fcea8..5cb480a 100644
--- a/include/asm-s390/processor.h
+++ b/include/asm-s390/processor.h
@@ -216,6 +216,11 @@
 	barrier();
 }
 
+static inline void psw_set_key(unsigned int key)
+{
+	asm volatile("spka 0(%0)" : : "d" (key));
+}
+
 /*
  * Set PSW to specified value.
  */
diff --git a/include/asm-s390/ptrace.h b/include/asm-s390/ptrace.h
index fa6ca87..332ee73 100644
--- a/include/asm-s390/ptrace.h
+++ b/include/asm-s390/ptrace.h
@@ -470,14 +470,7 @@
 #define regs_return_value(regs)((regs)->gprs[2])
 #define profile_pc(regs) instruction_pointer(regs)
 extern void show_regs(struct pt_regs * regs);
-#endif
-
-static inline void
-psw_set_key(unsigned int key)
-{
-	asm volatile("spka 0(%0)" : : "d" (key));
-}
-
+#endif /* __KERNEL__ */
 #endif /* __ASSEMBLY__ */
 
 #endif /* _S390_PTRACE_H */
diff --git a/include/asm-sparc/pgtable.h b/include/asm-sparc/pgtable.h
index 4f0a5ba..59229ae 100644
--- a/include/asm-sparc/pgtable.h
+++ b/include/asm-sparc/pgtable.h
@@ -446,6 +446,17 @@
 #define GET_IOSPACE(pfn)		(pfn >> (BITS_PER_LONG - 4))
 #define GET_PFN(pfn)			(pfn & 0x0fffffffUL)
 
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
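+/*
+ * Always report a change on sun4c so that the generic code calls
+ * update_mmu_cache(); sun4c depends on that minor fault (see the
+ * comment in asm-generic/pgtable.h).
+ */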
+#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
+({									  \
+	int __changed = !pte_same(*(__ptep), __entry);			  \
+	if (__changed) {						  \
+		set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
+		flush_tlb_page(__vma, __address);			  \
+	}								  \
+	(sparc_cpu_model == sun4c) || __changed;			  \
+})
+
 #include <asm-generic/pgtable.h>
 
 #endif /* !(__ASSEMBLY__) */
diff --git a/include/asm-um/a.out.h b/include/asm-um/a.out.h
index 50cee7b..7016b89 100644
--- a/include/asm-um/a.out.h
+++ b/include/asm-um/a.out.h
@@ -5,6 +5,7 @@
 #include "choose-mode.h"
 
 #undef STACK_TOP
+#undef STACK_TOP_MAX
 
 extern unsigned long stacksizelim;
 
diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h
index 08b9831..0a71e0b 100644
--- a/include/asm-x86_64/pgtable.h
+++ b/include/asm-x86_64/pgtable.h
@@ -395,12 +395,14 @@
  * bit at the same time. */
 #define  __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
 #define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
-	do {								  \
-		if (__dirty) {						  \
-			set_pte(__ptep, __entry);			  \
-			flush_tlb_page(__vma, __address);		  \
-		}							  \
-	} while (0)
+({									  \
+	int __changed = !pte_same(*(__ptep), __entry);			  \
+	if (__changed && __dirty) {					  \
+		set_pte(__ptep, __entry);			  	  \
+		flush_tlb_page(__vma, __address);		  	  \
+	}								  \
+	__changed;							  \
+})
 
 /* Encode and de-code a swap entry */
 #define __swp_type(x)			(((x).val >> 1) & 0x3f)
diff --git a/include/asm-x86_64/unistd.h b/include/asm-x86_64/unistd.h
index ae1ed05..8696f8a 100644
--- a/include/asm-x86_64/unistd.h
+++ b/include/asm-x86_64/unistd.h
@@ -626,9 +626,9 @@
 __SYSCALL(__NR_epoll_pwait, sys_epoll_pwait)
 #define __NR_signalfd		282
 __SYSCALL(__NR_signalfd, sys_signalfd)
-#define __NR_timerfd		282
+#define __NR_timerfd		283
 __SYSCALL(__NR_timerfd, sys_timerfd)
-#define __NR_eventfd		283
+#define __NR_eventfd		284
 __SYSCALL(__NR_eventfd, sys_eventfd)
 
 #ifndef __NO_STUBS
diff --git a/include/linux/futex.h b/include/linux/futex.h
index 899fc7f..9965035 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -17,7 +17,6 @@
 #define FUTEX_LOCK_PI		6
 #define FUTEX_UNLOCK_PI		7
 #define FUTEX_TRYLOCK_PI	8
-#define FUTEX_CMP_REQUEUE_PI	9
 
 #define FUTEX_PRIVATE_FLAG	128
 #define FUTEX_CMD_MASK		~FUTEX_PRIVATE_FLAG
@@ -98,14 +97,9 @@
 #define FUTEX_OWNER_DIED	0x40000000
 
 /*
- * Some processes have been requeued on this PI-futex
- */
-#define FUTEX_WAITER_REQUEUED	0x20000000
-
-/*
  * The rest of the robust-futex field is for the TID:
  */
-#define FUTEX_TID_MASK		0x0fffffff
+#define FUTEX_TID_MASK		0x3fffffff
 
 /*
  * This limit protects against a deliberately circular list.
@@ -139,7 +133,6 @@
 #define FUT_OFF_MMSHARED 2 /* We set bit 1 if key has a reference on mm */
 
 union futex_key {
-	u32 __user *uaddr;
 	struct {
 		unsigned long pgoff;
 		struct inode *inode;
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index b4570b6..2c13715 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -163,7 +163,7 @@
 
 extern const struct file_operations hugetlbfs_file_operations;
 extern struct vm_operations_struct hugetlb_vm_ops;
-struct file *hugetlb_zero_setup(size_t);
+struct file *hugetlb_file_setup(const char *name, size_t);
 int hugetlb_get_quota(struct address_space *mapping);
 void hugetlb_put_quota(struct address_space *mapping);
 
@@ -185,7 +185,7 @@
 
 #define is_file_hugepages(file)		0
 #define set_file_hugepages(file)	BUG()
-#define hugetlb_zero_setup(size)	ERR_PTR(-ENOSYS)
+#define hugetlb_file_setup(name,size)	ERR_PTR(-ENOSYS)
 
 #endif /* !CONFIG_HUGETLBFS */
 
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index 4fb552d..7d1eaa97 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -54,6 +54,7 @@
 	MLX4_CMD_INIT_PORT	 = 0x9,
 	MLX4_CMD_CLOSE_PORT	 = 0xa,
 	MLX4_CMD_QUERY_HCA	 = 0xb,
+	MLX4_CMD_QUERY_PORT	 = 0x43,
 	MLX4_CMD_SET_PORT	 = 0xc,
 	MLX4_CMD_ACCESS_DDR	 = 0x2e,
 	MLX4_CMD_MAP_ICM	 = 0xffa,
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 8c5f8fd..b372f59 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -41,6 +41,7 @@
 
 enum {
 	MLX4_FLAG_MSI_X		= 1 << 0,
+	MLX4_FLAG_OLD_PORT_CMDS	= 1 << 1,
 };
 
 enum {
@@ -131,10 +132,10 @@
 struct mlx4_caps {
 	u64			fw_ver;
 	int			num_ports;
-	int			vl_cap;
-	int			mtu_cap;
-	int			gid_table_len;
-	int			pkey_table_len;
+	int			vl_cap[MLX4_MAX_PORTS + 1];
+	int			mtu_cap[MLX4_MAX_PORTS + 1];
+	int			gid_table_len[MLX4_MAX_PORTS + 1];
+	int			pkey_table_len[MLX4_MAX_PORTS + 1];
 	int			local_ca_ack_delay;
 	int			num_uars;
 	int			bf_reg_size;
@@ -174,7 +175,7 @@
 	u32			page_size_cap;
 	u32			flags;
 	u16			stat_rate_support;
-	u8			port_width_cap;
+	u8			port_width_cap[MLX4_MAX_PORTS + 1];
 };
 
 struct mlx4_buf_list {
@@ -322,7 +323,7 @@
 void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq);
 int mlx4_srq_arm(struct mlx4_dev *dev, struct mlx4_srq *srq, int limit_watermark);
 
-int mlx4_INIT_PORT(struct mlx4_dev *dev, struct mlx4_init_port_param *param, int port);
+int mlx4_INIT_PORT(struct mlx4_dev *dev, int port);
 int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port);
 
 int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16]);
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 9eeb61a..10c57d2 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -269,6 +269,10 @@
 	__be64			addr;
 };
 
+enum {
+	MLX4_INLINE_ALIGN	= 64,
+};
+
 struct mlx4_wqe_inline_seg {
 	__be32			byte_count;
 };
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index a0ad374..6207a3d 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -28,7 +28,7 @@
 	int size;		/* The size of an object including meta data */
 	int objsize;		/* The size of an object without meta data */
 	int offset;		/* Free pointer offset. */
-	unsigned int order;
+	int order;
 
 	/*
 	 * Avoid an extra cache line for UP, SMP and for the node local to
@@ -56,7 +56,13 @@
 /*
  * Kmalloc subsystem.
  */
-#define KMALLOC_SHIFT_LOW 3
+#if defined(ARCH_KMALLOC_MINALIGN) && ARCH_KMALLOC_MINALIGN > 8
+#define KMALLOC_MIN_SIZE ARCH_KMALLOC_MINALIGN
+#else
+#define KMALLOC_MIN_SIZE 8
+#endif
+
+#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
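+
+/*
+ * E.g. an arch defining ARCH_KMALLOC_MINALIGN as 16 gets
+ * KMALLOC_MIN_SIZE 16 and KMALLOC_SHIFT_LOW 4, so the smallest
+ * kmalloc cache is 16 bytes.
+ */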
 
 /*
  * We keep the general caches in an array of slab caches that are used for
@@ -76,6 +82,9 @@
 	if (size > KMALLOC_MAX_SIZE)
 		return -1;
 
+	if (size <= KMALLOC_MIN_SIZE)
+		return KMALLOC_SHIFT_LOW;
+
 	if (size > 64 && size <= 96)
 		return 1;
 	if (size > 128 && size <= 192)
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index b6bedc3..1be5ea0 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -341,9 +341,14 @@
  * chip transactions together.
  *
  * (ii) When the transfer is the last one in the message, the chip may
- * stay selected until the next transfer.  This is purely a performance
- * hint; the controller driver may need to select a different device
- * for the next message.
+ * stay selected until the next transfer.  On multi-device SPI busses
+ * with nothing blocking messages going to other devices, this is just
+ * a performance hint; starting a message to another device deselects
+ * this one.  But in other cases, this can be used to ensure correctness.
+ * Some devices need protocol transactions to be built from a series of
+ * spi_message submissions, where the content of one message is determined
+ * by the results of previous messages and where the whole transaction
+ * ends when the chipselect goes inactive.
  *
  * The code that submits an spi_message (and its spi_transfers)
  * to the lower layers is responsible for managing its memory.
@@ -480,14 +485,15 @@
 /**
  * spi_setup - setup SPI mode and clock rate
  * @spi: the device whose settings are being modified
- * Context: can sleep
+ * Context: can sleep, and no requests are queued to the device
  *
  * SPI protocol drivers may need to update the transfer mode if the
- * device doesn't work with the mode 0 default.  They may likewise need
+ * device doesn't work with its default.  They may likewise need
  * to update clock rates or word sizes from initial values.  This function
  * changes those settings, and must be called from a context that can sleep.
- * The changes take effect the next time the device is selected and data
- * is transferred to or from it.
+ * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
+ * effect the next time the device is selected and data is transferred to
+ * or from it.  When this function returns, the spi device is deselected.
  *
  * Note that this call will fail if the protocol driver specifies an option
  * that the underlying controller or its driver does not support.  For
diff --git a/ipc/shm.c b/ipc/shm.c
index 4fefbad..0852f20 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -254,8 +254,10 @@
 
 	if (sfd->vm_ops->get_policy)
 		pol = sfd->vm_ops->get_policy(vma, addr);
-	else
+	else if (vma->vm_policy)
 		pol = vma->vm_policy;
+	else
+		pol = current->mempolicy;
 	return pol;
 }
 #endif
@@ -364,9 +366,10 @@
 		return error;
 	}
 
+	sprintf (name, "SYSV%08x", key);
 	if (shmflg & SHM_HUGETLB) {
-		/* hugetlb_zero_setup takes care of mlock user accounting */
-		file = hugetlb_zero_setup(size);
+		/* hugetlb_file_setup takes care of mlock user accounting */
+		file = hugetlb_file_setup(name, size);
 		shp->mlock_user = current->user;
 	} else {
 		int acctflag = VM_ACCOUNT;
@@ -377,7 +380,6 @@
 		if  ((shmflg & SHM_NORESERVE) &&
 				sysctl_overcommit_memory != OVERCOMMIT_NEVER)
 			acctflag = 0;
-		sprintf (name, "SYSV%08x", key);
 		file = shmem_file_setup(name, size, acctflag);
 	}
 	error = PTR_ERR(file);
@@ -397,6 +399,11 @@
 	shp->shm_nattch = 0;
 	shp->id = shm_buildid(ns, id, shp->shm_perm.seq);
 	shp->shm_file = file;
+	/*
+	 * shmid gets reported as "inode#" in /proc/pid/maps.
+	 * proc-ps tools use this. Changing this will break them.
+	 */
+	file->f_dentry->d_inode->i_ino = shp->id;
 
 	ns->shm_tot += numpages;
 	shm_unlock(shp);
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index f57854b..4c49188 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1682,9 +1682,9 @@
 
 	do_each_thread(g, p) {
 		if (p->cpuset == cs) {
-			pidarray[n++] = p->pid;
 			if (unlikely(n == npids))
 				goto array_full;
+			pidarray[n++] = p->pid;
 		}
 	} while_each_thread(g, p);
 
diff --git a/kernel/futex.c b/kernel/futex.c
index 3b7f771..df248f5 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -56,12 +56,6 @@
 
 #include "rtmutex_common.h"
 
-#ifdef CONFIG_DEBUG_RT_MUTEXES
-# include "rtmutex-debug.h"
-#else
-# include "rtmutex.h"
-#endif
-
 #define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8)
 
 /*
@@ -111,12 +105,6 @@
 	/* Optional priority inheritance state: */
 	struct futex_pi_state *pi_state;
 	struct task_struct *task;
-
-	/*
-	 * This waiter is used in case of requeue from a
-	 * normal futex to a PI-futex
-	 */
-	struct rt_mutex_waiter waiter;
 };
 
 /*
@@ -216,9 +204,6 @@
 	if (unlikely((vma->vm_flags & (VM_IO|VM_READ)) != VM_READ))
 		return (vma->vm_flags & VM_IO) ? -EPERM : -EACCES;
 
-	/* Save the user address in the ley */
-	key->uaddr = uaddr;
-
 	/*
 	 * Private mappings are handled in a simple way.
 	 *
@@ -636,8 +621,6 @@
 		int ret = 0;
 
 		newval = FUTEX_WAITERS | new_owner->pid;
-		/* Keep the FUTEX_WAITER_REQUEUED flag if it was set */
-		newval |= (uval & FUTEX_WAITER_REQUEUED);
 
 		pagefault_disable();
 		curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
@@ -750,259 +733,6 @@
 }
 
 /*
- * Called from futex_requeue_pi.
- * Set FUTEX_WAITERS and FUTEX_WAITER_REQUEUED flags on the
- * PI-futex value; search its associated pi_state if an owner exist
- * or create a new one without owner.
- */
-static inline int
-lookup_pi_state_for_requeue(u32 __user *uaddr, struct futex_hash_bucket *hb,
-			    union futex_key *key,
-			    struct futex_pi_state **pi_state)
-{
-	u32 curval, uval, newval;
-
-retry:
-	/*
-	 * We can't handle a fault cleanly because we can't
-	 * release the locks here. Simply return the fault.
-	 */
-	if (get_futex_value_locked(&curval, uaddr))
-		return -EFAULT;
-
-	/* set the flags FUTEX_WAITERS and FUTEX_WAITER_REQUEUED */
-	if ((curval & (FUTEX_WAITERS | FUTEX_WAITER_REQUEUED))
-	    != (FUTEX_WAITERS | FUTEX_WAITER_REQUEUED)) {
-		/*
-		 * No waiters yet, we prepare the futex to have some waiters.
-		 */
-
-		uval = curval;
-		newval = uval | FUTEX_WAITERS | FUTEX_WAITER_REQUEUED;
-
-		pagefault_disable();
-		curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
-		pagefault_enable();
-
-		if (unlikely(curval == -EFAULT))
-			return -EFAULT;
-		if (unlikely(curval != uval))
-			goto retry;
-	}
-
-	if (!(curval & FUTEX_TID_MASK)
-	    || lookup_pi_state(curval, hb, key, pi_state)) {
-		/* the futex has no owner (yet) or the lookup failed:
-		   allocate one pi_state without owner */
-
-		*pi_state = alloc_pi_state();
-
-		/* Already stores the key: */
-		(*pi_state)->key = *key;
-
-		/* init the mutex without owner */
-		__rt_mutex_init(&(*pi_state)->pi_mutex, NULL);
-	}
-
-	return 0;
-}
-
-/*
- * Keep the first nr_wake waiter from futex1, wake up one,
- * and requeue the next nr_requeue waiters following hashed on
- * one physical page to another physical page (PI-futex uaddr2)
- */
-static int futex_requeue_pi(u32 __user *uaddr1,
-			    struct rw_semaphore *fshared,
-			    u32 __user *uaddr2,
-			    int nr_wake, int nr_requeue, u32 *cmpval)
-{
-	union futex_key key1, key2;
-	struct futex_hash_bucket *hb1, *hb2;
-	struct plist_head *head1;
-	struct futex_q *this, *next;
-	struct futex_pi_state *pi_state2 = NULL;
-	struct rt_mutex_waiter *waiter, *top_waiter = NULL;
-	struct rt_mutex *lock2 = NULL;
-	int ret, drop_count = 0;
-
-	if (refill_pi_state_cache())
-		return -ENOMEM;
-
-retry:
-	/*
-	 * First take all the futex related locks:
-	 */
-	if (fshared)
-		down_read(fshared);
-
-	ret = get_futex_key(uaddr1, fshared, &key1);
-	if (unlikely(ret != 0))
-		goto out;
-	ret = get_futex_key(uaddr2, fshared, &key2);
-	if (unlikely(ret != 0))
-		goto out;
-
-	hb1 = hash_futex(&key1);
-	hb2 = hash_futex(&key2);
-
-	double_lock_hb(hb1, hb2);
-
-	if (likely(cmpval != NULL)) {
-		u32 curval;
-
-		ret = get_futex_value_locked(&curval, uaddr1);
-
-		if (unlikely(ret)) {
-			spin_unlock(&hb1->lock);
-			if (hb1 != hb2)
-				spin_unlock(&hb2->lock);
-
-			/*
-			 * If we would have faulted, release mmap_sem, fault
-			 * it in and start all over again.
-			 */
-			if (fshared)
-				up_read(fshared);
-
-			ret = get_user(curval, uaddr1);
-
-			if (!ret)
-				goto retry;
-
-			return ret;
-		}
-		if (curval != *cmpval) {
-			ret = -EAGAIN;
-			goto out_unlock;
-		}
-	}
-
-	head1 = &hb1->chain;
-	plist_for_each_entry_safe(this, next, head1, list) {
-		if (!match_futex (&this->key, &key1))
-			continue;
-		if (++ret <= nr_wake) {
-			wake_futex(this);
-		} else {
-			/*
-			 * FIRST: get and set the pi_state
-			 */
-			if (!pi_state2) {
-				int s;
-				/* do this only the first time we requeue someone */
-				s = lookup_pi_state_for_requeue(uaddr2, hb2,
-								&key2, &pi_state2);
-				if (s) {
-					ret = s;
-					goto out_unlock;
-				}
-
-				lock2 = &pi_state2->pi_mutex;
-				spin_lock(&lock2->wait_lock);
-
-				/* Save the top waiter of the wait_list */
-				if (rt_mutex_has_waiters(lock2))
-					top_waiter = rt_mutex_top_waiter(lock2);
-			} else
-				atomic_inc(&pi_state2->refcount);
-
-
-			this->pi_state = pi_state2;
-
-			/*
-			 * SECOND: requeue futex_q to the correct hashbucket
-			 */
-
-			/*
-			 * If key1 and key2 hash to the same bucket, no need to
-			 * requeue.
-			 */
-			if (likely(head1 != &hb2->chain)) {
-				plist_del(&this->list, &hb1->chain);
-				plist_add(&this->list, &hb2->chain);
-				this->lock_ptr = &hb2->lock;
-#ifdef CONFIG_DEBUG_PI_LIST
-				this->list.plist.lock = &hb2->lock;
-#endif
-			}
-			this->key = key2;
-			get_futex_key_refs(&key2);
-			drop_count++;
-
-
-			/*
-			 * THIRD: queue it to lock2
-			 */
-			spin_lock_irq(&this->task->pi_lock);
-			waiter = &this->waiter;
-			waiter->task = this->task;
-			waiter->lock = lock2;
-			plist_node_init(&waiter->list_entry, this->task->prio);
-			plist_node_init(&waiter->pi_list_entry, this->task->prio);
-			plist_add(&waiter->list_entry, &lock2->wait_list);
-			this->task->pi_blocked_on = waiter;
-			spin_unlock_irq(&this->task->pi_lock);
-
-			if (ret - nr_wake >= nr_requeue)
-				break;
-		}
-	}
-
-	/* If we've requeued some tasks and the top_waiter of the rt_mutex
-	   has changed, we must adjust the priority of the owner, if any */
-	if (drop_count) {
-		struct task_struct *owner = rt_mutex_owner(lock2);
-		if (owner &&
-		    (top_waiter != (waiter = rt_mutex_top_waiter(lock2)))) {
-			int chain_walk = 0;
-
-			spin_lock_irq(&owner->pi_lock);
-			if (top_waiter)
-				plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters);
-			else
-				/*
-				 * There was no waiters before the requeue,
-				 * the flag must be updated
-				 */
-				mark_rt_mutex_waiters(lock2);
-
-			plist_add(&waiter->pi_list_entry, &owner->pi_waiters);
-			__rt_mutex_adjust_prio(owner);
-			if (owner->pi_blocked_on) {
-				chain_walk = 1;
-				get_task_struct(owner);
-			}
-
-			spin_unlock_irq(&owner->pi_lock);
-			spin_unlock(&lock2->wait_lock);
-
-			if (chain_walk)
-				rt_mutex_adjust_prio_chain(owner, 0, lock2, NULL,
-							   current);
-		} else {
-			/* No owner or the top_waiter does not change */
-			mark_rt_mutex_waiters(lock2);
-			spin_unlock(&lock2->wait_lock);
-		}
-	}
-
-out_unlock:
-	spin_unlock(&hb1->lock);
-	if (hb1 != hb2)
-		spin_unlock(&hb2->lock);
-
-	/* drop_futex_key_refs() must be called outside the spinlocks. */
-	while (--drop_count >= 0)
-		drop_futex_key_refs(&key1);
-
-out:
-	if (fshared)
-		up_read(fshared);
-	return ret;
-}
-
-/*
  * Wake up all waiters hashed on the physical page that is mapped
  * to this virtual address:
  */
@@ -1384,7 +1114,6 @@
 
 	while (!ret) {
 		newval = (uval & FUTEX_OWNER_DIED) | newtid;
-		newval |= (uval & FUTEX_WAITER_REQUEUED);
 
 		pagefault_disable();
 		curval = futex_atomic_cmpxchg_inatomic(uaddr,
@@ -1416,7 +1145,7 @@
 	struct futex_q q;
 	u32 uval;
 	int ret;
-	struct hrtimer_sleeper t, *to = NULL;
+	struct hrtimer_sleeper t;
 	int rem = 0;
 
 	q.pi_state = NULL;
@@ -1472,14 +1201,6 @@
 	if (uval != val)
 		goto out_unlock_release_sem;
 
-	/*
-	 * This rt_mutex_waiter structure is prepared here and will
-	 * be used only if this task is requeued from a normal futex to
-	 * a PI-futex with futex_requeue_pi.
-	 */
-	debug_rt_mutex_init_waiter(&q.waiter);
-	q.waiter.task = NULL;
-
 	/* Only actually queue if *uaddr contained val.  */
 	__queue_me(&q, hb);
 
@@ -1510,7 +1231,6 @@
 		if (!abs_time)
 			schedule();
 		else {
-			to = &t;
 			hrtimer_init(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 			hrtimer_init_sleeper(&t, current);
 			t.timer.expires = *abs_time;
@@ -1538,67 +1258,6 @@
 	 * we are the only user of it.
 	 */
 
-	if (q.pi_state) {
-		/*
-		 * We were woken but have been requeued on a PI-futex.
-		 * We have to complete the lock acquisition by taking
-		 * the rtmutex.
-		 */
-
-		struct rt_mutex *lock = &q.pi_state->pi_mutex;
-
-		spin_lock(&lock->wait_lock);
-		if (unlikely(q.waiter.task)) {
-			remove_waiter(lock, &q.waiter);
-		}
-		spin_unlock(&lock->wait_lock);
-
-		if (rem)
-			ret = -ETIMEDOUT;
-		else
-			ret = rt_mutex_timed_lock(lock, to, 1);
-
-		if (fshared)
-			down_read(fshared);
-		spin_lock(q.lock_ptr);
-
-		/*
-		 * Got the lock. We might not be the anticipated owner if we
-		 * did a lock-steal - fix up the PI-state in that case.
-		 */
-		if (!ret && q.pi_state->owner != curr) {
-			/*
-			 * We MUST play with the futex we were requeued on,
-			 * NOT the current futex.
-			 * We can retrieve it from the key of the pi_state
-			 */
-			uaddr = q.pi_state->key.uaddr;
-
-			ret = fixup_pi_state_owner(uaddr, &q, curr);
-		} else {
-			/*
-			 * Catch the rare case, where the lock was released
-			 * when we were on the way back before we locked
-			 * the hash bucket.
-			 */
-			if (ret && q.pi_state->owner == curr) {
-				if (rt_mutex_trylock(&q.pi_state->pi_mutex))
-					ret = 0;
-			}
-		}
-
-		/* Unqueue and drop the lock */
-		unqueue_me_pi(&q);
-		if (fshared)
-			up_read(fshared);
-
-		debug_rt_mutex_free_waiter(&q.waiter);
-
-		return ret;
-	}
-
-	debug_rt_mutex_free_waiter(&q.waiter);
-
 	/* If we were woken (and unqueued), we succeeded, whatever. */
 	if (!unqueue_me(&q))
 		return 0;
@@ -1648,51 +1307,6 @@
 }
 
 
-static void set_pi_futex_owner(struct futex_hash_bucket *hb,
-			       union futex_key *key, struct task_struct *p)
-{
-	struct plist_head *head;
-	struct futex_q *this, *next;
-	struct futex_pi_state *pi_state = NULL;
-	struct rt_mutex *lock;
-
-	/* Search a waiter that should already exists */
-
-	head = &hb->chain;
-
-	plist_for_each_entry_safe(this, next, head, list) {
-		if (match_futex (&this->key, key)) {
-			pi_state = this->pi_state;
-			break;
-		}
-	}
-
-	BUG_ON(!pi_state);
-
-	/* set p as pi_state's owner */
-	lock = &pi_state->pi_mutex;
-
-	spin_lock(&lock->wait_lock);
-	spin_lock_irq(&p->pi_lock);
-
-	list_add(&pi_state->list, &p->pi_state_list);
-	pi_state->owner = p;
-
-
-	/* set p as pi_mutex's owner */
-	debug_rt_mutex_proxy_lock(lock, p);
-	WARN_ON(rt_mutex_owner(lock));
-	rt_mutex_set_owner(lock, p, 0);
-	rt_mutex_deadlock_account_lock(lock, p);
-
-	plist_add(&rt_mutex_top_waiter(lock)->pi_list_entry,
-		  &p->pi_waiters);
-	__rt_mutex_adjust_prio(p);
-
-	spin_unlock_irq(&p->pi_lock);
-	spin_unlock(&lock->wait_lock);
-}
-
 /*
  * Userspace tried a 0 -> TID atomic transition of the futex value
  * and failed. The kernel side here does the whole locking operation:
@@ -1753,8 +1367,7 @@
 	 * situation and we return success to user space.
 	 */
 	if (unlikely((curval & FUTEX_TID_MASK) == current->pid)) {
-		if (!(curval & FUTEX_WAITER_REQUEUED))
-			ret = -EDEADLK;
+		ret = -EDEADLK;
 		goto out_unlock_release_sem;
 	}
 
@@ -1774,14 +1387,14 @@
 
 	/*
 	 * There are two cases, where a futex might have no owner (the
-	 * owner TID is 0): OWNER_DIED or REQUEUE. We take over the
-	 * futex in this case. We also do an unconditional take over,
-	 * when the owner of the futex died.
+	 * owner TID is 0): OWNER_DIED. We take over the futex in this
+	 * case. We also do an unconditional take over, when the owner
+	 * of the futex died.
 	 *
 	 * This is safe as we are protected by the hash bucket lock !
 	 */
 	if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) {
-		/* Keep the OWNER_DIED and REQUEUE bits */
+		/* Keep the OWNER_DIED bit */
 		newval = (curval & ~FUTEX_TID_MASK) | current->pid;
 		ownerdied = 0;
 		lock_taken = 1;
@@ -1797,14 +1410,10 @@
 		goto retry_locked;
 
 	/*
-	 * We took the lock due to requeue or owner died take over.
+	 * We took the lock due to an owner-died takeover.
 	 */
-	if (unlikely(lock_taken)) {
-		/* For requeue we need to fixup the pi_futex */
-		if (curval & FUTEX_WAITER_REQUEUED)
-			set_pi_futex_owner(hb, &q.key, curr);
+	if (unlikely(lock_taken))
 		goto out_unlock_release_sem;
-	}
 
 	/*
 	 * We dont have the lock. Look up the PI state (or create it if
@@ -2289,8 +1898,6 @@
 		 * userspace.
 		 */
 		mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
-		/* Also keep the FUTEX_WAITER_REQUEUED flag if set */
-		mval |= (uval & FUTEX_WAITER_REQUEUED);
 		nval = futex_atomic_cmpxchg_inatomic(uaddr, uval, mval);
 
 		if (nval == -EFAULT)
@@ -2427,9 +2034,6 @@
 	case FUTEX_TRYLOCK_PI:
 		ret = futex_lock_pi(uaddr, fshared, 0, timeout, 1);
 		break;
-	case FUTEX_CMP_REQUEUE_PI:
-		ret = futex_requeue_pi(uaddr, fshared, uaddr2, val, val2, &val3);
-		break;
 	default:
 		ret = -ENOSYS;
 	}
@@ -2460,8 +2064,7 @@
 	/*
 	 * requeue parameter in 'utime' if cmd == FUTEX_REQUEUE.
 	 */
-	if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE
-	    || cmd == FUTEX_CMP_REQUEUE_PI)
+	if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE)
 		val2 = (u32) (unsigned long) utime;
 
 	return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
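
For reference, the userspace side of the FUTEX_LOCK_PI protocol that futex_lock_pi() backs looks roughly like this (a hedged sketch, not glibc's actual implementation; pi_lock() is a made-up helper):

	#include <linux/futex.h>
	#include <sys/syscall.h>
	#include <sys/types.h>
	#include <unistd.h>

	/* Fast path: try the 0 -> TID transition in userspace.  On
	 * contention, fall into the kernel, which does the whole
	 * locking operation (including the -EDEADLK check above). */
	static void pi_lock(int *futex, pid_t tid)
	{
		if (__sync_bool_compare_and_swap(futex, 0, tid))
			return;		/* uncontended: we now own it */

		syscall(SYS_futex, futex, FUTEX_LOCK_PI, 0, NULL, NULL, 0);
	}
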
diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
index 2747894..f792136 100644
--- a/kernel/futex_compat.c
+++ b/kernel/futex_compat.c
@@ -157,8 +157,7 @@
 			t = ktime_add(ktime_get(), t);
 		tp = &t;
 	}
-	if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE
-	    || cmd == FUTEX_CMP_REQUEUE_PI)
+	if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE)
 		val2 = (int) (unsigned long) utime;
 
 	return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 24d7d78..d65305b 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -99,6 +99,8 @@
 	ssize_t res;
 
 	data = filp->private_data;
+	if (!data->ready)
+		return -ENODATA;
 	res = snapshot_read_next(&data->handle, count);
 	if (res > 0) {
 		if (copy_to_user(buf, data_of(data->handle), res))
@@ -245,7 +247,7 @@
 		break;
 
 	case SNAPSHOT_UNFREEZE:
-		if (!data->frozen)
+		if (!data->frozen || data->ready)
 			break;
 		mutex_lock(&pm_mutex);
 		thaw_processes();
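
For context, the sequence a hibernation tool drives through /dev/snapshot (a sketch with error handling omitted; the SNAPSHOT_* ioctl numbers live in kernel/power/power.h and are copied by userspace tools):

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	static void write_image(int out_fd)
	{
		int fd = open("/dev/snapshot", O_RDONLY);
		int in_suspend = 0;
		char buf[4096];
		ssize_t n;

		ioctl(fd, SNAPSHOT_FREEZE, 0);
		ioctl(fd, SNAPSHOT_ATOMIC_SNAPSHOT, &in_suspend);

		/* data->ready is set only once the image exists; with
		 * this patch an earlier read() fails with -ENODATA
		 * instead of returning garbage. */
		while ((n = read(fd, buf, sizeof(buf))) > 0)
			write(out_fd, buf, n);

		ioctl(fd, SNAPSHOT_FREE, 0);		/* clears data->ready */
		ioctl(fd, SNAPSHOT_UNFREEZE, 0);	/* ignored while ready */
		close(fd);
	}
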
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index a6fbb41..17d28ce 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -56,7 +56,7 @@
  * state.
  */
 
-void
+static void
 rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner,
 		   unsigned long mask)
 {
@@ -81,6 +81,29 @@
 }
 
 /*
+ * We can speed up the acquire/release if the architecture
+ * supports cmpxchg and if there's no debugging state to be set up
+ */
+#if defined(__HAVE_ARCH_CMPXCHG) && !defined(CONFIG_DEBUG_RT_MUTEXES)
+# define rt_mutex_cmpxchg(l,c,n)	(cmpxchg(&l->owner, c, n) == c)
+static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
+{
+	unsigned long owner, *p = (unsigned long *) &lock->owner;
+
+	do {
+		owner = *p;
+	} while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
+}
+#else
+# define rt_mutex_cmpxchg(l,c,n)	(0)
+static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
+{
+	lock->owner = (struct task_struct *)
+			((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
+}
+#endif
+
+/*
  * Calculate task priority from the waiter list priority
  *
  * Return task->normal_prio when the waiter list is empty or when
@@ -100,7 +123,7 @@
  *
  * This can be both boosting and unboosting. task->pi_lock must be held.
  */
-void __rt_mutex_adjust_prio(struct task_struct *task)
+static void __rt_mutex_adjust_prio(struct task_struct *task)
 {
 	int prio = rt_mutex_getprio(task);
 
@@ -136,11 +159,11 @@
  * Decreases task's usage by one - may thus free the task.
  * Returns 0 or -EDEADLK.
  */
-int rt_mutex_adjust_prio_chain(struct task_struct *task,
-			       int deadlock_detect,
-			       struct rt_mutex *orig_lock,
-			       struct rt_mutex_waiter *orig_waiter,
-			       struct task_struct *top_task)
+static int rt_mutex_adjust_prio_chain(struct task_struct *task,
+				      int deadlock_detect,
+				      struct rt_mutex *orig_lock,
+				      struct rt_mutex_waiter *orig_waiter,
+				      struct task_struct *top_task)
 {
 	struct rt_mutex *lock;
 	struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
@@ -514,8 +537,8 @@
  *
  * Must be called with lock->wait_lock held
  */
-void remove_waiter(struct rt_mutex *lock,
-		   struct rt_mutex_waiter *waiter)
+static void remove_waiter(struct rt_mutex *lock,
+			  struct rt_mutex_waiter *waiter)
 {
 	int first = (waiter == rt_mutex_top_waiter(lock));
 	struct task_struct *owner = rt_mutex_owner(lock);
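
Moving these helpers out of rtmutex_common.h matches their only remaining users, the fast paths in this file. A simplified sketch of how they are used (based on rt_mutex_fastlock() in the same file; rt_mutex_lock_sketch() is a made-up name):

	static inline void rt_mutex_lock_sketch(struct rt_mutex *lock)
	{
		/* Uncontended: atomically swing owner from NULL to
		 * current; no wait_lock, no debug state. */
		if (likely(rt_mutex_cmpxchg(lock, NULL, current)))
			return;

		/* Contended, or no arch cmpxchg: slow path, which sets
		 * RT_MUTEX_HAS_WAITERS via mark_rt_mutex_waiters(). */
		rt_mutex_slowlock(lock, TASK_UNINTERRUPTIBLE, NULL, 0);
	}
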
diff --git a/kernel/rtmutex_common.h b/kernel/rtmutex_common.h
index 242ec7e..9c75856 100644
--- a/kernel/rtmutex_common.h
+++ b/kernel/rtmutex_common.h
@@ -113,29 +113,6 @@
 }
 
 /*
- * We can speed up the acquire/release, if the architecture
- * supports cmpxchg and if there's no debugging state to be set up
- */
-#if defined(__HAVE_ARCH_CMPXCHG) && !defined(CONFIG_DEBUG_RT_MUTEXES)
-# define rt_mutex_cmpxchg(l,c,n)	(cmpxchg(&l->owner, c, n) == c)
-static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
-{
-	unsigned long owner, *p = (unsigned long *) &lock->owner;
-
-	do {
-		owner = *p;
-	} while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
-}
-#else
-# define rt_mutex_cmpxchg(l,c,n)	(0)
-static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
-{
-	lock->owner = (struct task_struct *)
-			((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
-}
-#endif
-
-/*
  * PI-futex support (proxy locking functions, etc.):
  */
 extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
@@ -143,15 +120,4 @@
 				       struct task_struct *proxy_owner);
 extern void rt_mutex_proxy_unlock(struct rt_mutex *lock,
 				  struct task_struct *proxy_owner);
-
-extern void rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner,
-			       unsigned long mask);
-extern void __rt_mutex_adjust_prio(struct task_struct *task);
-extern int rt_mutex_adjust_prio_chain(struct task_struct *task,
-				      int deadlock_detect,
-				      struct rt_mutex *orig_lock,
-				      struct rt_mutex_waiter *orig_waiter,
-				      struct task_struct *top_task);
-extern void remove_waiter(struct rt_mutex *lock,
-			  struct rt_mutex_waiter *waiter);
 #endif
diff --git a/kernel/sched.c b/kernel/sched.c
index 13cdab3..a747591 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1159,21 +1159,72 @@
 {
 	unsigned long flags;
 	struct rq *rq;
-	int preempted;
+	struct prio_array *array;
+	int running;
 
 repeat:
-	rq = task_rq_lock(p, &flags);
-	/* Must be off runqueue entirely, not preempted. */
-	if (unlikely(p->array || task_running(rq, p))) {
-		/* If it's preempted, we yield.  It could be a while. */
-		preempted = !task_running(rq, p);
-		task_rq_unlock(rq, &flags);
+	/*
+	 * We do the initial early heuristics without holding
+	 * any task-queue locks at all. We'll only try to get
+	 * the runqueue lock when things look like they will
+	 * work out!
+	 */
+	rq = task_rq(p);
+
+	/*
+	 * If the task is actively running on another CPU
+	 * still, just relax and busy-wait without holding
+	 * any locks.
+	 *
+	 * NOTE! Since we don't hold any locks, we can't
+	 * even be sure that "rq" stays the right runqueue!
+	 * But we don't care, since "task_running()" will
+	 * return false if the runqueue has changed and p
+	 * is actually now running somewhere else!
+	 */
+	while (task_running(rq, p))
 		cpu_relax();
-		if (preempted)
-			yield();
+
+	/*
+	 * Ok, time to look more closely! We need the rq
+	 * lock now, to be *sure*. If we're wrong, we'll
+	 * just go back and repeat.
+	 */
+	rq = task_rq_lock(p, &flags);
+	running = task_running(rq, p);
+	array = p->array;
+	task_rq_unlock(rq, &flags);
+
+	/*
+	 * Was it really running after all now that we
+	 * checked with the proper locks actually held?
+	 *
+	 * Oops. Go back and try again.
+	 */
+	if (unlikely(running)) {
+		cpu_relax();
 		goto repeat;
 	}
-	task_rq_unlock(rq, &flags);
+
+	/*
+	 * It's not enough that it's not actively running,
+	 * it must be off the runqueue _entirely_, and not
+	 * preempted!
+	 *
+	 * So if it was still runnable (but just not actively
+	 * running right now), it's preempted, and we should
+	 * yield - it could be a while.
+	 */
+	if (unlikely(array)) {
+		yield();
+		goto repeat;
+	}
+
+	/*
+	 * Ahh, all good. It wasn't running, and it wasn't
+	 * runnable, which means that it will never become
+	 * running in the future either. We're all done!
+	 */
 }
 
 /***
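
The rewritten wait loop is an instance of a general pattern: spin locklessly on a cheap check with cpu_relax(), and take the lock only to confirm the result. In miniature (illustrative only; struct widget and its fields are made up):

	struct widget {
		spinlock_t lock;
		int busy;
	};

	static void wait_until_idle(struct widget *w)
	{
		int busy;

		for (;;) {
			/* Cheap lock-free spin while it looks busy. */
			while (w->busy)
				cpu_relax();

			/* Now confirm under the lock; we may have raced. */
			spin_lock(&w->lock);
			busy = w->busy;
			spin_unlock(&w->lock);

			if (!busy)
				return;
			cpu_relax();
		}
	}
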
@@ -7071,12 +7122,13 @@
 void normalize_rt_tasks(void)
 {
 	struct prio_array *array;
-	struct task_struct *p;
+	struct task_struct *g, *p;
 	unsigned long flags;
 	struct rq *rq;
 
 	read_lock_irq(&tasklist_lock);
-	for_each_process(p) {
+
+	do_each_thread(g, p) {
 		if (!rt_task(p))
 			continue;
 
@@ -7094,7 +7146,8 @@
 
 		__task_rq_unlock(rq);
 		spin_unlock_irqrestore(&p->pi_lock, flags);
-	}
+	} while_each_thread(g, p);
+
 	read_unlock_irq(&tasklist_lock);
 }
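
The switch from for_each_process() to do_each_thread()/while_each_thread() widens the walk from thread-group leaders to every thread, which normalize_rt_tasks() needs. The macros expand to a nested loop, roughly (from include/linux/sched.h of this era):

	#define do_each_thread(g, p) \
		for (g = p = &init_task; (g = next_task(g)) != &init_task; ) do

	#define while_each_thread(g, p) \
		while ((p = next_thread(p)) != g)
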
 
diff --git a/kernel/signal.c b/kernel/signal.c
index fe590e0..f940560 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -363,7 +363,13 @@
  */
 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
 {
-	int signr = __dequeue_signal(&tsk->pending, mask, info);
+	int signr = 0;
+
+	/*
+	 * We only dequeue private signals from ourselves; we don't let
+	 * signalfd steal them.
+	 */
+	if (tsk == current)
+		signr = __dequeue_signal(&tsk->pending, mask, info);
 	if (!signr) {
 		signr = __dequeue_signal(&tsk->signal->shared_pending,
 					 mask, info);
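
The scenario this guards against, from the signalfd side (a sketch using the modern glibc wrapper, which postdates this patch; drain_one() is a made-up helper run by a task other than the signalfd's creator):

	#include <signal.h>
	#include <sys/signalfd.h>
	#include <unistd.h>

	/* Without the tsk == current check, this read() could dequeue
	 * a signal queued privately to the signalfd's creator; now
	 * other tasks only ever see the shared (process-wide) queue. */
	static void drain_one(int sfd)
	{
		struct signalfd_siginfo ssi;

		if (read(sfd, &ssi, sizeof(ssi)) == sizeof(ssi))
			(void)ssi.ssi_signo;	/* act on the signal here */
	}
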
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index eb7180d..a45d1f0 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -326,9 +326,10 @@
 	pte_t entry;
 
 	entry = pte_mkwrite(pte_mkdirty(*ptep));
-	ptep_set_access_flags(vma, address, ptep, entry, 1);
-	update_mmu_cache(vma, address, entry);
-	lazy_mmu_prot_update(entry);
+	if (ptep_set_access_flags(vma, address, ptep, entry, 1)) {
+		update_mmu_cache(vma, address, entry);
+		lazy_mmu_prot_update(entry);
+	}
 }
 
 
diff --git a/mm/memory.c b/mm/memory.c
index cb94488..f64cbf9 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1691,9 +1691,10 @@
 		flush_cache_page(vma, address, pte_pfn(orig_pte));
 		entry = pte_mkyoung(orig_pte);
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
-		ptep_set_access_flags(vma, address, page_table, entry, 1);
-		update_mmu_cache(vma, address, entry);
-		lazy_mmu_prot_update(entry);
+		if (ptep_set_access_flags(vma, address, page_table,
+					  entry, 1)) {
+			update_mmu_cache(vma, address, entry);
+			lazy_mmu_prot_update(entry);
+		}
 		ret |= VM_FAULT_WRITE;
 		goto unlock;
 	}
@@ -2525,10 +2526,9 @@
 		pte_t *pte, pmd_t *pmd, int write_access)
 {
 	pte_t entry;
-	pte_t old_entry;
 	spinlock_t *ptl;
 
-	old_entry = entry = *pte;
+	entry = *pte;
 	if (!pte_present(entry)) {
 		if (pte_none(entry)) {
 			if (vma->vm_ops) {
@@ -2561,8 +2561,7 @@
 		entry = pte_mkdirty(entry);
 	}
 	entry = pte_mkyoung(entry);
-	if (!pte_same(old_entry, entry)) {
-		ptep_set_access_flags(vma, address, pte, entry, write_access);
+	if (ptep_set_access_flags(vma, address, pte, entry, write_access)) {
 		update_mmu_cache(vma, address, entry);
 		lazy_mmu_prot_update(entry);
 	} else {
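
Both the hugetlb and memory.c call sites now rely on the same new contract: ptep_set_access_flags() returns nonzero only when it actually changed the PTE, so the MMU-cache update can be skipped otherwise. The generic fallback looks roughly like this (simplified sketch; see include/asm-generic/pgtable.h):

	#define ptep_set_access_flags(vma, addr, ptep, entry, dirty)	\
	({								\
		int __changed = !pte_same(*(ptep), (entry));		\
		if (__changed) {					\
			set_pte_at((vma)->vm_mm, (addr), (ptep), (entry)); \
			flush_tlb_page((vma), (addr));			\
		}							\
		__changed;						\
	})
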
diff --git a/mm/slub.c b/mm/slub.c
index c9ab688..fa28b16 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2436,6 +2436,7 @@
 void __init kmem_cache_init(void)
 {
 	int i;
+	int caches = 0;
 
 #ifdef CONFIG_NUMA
 	/*
@@ -2446,20 +2447,29 @@
 	create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
 		sizeof(struct kmem_cache_node), GFP_KERNEL);
 	kmalloc_caches[0].refcount = -1;
+	caches++;
 #endif
 
 	/* Able to allocate the per node structures */
 	slab_state = PARTIAL;
 
 	/* Caches that are not of power-of-two size */
-	create_kmalloc_cache(&kmalloc_caches[1],
+	if (KMALLOC_MIN_SIZE <= 64) {
+		create_kmalloc_cache(&kmalloc_caches[1],
 				"kmalloc-96", 96, GFP_KERNEL);
-	create_kmalloc_cache(&kmalloc_caches[2],
+		caches++;
+	}
+	if (KMALLOC_MIN_SIZE <= 128) {
+		create_kmalloc_cache(&kmalloc_caches[2],
 				"kmalloc-192", 192, GFP_KERNEL);
+		caches++;
+	}
 
-	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
+	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
 		create_kmalloc_cache(&kmalloc_caches[i],
 			"kmalloc", 1 << i, GFP_KERNEL);
+		caches++;
+	}
 
 	slab_state = UP;
 
@@ -2476,8 +2486,8 @@
 				nr_cpu_ids * sizeof(struct page *);
 
 	printk(KERN_INFO "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
-		" Processors=%d, Nodes=%d\n",
-		KMALLOC_SHIFT_HIGH, cache_line_size(),
+		" CPUs=%d, Nodes=%d\n",
+		caches, cache_line_size(),
 		slub_min_order, slub_max_order, slub_min_objects,
 		nr_cpu_ids, nr_node_ids);
 }
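
The KMALLOC_MIN_SIZE guards above make sense given how sizes map to caches: once the minimum object size is 128 bytes, a 96-byte request already lands in kmalloc-128, and at 256 bytes kmalloc-192 is likewise dead weight. A simplified sketch of the mapping (kmalloc_index_sketch() is illustrative, not SLUB's exact kmalloc_index()):

	static int kmalloc_index_sketch(size_t size)
	{
		/* Assumes size > 0. */
		if (KMALLOC_MIN_SIZE <= 64 && size > 64 && size <= 96)
			return 1;	/* kmalloc-96 */
		if (KMALLOC_MIN_SIZE <= 128 && size > 128 && size <= 192)
			return 2;	/* kmalloc-192 */
		/* Otherwise the next power of two, floor-bounded. */
		return max(KMALLOC_SHIFT_LOW, fls(size - 1));
	}
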
@@ -2867,7 +2877,7 @@
 
 	order = get_order(sizeof(struct location) * max);
 
-	l = (void *)__get_free_pages(GFP_KERNEL, order);
+	l = (void *)__get_free_pages(GFP_ATOMIC, order);
 
 	if (!l)
 		return 0;
diff --git a/sound/ppc/pmac.c b/sound/ppc/pmac.c
index 5a2bef4..7a22f0f 100644
--- a/sound/ppc/pmac.c
+++ b/sound/ppc/pmac.c
@@ -775,7 +775,8 @@
 		out_le32(&chip->awacs->control, in_le32(&chip->awacs->control) & 0xfff);
 	}
 
-	snd_pmac_sound_feature(chip, 0);
+	if (chip->node)
+		snd_pmac_sound_feature(chip, 0);
 
 	/* clean up mixer if any */
 	if (chip->mixer_free)
@@ -925,6 +926,7 @@
 	}
 	if (! sound) {
 		of_node_put(chip->node);
+		chip->node = NULL;
 		return -ENODEV;
 	}
 	prop = of_get_property(sound, "sub-frame", NULL);
@@ -937,7 +939,9 @@
 		printk(KERN_INFO "snd-powermac no longer handles any "
 				 "machines with a layout-id property "
 				 "in the device-tree, use snd-aoa.\n");
+		of_node_put(sound);
 		of_node_put(chip->node);
+		chip->node = NULL;
 		return -ENODEV;
 	}
 	/* This should be verified on older screamers */
@@ -1297,8 +1301,6 @@
 	return 0;
 
  __error:
-	if (chip->pdev)
-		pci_dev_put(chip->pdev);
 	snd_pmac_free(chip);
 	return err;
 }