Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6

* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6:
  [IA64] implement ticket locks for Itanium
diff --git a/Documentation/laptops/thinkpad-acpi.txt b/Documentation/laptops/thinkpad-acpi.txt
index 6d03487..aafcaa6 100644
--- a/Documentation/laptops/thinkpad-acpi.txt
+++ b/Documentation/laptops/thinkpad-acpi.txt
@@ -199,18 +199,22 @@
 
 Not all bits in the mask can be modified.  Not all bits that can be
 modified do anything.  Not all hot keys can be individually controlled
-by the mask.  Some models do not support the mask at all, and in those
-models, hot keys cannot be controlled individually.  The behaviour of
-the mask is, therefore, highly dependent on the ThinkPad model.
+by the mask.  Some models do not support the mask at all.  The behaviour
+of the mask is, therefore, highly dependent on the ThinkPad model.
+
+The driver will filter out any unmasked hotkeys, so even if the firmware
+doesn't allow disabling a specific hotkey, the driver will not report
+events for unmasked hotkeys.
 
 Note that unmasking some keys prevents their default behavior.  For
 example, if Fn+F5 is unmasked, that key will no longer enable/disable
-Bluetooth by itself.
+Bluetooth by itself in firmware.
 
-Note also that not all Fn key combinations are supported through ACPI.
-For example, on the X40, the brightness, volume and "Access IBM" buttons
-do not generate ACPI events even with this driver.  They *can* be used
-through the "ThinkPad Buttons" utility, see http://www.nongnu.org/tpb/
+Note also that not all Fn key combinations are supported through ACPI,
+depending on the ThinkPad model and firmware version.  On those
+ThinkPads, it is still possible to support some extra hotkeys by
+polling the "CMOS NVRAM" at least 10 times per second.  The driver
+attempts to enable this functionality automatically when required.
 
 procfs notes:
 
@@ -255,18 +259,11 @@
 		1: does nothing
 
 	hotkey_mask:
-		bit mask to enable driver-handling (and depending on
+		bit mask to enable reporting (and depending on
 		the firmware, ACPI event generation) for each hot key
 		(see above).  Returns the current status of the hot keys
 		mask, and allows one to modify it.
 
-		Note: when NVRAM polling is active, the firmware mask
-		will be different from the value returned by
-		hotkey_mask.  The driver will retain enabled bits for
-		hotkeys that are under NVRAM polling even if the
-		firmware refuses them, and will not set these bits on
-		the firmware hot key mask.
-
 	hotkey_all_mask:
 		bit mask that should enable event reporting for all
 		supported hot keys, when echoed to hotkey_mask above.
@@ -279,7 +276,8 @@
 		bit mask that should enable event reporting for all
 		supported hot keys, except those which are always
 		handled by the firmware anyway.  Echo it to
-		hotkey_mask above, to use.
+		hotkey_mask above, to use.  This is the default mask
+		used by the driver.
 
 	hotkey_source_mask:
 		bit mask that selects which hot keys will the driver
@@ -287,9 +285,10 @@
 		based on the capabilities reported by the ACPI firmware,
 		but it can be overridden at runtime.
 
-		Hot keys whose bits are set in both hotkey_source_mask
-		and also on hotkey_mask are polled for in NVRAM.  Only a
-		few hot keys are available through CMOS NVRAM polling.
+		Hot keys whose bits are set in hotkey_source_mask are
+		polled for in NVRAM, and reported as hotkey events if
+		enabled in hotkey_mask.  Only a few hot keys are
+		available through CMOS NVRAM polling.
 
 		Warning: when in NVRAM mode, the volume up/down/mute
 		keys are synthesized according to changes in the mixer,
@@ -525,6 +524,7 @@
 0x2305		System is waking up from suspend to eject bay
 0x2404		System is waking up from hibernation to undock
 0x2405		System is waking up from hibernation to eject bay
+0x5010		Brightness level changed/control event
 
 The above events are never propagated by the driver.
 
@@ -532,7 +532,6 @@
 0x4003		Undocked (see 0x2x04), can sleep again
 0x500B		Tablet pen inserted into its storage bay
 0x500C		Tablet pen removed from its storage bay
-0x5010		Brightness level changed (newer Lenovo BIOSes)
 
 The above events are propagated by the driver.
 
@@ -621,6 +620,8 @@
 2. Do *NOT* load up ACPI video, enable the hotkeys in thinkpad-acpi,
    and map them to KEY_BRIGHTNESS_UP and KEY_BRIGHTNESS_DOWN.  Process
    these keys on userspace somehow (e.g. by calling xbacklight).
+   The driver will do this automatically if it detects that ACPI video
+   has been disabled.
 
 
 Bluetooth
@@ -1459,3 +1460,8 @@
 0x020400:	Marker for 16 LEDs support.  Also, LEDs that are known
 		to not exist in a given model are not registered with
 		the LED sysfs class anymore.
+
+0x020500:	Updated hotkey driver, hotkey_mask is always available
+		and it is always able to disable hot keys.  Very old
+		ThinkPads are properly supported.  hotkey_bios_mask
+		is deprecated and marked for removal.
diff --git a/arch/alpha/kernel/vmlinux.lds.S b/arch/alpha/kernel/vmlinux.lds.S
index 2906665..ecf4d48 100644
--- a/arch/alpha/kernel/vmlinux.lds.S
+++ b/arch/alpha/kernel/vmlinux.lds.S
@@ -1,4 +1,5 @@
 #include <asm-generic/vmlinux.lds.h>
+#include <asm/thread_info.h>
 #include <asm/page.h>
 #include <asm/thread_info.h>
 
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index e17e3c3..ac34c0d 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -83,6 +83,8 @@
 	ldr	r0, [sp]
 	strex	r1, r2, [sp]			@ clear the exclusive monitor
 	ldmib	sp, {r1 - pc}^			@ load r1 - pc, cpsr
+#else
+	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
 #endif
 	.endm
 
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index e961221..eecd2a9 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -2,6 +2,8 @@
 # Makefile for the Linux/MIPS kernel.
 #
 
+CPPFLAGS_vmlinux.lds := $(KBUILD_CFLAGS)
+
 extra-y		:= head.o init_task.o vmlinux.lds
 
 obj-y		+= cpu-probe.o branch.o entry.o genex.o irq.o process.o \
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index 9bf0e3d..162b299 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -11,15 +11,15 @@
 	note PT_NOTE FLAGS(4);	/* R__ */
 }
 
-ifdef CONFIG_32BIT
-	ifdef CONFIG_CPU_LITTLE_ENDIAN
+#ifdef CONFIG_32BIT
+	#ifdef CONFIG_CPU_LITTLE_ENDIAN
 		jiffies  = jiffies_64;
-	else
+	#else
 		jiffies  = jiffies_64 + 4;
-	endif
-else
+	#endif
+#else
 	jiffies  = jiffies_64;
-endif
+#endif
 
 SECTIONS
 {
diff --git a/arch/sparc/include/asm/vio.h b/arch/sparc/include/asm/vio.h
index 6cdbf7e..9d83d3b 100644
--- a/arch/sparc/include/asm/vio.h
+++ b/arch/sparc/include/asm/vio.h
@@ -258,8 +258,6 @@
 static inline u32 vio_dring_avail(struct vio_dring_state *dr,
 				  unsigned int ring_size)
 {
-	MAYBE_BUILD_BUG_ON(!is_power_of_2(ring_size));
-
 	return (dr->pending -
 		((dr->prod - dr->cons) & (ring_size - 1)));
 }
diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
index 75e4f00..f543b70 100644
--- a/arch/x86/boot/compressed/head_32.S
+++ b/arch/x86/boot/compressed/head_32.S
@@ -23,13 +23,14 @@
  */
 	.text
 
+#include <linux/init.h>
 #include <linux/linkage.h>
 #include <asm/segment.h>
 #include <asm/page_types.h>
 #include <asm/boot.h>
 #include <asm/asm-offsets.h>
 
-	.section ".text.head","ax",@progbits
+	__HEAD
 ENTRY(startup_32)
 	cld
 	/*
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index f62c284..077e1b6 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -24,6 +24,7 @@
 	.code32
 	.text
 
+#include <linux/init.h>
 #include <linux/linkage.h>
 #include <asm/segment.h>
 #include <asm/pgtable_types.h>
@@ -33,7 +34,7 @@
 #include <asm/processor-flags.h>
 #include <asm/asm-offsets.h>
 
-	.section ".text.head"
+	__HEAD
 	.code32
 ENTRY(startup_32)
 	cld
diff --git a/arch/x86/boot/compressed/vmlinux.lds.S b/arch/x86/boot/compressed/vmlinux.lds.S
index cc353e1..f4193bb 100644
--- a/arch/x86/boot/compressed/vmlinux.lds.S
+++ b/arch/x86/boot/compressed/vmlinux.lds.S
@@ -1,3 +1,5 @@
+#include <asm-generic/vmlinux.lds.h>
+
 OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT)
 
 #undef i386
@@ -18,9 +20,9 @@
 	 * address 0.
 	 */
 	. = 0;
-	.text.head : {
+	.head.text : {
 		_head = . ;
-		*(.text.head)
+		HEAD_TEXT
 		_ehead = . ;
 	}
 	.rodata.compressed : {
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 7b467bf..d1f4a76 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -277,6 +277,7 @@
 typedef struct page *pgtable_t;
 
 extern pteval_t __supported_pte_mask;
+extern void set_nx(void);
 extern int nx_enabled;
 
 #define pgprot_writecombine	pgprot_writecombine
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 6f0695d..25a9284 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -165,21 +165,11 @@
 	return 0;
 }
 
-static inline int cpu_to_node(int cpu)
-{
-	return 0;
-}
-
 static inline int early_cpu_to_node(int cpu)
 {
 	return 0;
 }
 
-static inline const struct cpumask *cpumask_of_node(int node)
-{
-	return cpu_online_mask;
-}
-
 static inline void setup_node_to_cpumask_map(void) { }
 
 #endif
diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
index 7029f0e..472763d 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
@@ -98,8 +98,9 @@
 };
 
 /* Inject mce on current CPU */
-static int raise_local(struct mce *m)
+static int raise_local(void)
 {
+	struct mce *m = &__get_cpu_var(injectm);
 	int context = MCJ_CTX(m->inject_flags);
 	int ret = 0;
 	int cpu = m->extcpu;
@@ -167,12 +168,12 @@
 			}
 			cpu_relax();
 		}
-		raise_local(m);
+		raise_local();
 		put_cpu();
 		put_online_cpus();
 	} else
 #endif
-		raise_local(m);
+		raise_local();
 }
 
 /* Error injection interface */
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 2f5aab2..4b2af86 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -305,13 +305,25 @@
 static u64 mce_rdmsrl(u32 msr)
 {
 	u64 v;
+
 	if (__get_cpu_var(injectm).finished) {
 		int offset = msr_to_offset(msr);
+
 		if (offset < 0)
 			return 0;
 		return *(u64 *)((char *)&__get_cpu_var(injectm) + offset);
 	}
-	rdmsrl(msr, v);
+
+	if (rdmsrl_safe(msr, &v)) {
+		WARN_ONCE(1, "mce: Unable to read msr %d!\n", msr);
+		/*
+		 * Return zero in case the access faulted. This should
+		 * not happen normally but can happen if the CPU does
+		 * something weird, or if the code is buggy.
+		 */
+		v = 0;
+	}
+
 	return v;
 }
 
@@ -319,6 +331,7 @@
 {
 	if (__get_cpu_var(injectm).finished) {
 		int offset = msr_to_offset(msr);
+
 		if (offset >= 0)
 			*(u64 *)((char *)&__get_cpu_var(injectm) + offset) = v;
 		return;
@@ -415,7 +428,7 @@
 		m->ip = mce_rdmsrl(rip_msr);
 }
 
-#ifdef CONFIG_X86_LOCAL_APIC 
+#ifdef CONFIG_X86_LOCAL_APIC
 /*
  * Called after interrupts have been reenabled again
  * when a MCE happened during an interrupts off region
@@ -1172,6 +1185,7 @@
 		return -ENOMEM;
 	for (i = 0; i < banks; i++) {
 		struct mce_bank *b = &mce_banks[i];
+
 		b->ctl = -1ULL;
 		b->init = 1;
 	}
@@ -1203,6 +1217,7 @@
 	banks = b;
 	if (!mce_banks) {
 		int err = mce_banks_init();
+
 		if (err)
 			return err;
 	}
@@ -1237,6 +1252,7 @@
 
 	for (i = 0; i < banks; i++) {
 		struct mce_bank *b = &mce_banks[i];
+
 		if (!b->init)
 			continue;
 		wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
@@ -1626,6 +1642,7 @@
 
 	for (i = 0; i < banks; i++) {
 		struct mce_bank *b = &mce_banks[i];
+
 		if (b->init)
 			wrmsrl(MSR_IA32_MCx_CTL(i), 0);
 	}
@@ -1911,6 +1928,7 @@
 		cmci_clear();
 	for (i = 0; i < banks; i++) {
 		struct mce_bank *b = &mce_banks[i];
+
 		if (b->init)
 			wrmsrl(MSR_IA32_MCx_CTL(i), 0);
 	}
@@ -1928,6 +1946,7 @@
 		cmci_reenable();
 	for (i = 0; i < banks; i++) {
 		struct mce_bank *b = &mce_banks[i];
+
 		if (b->init)
 			wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
 	}
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index 63a56d1..b3a1dba 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -34,20 +34,31 @@
 /* How long to wait between reporting thermal events */
 #define CHECK_INTERVAL		(300 * HZ)
 
-static DEFINE_PER_CPU(__u64, next_check) = INITIAL_JIFFIES;
-static DEFINE_PER_CPU(unsigned long, thermal_throttle_count);
-static DEFINE_PER_CPU(bool, thermal_throttle_active);
+/*
+ * Current thermal throttling state:
+ */
+struct thermal_state {
+	bool			is_throttled;
 
-static atomic_t therm_throt_en		= ATOMIC_INIT(0);
+	u64			next_check;
+	unsigned long		throttle_count;
+	unsigned long		last_throttle_count;
+};
+
+static DEFINE_PER_CPU(struct thermal_state, thermal_state);
+
+static atomic_t therm_throt_en	= ATOMIC_INIT(0);
 
 #ifdef CONFIG_SYSFS
 #define define_therm_throt_sysdev_one_ro(_name)				\
 	static SYSDEV_ATTR(_name, 0444, therm_throt_sysdev_show_##_name, NULL)
 
 #define define_therm_throt_sysdev_show_func(name)			\
-static ssize_t therm_throt_sysdev_show_##name(struct sys_device *dev,	\
-					struct sysdev_attribute *attr,	\
-					      char *buf)		\
+									\
+static ssize_t therm_throt_sysdev_show_##name(				\
+			struct sys_device *dev,				\
+			struct sysdev_attribute *attr,			\
+			char *buf)					\
 {									\
 	unsigned int cpu = dev->id;					\
 	ssize_t ret;							\
@@ -55,7 +66,7 @@
 	preempt_disable();	/* CPU hotplug */			\
 	if (cpu_online(cpu))						\
 		ret = sprintf(buf, "%lu\n",				\
-			      per_cpu(thermal_throttle_##name, cpu));	\
+			      per_cpu(thermal_state, cpu).name);	\
 	else								\
 		ret = 0;						\
 	preempt_enable();						\
@@ -63,11 +74,11 @@
 	return ret;							\
 }
 
-define_therm_throt_sysdev_show_func(count);
-define_therm_throt_sysdev_one_ro(count);
+define_therm_throt_sysdev_show_func(throttle_count);
+define_therm_throt_sysdev_one_ro(throttle_count);
 
 static struct attribute *thermal_throttle_attrs[] = {
-	&attr_count.attr,
+	&attr_throttle_count.attr,
 	NULL
 };
 
@@ -93,33 +104,39 @@
  *          1 : Event should be logged further, and a message has been
  *              printed to the syslog.
  */
-static int therm_throt_process(int curr)
+static int therm_throt_process(bool is_throttled)
 {
-	unsigned int cpu = smp_processor_id();
-	__u64 tmp_jiffs = get_jiffies_64();
-	bool was_throttled = __get_cpu_var(thermal_throttle_active);
-	bool is_throttled = __get_cpu_var(thermal_throttle_active) = curr;
+	struct thermal_state *state;
+	unsigned int this_cpu;
+	bool was_throttled;
+	u64 now;
+
+	this_cpu = smp_processor_id();
+	now = get_jiffies_64();
+	state = &per_cpu(thermal_state, this_cpu);
+
+	was_throttled = state->is_throttled;
+	state->is_throttled = is_throttled;
 
 	if (is_throttled)
-		__get_cpu_var(thermal_throttle_count)++;
+		state->throttle_count++;
 
-	if (!(was_throttled ^ is_throttled) &&
-	    time_before64(tmp_jiffs, __get_cpu_var(next_check)))
+	if (time_before64(now, state->next_check) &&
+			state->throttle_count != state->last_throttle_count)
 		return 0;
 
-	__get_cpu_var(next_check) = tmp_jiffs + CHECK_INTERVAL;
+	state->next_check = now + CHECK_INTERVAL;
+	state->last_throttle_count = state->throttle_count;
 
 	/* if we just entered the thermal event */
 	if (is_throttled) {
-		printk(KERN_CRIT "CPU%d: Temperature above threshold, "
-		       "cpu clock throttled (total events = %lu)\n",
-		       cpu, __get_cpu_var(thermal_throttle_count));
+		printk(KERN_CRIT "CPU%d: Temperature above threshold, cpu clock throttled (total events = %lu)\n", this_cpu, state->throttle_count);
 
 		add_taint(TAINT_MACHINE_CHECK);
 		return 1;
 	}
 	if (was_throttled) {
-		printk(KERN_INFO "CPU%d: Temperature/speed normal\n", cpu);
+		printk(KERN_INFO "CPU%d: Temperature/speed normal\n", this_cpu);
 		return 1;
 	}
 
@@ -213,7 +230,7 @@
 	__u64 msr_val;
 
 	rdmsrl(MSR_IA32_THERM_STATUS, msr_val);
-	if (therm_throt_process(msr_val & THERM_STATUS_PROCHOT))
+	if (therm_throt_process((msr_val & THERM_STATUS_PROCHOT) != 0))
 		mce_log_therm_throt_event(msr_val);
 }
 
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index a3c7adb..b5801c3 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1790,6 +1790,9 @@
 void set_perf_event_pending(void)
 {
 #ifdef CONFIG_X86_LOCAL_APIC
+	if (!x86_pmu.apic || !x86_pmu_initialized())
+		return;
+
 	apic->send_IPI_self(LOCAL_PENDING_VECTOR);
 #endif
 }
diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
index 2acfd3f..41fd965 100644
--- a/arch/x86/kernel/early_printk.c
+++ b/arch/x86/kernel/early_printk.c
@@ -178,6 +178,11 @@
 
 static inline void early_console_register(struct console *con, int keep_early)
 {
+	if (early_console->index != -1) {
+		printk(KERN_CRIT "ERROR: earlyprintk= %s already used\n",
+		       con->name);
+		return;
+	}
 	early_console = con;
 	if (keep_early)
 		early_console->flags &= ~CON_BOOT;
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 218aad7..050c278 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -79,7 +79,7 @@
  * any particular GDT layout, because we load our own as soon as we
  * can.
  */
-.section .text.head,"ax",@progbits
+__HEAD
 ENTRY(startup_32)
 	/* test KEEP_SEGMENTS flag to see if the bootloader is asking
 		us to not reload segments */
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index d0bc0a1..780cd92 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -40,7 +40,7 @@
 L3_START_KERNEL = pud_index(__START_KERNEL_map)
 
 	.text
-	.section .text.head
+	__HEAD
 	.code64
 	.globl startup_64
 startup_64:
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index a665c71..7e37dce 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -72,11 +72,9 @@
 
 /*
  * The IDT has to be page-aligned to simplify the Pentium
- * F0 0F bug workaround.. We have a special link segment
- * for this.
+ * F0 0F bug workaround.
  */
-gate_desc idt_table[NR_VECTORS]
-	__attribute__((__section__(".data.idt"))) = { { { { 0, 0 } } }, };
+gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
 #endif
 
 DECLARE_BITMAP(used_vectors, NR_VECTORS);
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index 027b5b4..f379309 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -114,7 +114,7 @@
 		return;
 
 	if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) {
-		pr_info("Skipping synchronization checks as TSC is reliable.\n");
+		printk_once(KERN_INFO "Skipping synchronization checks as TSC is reliable.\n");
 		return;
 	}
 
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index a46accc..92929fb 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -65,17 +65,11 @@
 #endif
 
 	/* Text and read-only data */
-
-	/* bootstrapping code */
-	.text.head : AT(ADDR(.text.head) - LOAD_OFFSET) {
-		_text = .;
-		*(.text.head)
-	} :text = 0x9090
-
-	/* The rest of the text */
 	.text :  AT(ADDR(.text) - LOAD_OFFSET) {
+		_text = .;
+		/* bootstrapping code */
+		HEAD_TEXT
 #ifdef CONFIG_X86_32
-		/* not really needed, already page aligned */
 		. = ALIGN(PAGE_SIZE);
 		*(.text.page_aligned)
 #endif
@@ -94,13 +88,7 @@
 
 	NOTES :text :note
 
-	/* Exception table */
-	. = ALIGN(16);
-	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
-		__start___ex_table = .;
-		*(__ex_table)
-		__stop___ex_table = .;
-	} :text = 0x9090
+	EXCEPTION_TABLE(16) :text = 0x9090
 
 	RO_DATA(PAGE_SIZE)
 
@@ -118,7 +106,6 @@
 #endif
 
 		PAGE_ALIGNED_DATA(PAGE_SIZE)
-		*(.data.idt)
 
 		CACHELINE_ALIGNED_DATA(CONFIG_X86_L1_CACHE_BYTES)
 
@@ -135,24 +122,21 @@
 #ifdef CONFIG_X86_64
 
 #define VSYSCALL_ADDR (-10*1024*1024)
-#define VSYSCALL_PHYS_ADDR ((LOADADDR(.data) + SIZEOF(.data) + \
-                            PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
-#define VSYSCALL_VIRT_ADDR ((ADDR(.data) + SIZEOF(.data) + \
-                            PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
 
-#define VLOAD_OFFSET (VSYSCALL_ADDR - VSYSCALL_PHYS_ADDR)
+#define VLOAD_OFFSET (VSYSCALL_ADDR - __vsyscall_0 + LOAD_OFFSET)
 #define VLOAD(x) (ADDR(x) - VLOAD_OFFSET)
 
-#define VVIRT_OFFSET (VSYSCALL_ADDR - VSYSCALL_VIRT_ADDR)
+#define VVIRT_OFFSET (VSYSCALL_ADDR - __vsyscall_0)
 #define VVIRT(x) (ADDR(x) - VVIRT_OFFSET)
 
+	. = ALIGN(4096);
+	__vsyscall_0 = .;
+
 	. = VSYSCALL_ADDR;
-	.vsyscall_0 : AT(VSYSCALL_PHYS_ADDR) {
+	.vsyscall_0 : AT(VLOAD(.vsyscall_0)) {
 		*(.vsyscall_0)
 	} :user
 
-	__vsyscall_0 = VSYSCALL_VIRT_ADDR;
-
 	. = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
 	.vsyscall_fn : AT(VLOAD(.vsyscall_fn)) {
 		*(.vsyscall_fn)
@@ -192,11 +176,9 @@
 		*(.vsyscall_3)
 	}
 
-	. = VSYSCALL_VIRT_ADDR + PAGE_SIZE;
+	. = __vsyscall_0 + PAGE_SIZE;
 
 #undef VSYSCALL_ADDR
-#undef VSYSCALL_PHYS_ADDR
-#undef VSYSCALL_VIRT_ADDR
 #undef VLOAD_OFFSET
 #undef VLOAD
 #undef VVIRT_OFFSET
@@ -219,36 +201,12 @@
 	PERCPU_VADDR(0, :percpu)
 #endif
 
-	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
-		_sinittext = .;
-		INIT_TEXT
-		_einittext = .;
-	}
+	INIT_TEXT_SECTION(PAGE_SIZE)
 #ifdef CONFIG_X86_64
 	:init
 #endif
 
-	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
-		INIT_DATA
-	}
-
-	. = ALIGN(16);
-	.init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) {
-		__setup_start = .;
-		*(.init.setup)
-		__setup_end = .;
-	}
-	.initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
-		__initcall_start = .;
-		INITCALLS
-		__initcall_end = .;
-	}
-
-	.con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) {
-		__con_initcall_start = .;
-		*(.con_initcall.init)
-		__con_initcall_end = .;
-	}
+	INIT_DATA_SECTION(16)
 
 	.x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
 		__x86_cpu_dev_start = .;
@@ -256,8 +214,6 @@
 		__x86_cpu_dev_end = .;
 	}
 
-	SECURITY_INIT
-
 	. = ALIGN(8);
 	.parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) {
 		__parainstructions = .;
@@ -288,15 +244,6 @@
 		EXIT_DATA
 	}
 
-#ifdef CONFIG_BLK_DEV_INITRD
-	. = ALIGN(PAGE_SIZE);
-	.init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
-		__initramfs_start = .;
-		*(.init.ramfs)
-		__initramfs_end = .;
-	}
-#endif
-
 #if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
 	PERCPU(PAGE_SIZE)
 #endif
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index 9b5a9f5..06630d2 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -1,9 +1,10 @@
 obj-y	:=  init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
-	    pat.o pgtable.o physaddr.o gup.o
+	    pat.o pgtable.o physaddr.o gup.o setup_nx.o
 
 # Make sure __phys_addr has no stackprotector
 nostackp := $(call cc-option, -fno-stack-protector)
 CFLAGS_physaddr.o		:= $(nostackp)
+CFLAGS_setup_nx.o		:= $(nostackp)
 
 obj-$(CONFIG_SMP)		+= tlb.o
 
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 0607119..73ffd55 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -28,69 +28,6 @@
 #endif
 ;
 
-int nx_enabled;
-
-#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
-static int disable_nx __cpuinitdata;
-
-/*
- * noexec = on|off
- *
- * Control non-executable mappings for processes.
- *
- * on      Enable
- * off     Disable
- */
-static int __init noexec_setup(char *str)
-{
-	if (!str)
-		return -EINVAL;
-	if (!strncmp(str, "on", 2)) {
-		__supported_pte_mask |= _PAGE_NX;
-		disable_nx = 0;
-	} else if (!strncmp(str, "off", 3)) {
-		disable_nx = 1;
-		__supported_pte_mask &= ~_PAGE_NX;
-	}
-	return 0;
-}
-early_param("noexec", noexec_setup);
-#endif
-
-#ifdef CONFIG_X86_PAE
-static void __init set_nx(void)
-{
-	unsigned int v[4], l, h;
-
-	if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
-		cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
-
-		if ((v[3] & (1 << 20)) && !disable_nx) {
-			rdmsr(MSR_EFER, l, h);
-			l |= EFER_NX;
-			wrmsr(MSR_EFER, l, h);
-			nx_enabled = 1;
-			__supported_pte_mask |= _PAGE_NX;
-		}
-	}
-}
-#else
-static inline void set_nx(void)
-{
-}
-#endif
-
-#ifdef CONFIG_X86_64
-void __cpuinit check_efer(void)
-{
-	unsigned long efer;
-
-	rdmsrl(MSR_EFER, efer);
-	if (!(efer & EFER_NX) || disable_nx)
-		__supported_pte_mask &= ~_PAGE_NX;
-}
-#endif
-
 static void __init find_early_table_space(unsigned long end, int use_pse,
 					  int use_gbpages)
 {
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 7257cf3..e78cd0e 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -81,6 +81,7 @@
 void pat_init(void)
 {
 	u64 pat;
+	bool boot_cpu = !boot_pat_state;
 
 	if (!pat_enabled)
 		return;
@@ -122,8 +123,10 @@
 		rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);
 
 	wrmsrl(MSR_IA32_CR_PAT, pat);
-	printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n",
-	       smp_processor_id(), boot_pat_state, pat);
+
+	if (boot_cpu)
+		printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n",
+		       smp_processor_id(), boot_pat_state, pat);
 }
 
 #undef PAT
diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
new file mode 100644
index 0000000..513d8ed
--- /dev/null
+++ b/arch/x86/mm/setup_nx.c
@@ -0,0 +1,69 @@
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+
+#include <asm/pgtable.h>
+
+int nx_enabled;
+
+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
+static int disable_nx __cpuinitdata;
+
+/*
+ * noexec = on|off
+ *
+ * Control non-executable mappings for processes.
+ *
+ * on      Enable
+ * off     Disable
+ */
+static int __init noexec_setup(char *str)
+{
+	if (!str)
+		return -EINVAL;
+	if (!strncmp(str, "on", 2)) {
+		__supported_pte_mask |= _PAGE_NX;
+		disable_nx = 0;
+	} else if (!strncmp(str, "off", 3)) {
+		disable_nx = 1;
+		__supported_pte_mask &= ~_PAGE_NX;
+	}
+	return 0;
+}
+early_param("noexec", noexec_setup);
+#endif
+
+#ifdef CONFIG_X86_PAE
+void __init set_nx(void)
+{
+	unsigned int v[4], l, h;
+
+	if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
+		cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
+
+		if ((v[3] & (1 << 20)) && !disable_nx) {
+			rdmsr(MSR_EFER, l, h);
+			l |= EFER_NX;
+			wrmsr(MSR_EFER, l, h);
+			nx_enabled = 1;
+			__supported_pte_mask |= _PAGE_NX;
+		}
+	}
+}
+#else
+void set_nx(void)
+{
+}
+#endif
+
+#ifdef CONFIG_X86_64
+void __cpuinit check_efer(void)
+{
+	unsigned long efer;
+
+	rdmsrl(MSR_EFER, efer);
+	if (!(efer & EFER_NX) || disable_nx)
+		__supported_pte_mask &= ~_PAGE_NX;
+}
+#endif
+
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 544eb749..3439616 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1082,6 +1082,11 @@
 
 	__supported_pte_mask |= _PAGE_IOMAP;
 
+#ifdef CONFIG_X86_64
+	/* Work out if we support NX */
+	check_efer();
+#endif
+
 	xen_setup_features();
 
 	/* Get mfn list */
@@ -1123,11 +1128,6 @@
 
 	pgd = (pgd_t *)xen_start_info->pt_base;
 
-#ifdef CONFIG_X86_64
-	/* Work out if we support NX */
-	check_efer();
-#endif
-
 	/* Don't do the full vcpu_info placement stuff until we have a
 	   possible map and a non-dummy shared_info. */
 	per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 135fbfe..7411915 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -94,36 +94,33 @@
 
 EXPORT_SYMBOL(acpi_bus_get_device);
 
+acpi_status acpi_bus_get_status_handle(acpi_handle handle,
+				       unsigned long long *sta)
+{
+	acpi_status status;
+
+	status = acpi_evaluate_integer(handle, "_STA", NULL, sta);
+	if (ACPI_SUCCESS(status))
+		return AE_OK;
+
+	if (status == AE_NOT_FOUND) {
+		*sta = ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_ENABLED |
+		       ACPI_STA_DEVICE_UI      | ACPI_STA_DEVICE_FUNCTIONING;
+		return AE_OK;
+	}
+	return status;
+}
+
 int acpi_bus_get_status(struct acpi_device *device)
 {
-	acpi_status status = AE_OK;
-	unsigned long long sta = 0;
+	acpi_status status;
+	unsigned long long sta;
 
+	status = acpi_bus_get_status_handle(device->handle, &sta);
+	if (ACPI_FAILURE(status))
+		return -ENODEV;
 
-	if (!device)
-		return -EINVAL;
-
-	/*
-	 * Evaluate _STA if present.
-	 */
-	if (device->flags.dynamic_status) {
-		status =
-		    acpi_evaluate_integer(device->handle, "_STA", NULL, &sta);
-		if (ACPI_FAILURE(status))
-			return -ENODEV;
-		STRUCT_TO_INT(device->status) = (int)sta;
-	}
-
-	/*
-	 * According to ACPI spec some device can be present and functional
-	 * even if the parent is not present but functional.
-	 * In such conditions the child device should not inherit the status
-	 * from the parent.
-	 */
-	else
-		STRUCT_TO_INT(device->status) =
-		    ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_ENABLED |
-		    ACPI_STA_DEVICE_UI      | ACPI_STA_DEVICE_FUNCTIONING;
+	STRUCT_TO_INT(device->status) = (int) sta;
 
 	if (device->status.functional && !device->status.present) {
 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] status [%08x]: "
@@ -135,10 +132,8 @@
 	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] status [%08x]\n",
 			  device->pnp.bus_id,
 			  (u32) STRUCT_TO_INT(device->status)));
-
 	return 0;
 }
-
 EXPORT_SYMBOL(acpi_bus_get_status);
 
 void acpi_bus_private_data_handler(acpi_handle handle,
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 408ebde..468921be 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -22,6 +22,8 @@
 #define ACPI_BUS_HID			"LNXSYBUS"
 #define ACPI_BUS_DEVICE_NAME		"System Bus"
 
+#define ACPI_IS_ROOT_DEVICE(device)    (!(device)->parent)
+
 static LIST_HEAD(acpi_device_list);
 static LIST_HEAD(acpi_bus_id_list);
 DEFINE_MUTEX(acpi_device_lock);
@@ -43,40 +45,19 @@
 {
 	int len;
 	int count;
-
-	if (!acpi_dev->flags.hardware_id && !acpi_dev->flags.compatible_ids)
-		return -ENODEV;
+	struct acpi_hardware_id *id;
 
 	len = snprintf(modalias, size, "acpi:");
 	size -= len;
 
-	if (acpi_dev->flags.hardware_id) {
-		count = snprintf(&modalias[len], size, "%s:",
-				 acpi_dev->pnp.hardware_id);
+	list_for_each_entry(id, &acpi_dev->pnp.ids, list) {
+		count = snprintf(&modalias[len], size, "%s:", id->id);
 		if (count < 0 || count >= size)
 			return -EINVAL;
 		len += count;
 		size -= count;
 	}
 
-	if (acpi_dev->flags.compatible_ids) {
-		struct acpica_device_id_list *cid_list;
-		int i;
-
-		cid_list = acpi_dev->pnp.cid_list;
-		for (i = 0; i < cid_list->count; i++) {
-			count = snprintf(&modalias[len], size, "%s:",
-					 cid_list->ids[i].string);
-			if (count < 0 || count >= size) {
-				printk(KERN_ERR PREFIX "%s cid[%i] exceeds event buffer size",
-				       acpi_dev->pnp.device_name, i);
-				break;
-			}
-			len += count;
-			size -= count;
-		}
-	}
-
 	modalias[len] = '\0';
 	return len;
 }
@@ -183,7 +164,7 @@
 acpi_device_hid_show(struct device *dev, struct device_attribute *attr, char *buf) {
 	struct acpi_device *acpi_dev = to_acpi_device(dev);
 
-	return sprintf(buf, "%s\n", acpi_dev->pnp.hardware_id);
+	return sprintf(buf, "%s\n", acpi_device_hid(acpi_dev));
 }
 static DEVICE_ATTR(hid, 0444, acpi_device_hid_show, NULL);
 
@@ -219,17 +200,13 @@
 			goto end;
 	}
 
-	if (dev->flags.hardware_id) {
-		result = device_create_file(&dev->dev, &dev_attr_hid);
-		if (result)
-			goto end;
-	}
+	result = device_create_file(&dev->dev, &dev_attr_hid);
+	if (result)
+		goto end;
 
-	if (dev->flags.hardware_id || dev->flags.compatible_ids) {
-		result = device_create_file(&dev->dev, &dev_attr_modalias);
-		if (result)
-			goto end;
-	}
+	result = device_create_file(&dev->dev, &dev_attr_modalias);
+	if (result)
+		goto end;
 
         /*
          * If device has _EJ0, 'eject' file is created that is used to trigger
@@ -255,11 +232,8 @@
 	if (ACPI_SUCCESS(status))
 		device_remove_file(&dev->dev, &dev_attr_eject);
 
-	if (dev->flags.hardware_id || dev->flags.compatible_ids)
-		device_remove_file(&dev->dev, &dev_attr_modalias);
-
-	if (dev->flags.hardware_id)
-		device_remove_file(&dev->dev, &dev_attr_hid);
+	device_remove_file(&dev->dev, &dev_attr_modalias);
+	device_remove_file(&dev->dev, &dev_attr_hid);
 	if (dev->handle)
 		device_remove_file(&dev->dev, &dev_attr_path);
 }
@@ -271,6 +245,7 @@
 			  const struct acpi_device_id *ids)
 {
 	const struct acpi_device_id *id;
+	struct acpi_hardware_id *hwid;
 
 	/*
 	 * If the device is not present, it is unnecessary to load device
@@ -279,40 +254,30 @@
 	if (!device->status.present)
 		return -ENODEV;
 
-	if (device->flags.hardware_id) {
-		for (id = ids; id->id[0]; id++) {
-			if (!strcmp((char*)id->id, device->pnp.hardware_id))
+	for (id = ids; id->id[0]; id++)
+		list_for_each_entry(hwid, &device->pnp.ids, list)
+			if (!strcmp((char *) id->id, hwid->id))
 				return 0;
-		}
-	}
-
-	if (device->flags.compatible_ids) {
-		struct acpica_device_id_list *cid_list = device->pnp.cid_list;
-		int i;
-
-		for (id = ids; id->id[0]; id++) {
-			/* compare multiple _CID entries against driver ids */
-			for (i = 0; i < cid_list->count; i++) {
-				if (!strcmp((char*)id->id,
-					    cid_list->ids[i].string))
-					return 0;
-			}
-		}
-	}
 
 	return -ENOENT;
 }
 EXPORT_SYMBOL(acpi_match_device_ids);
 
+static void acpi_free_ids(struct acpi_device *device)
+{
+	struct acpi_hardware_id *id, *tmp;
+
+	list_for_each_entry_safe(id, tmp, &device->pnp.ids, list) {
+		kfree(id->id);
+		kfree(id);
+	}
+}
+
 static void acpi_device_release(struct device *dev)
 {
 	struct acpi_device *acpi_dev = to_acpi_device(dev);
 
-	kfree(acpi_dev->pnp.cid_list);
-	if (acpi_dev->flags.hardware_id)
-		kfree(acpi_dev->pnp.hardware_id);
-	if (acpi_dev->flags.unique_id)
-		kfree(acpi_dev->pnp.unique_id);
+	acpi_free_ids(acpi_dev);
 	kfree(acpi_dev);
 }
 
@@ -378,15 +343,13 @@
 static int acpi_device_install_notify_handler(struct acpi_device *device)
 {
 	acpi_status status;
-	char *hid;
 
-	hid = acpi_device_hid(device);
-	if (!strcmp(hid, ACPI_BUTTON_HID_POWERF))
+	if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON)
 		status =
 		    acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
 						     acpi_device_notify_fixed,
 						     device);
-	else if (!strcmp(hid, ACPI_BUTTON_HID_SLEEPF))
+	else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON)
 		status =
 		    acpi_install_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
 						     acpi_device_notify_fixed,
@@ -404,10 +367,10 @@
 
 static void acpi_device_remove_notify_handler(struct acpi_device *device)
 {
-	if (!strcmp(acpi_device_hid(device), ACPI_BUTTON_HID_POWERF))
+	if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON)
 		acpi_remove_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
 						acpi_device_notify_fixed);
-	else if (!strcmp(acpi_device_hid(device), ACPI_BUTTON_HID_SLEEPF))
+	else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON)
 		acpi_remove_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
 						acpi_device_notify_fixed);
 	else
@@ -474,12 +437,12 @@
 	.uevent		= acpi_device_uevent,
 };
 
-static int acpi_device_register(struct acpi_device *device,
-				 struct acpi_device *parent)
+static int acpi_device_register(struct acpi_device *device)
 {
 	int result;
 	struct acpi_device_bus_id *acpi_device_bus_id, *new_bus_id;
 	int found = 0;
+
 	/*
 	 * Linkage
 	 * -------
@@ -501,8 +464,9 @@
 	 * If failed, create one and link it into acpi_bus_id_list
 	 */
 	list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node) {
-		if(!strcmp(acpi_device_bus_id->bus_id, device->flags.hardware_id? device->pnp.hardware_id : "device")) {
-			acpi_device_bus_id->instance_no ++;
+		if (!strcmp(acpi_device_bus_id->bus_id,
+			    acpi_device_hid(device))) {
+			acpi_device_bus_id->instance_no++;
 			found = 1;
 			kfree(new_bus_id);
 			break;
@@ -510,7 +474,7 @@
 	}
 	if (!found) {
 		acpi_device_bus_id = new_bus_id;
-		strcpy(acpi_device_bus_id->bus_id, device->flags.hardware_id ? device->pnp.hardware_id : "device");
+		strcpy(acpi_device_bus_id->bus_id, acpi_device_hid(device));
 		acpi_device_bus_id->instance_no = 0;
 		list_add_tail(&acpi_device_bus_id->node, &acpi_bus_id_list);
 	}
@@ -524,7 +488,7 @@
 	mutex_unlock(&acpi_device_lock);
 
 	if (device->parent)
-		device->dev.parent = &parent->dev;
+		device->dev.parent = &device->parent->dev;
 	device->dev.bus = &acpi_bus_type;
 	device->dev.release = &acpi_device_release;
 	result = device_register(&device->dev);
@@ -664,6 +628,33 @@
 /* --------------------------------------------------------------------------
                                  Device Enumeration
    -------------------------------------------------------------------------- */
+static struct acpi_device *acpi_bus_get_parent(acpi_handle handle)
+{
+	acpi_status status;
+	int ret;
+	struct acpi_device *device;
+
+	/*
+	 * Fixed hardware devices do not appear in the namespace and do not
+	 * have handles, but we fabricate acpi_devices for them, so we have
+	 * to deal with them specially.
+	 */
+	if (handle == NULL)
+		return acpi_root;
+
+	do {
+		status = acpi_get_parent(handle, &handle);
+		if (status == AE_NULL_ENTRY)
+			return NULL;
+		if (ACPI_FAILURE(status))
+			return acpi_root;
+
+		ret = acpi_bus_get_device(handle, &device);
+		if (ret == 0)
+			return device;
+	} while (1);
+}
+
 acpi_status
 acpi_bus_get_ejd(acpi_handle handle, acpi_handle *ejd)
 {
@@ -876,11 +867,6 @@
 	if (ACPI_SUCCESS(status))
 		device->flags.dynamic_status = 1;
 
-	/* Presence of _CID indicates 'compatible_ids' */
-	status = acpi_get_handle(device->handle, "_CID", &temp);
-	if (ACPI_SUCCESS(status))
-		device->flags.compatible_ids = 1;
-
 	/* Presence of _RMV indicates 'removable' */
 	status = acpi_get_handle(device->handle, "_RMV", &temp);
 	if (ACPI_SUCCESS(status))
@@ -918,8 +904,7 @@
 	return 0;
 }
 
-static void acpi_device_get_busid(struct acpi_device *device,
-				  acpi_handle handle, int type)
+static void acpi_device_get_busid(struct acpi_device *device)
 {
 	char bus_id[5] = { '?', 0 };
 	struct acpi_buffer buffer = { sizeof(bus_id), bus_id };
@@ -931,10 +916,12 @@
 	 * The device's Bus ID is simply the object name.
 	 * TBD: Shouldn't this value be unique (within the ACPI namespace)?
 	 */
-	switch (type) {
-	case ACPI_BUS_TYPE_SYSTEM:
+	if (ACPI_IS_ROOT_DEVICE(device)) {
 		strcpy(device->pnp.bus_id, "ACPI");
-		break;
+		return;
+	}
+
+	switch (device->device_type) {
 	case ACPI_BUS_TYPE_POWER_BUTTON:
 		strcpy(device->pnp.bus_id, "PWRF");
 		break;
@@ -942,7 +929,7 @@
 		strcpy(device->pnp.bus_id, "SLPF");
 		break;
 	default:
-		acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer);
+		acpi_get_name(device->handle, ACPI_SINGLE_NAME, &buffer);
 		/* Clean up trailing underscores (if any) */
 		for (i = 3; i > 1; i--) {
 			if (bus_id[i] == '_')
@@ -1000,204 +987,132 @@
 	return acpi_get_handle(device->handle, "_DCK", &tmp);
 }
 
-static struct acpica_device_id_list*
-acpi_add_cid(
-	struct acpi_device_info         *info,
-	struct acpica_device_id         *new_cid)
+char *acpi_device_hid(struct acpi_device *device)
 {
-	struct acpica_device_id_list    *cid;
-	char                            *next_id_string;
-	acpi_size                       cid_length;
-	acpi_size                       new_cid_length;
-	u32                             i;
+	struct acpi_hardware_id *hid;
 
+	hid = list_first_entry(&device->pnp.ids, struct acpi_hardware_id, list);
+	return hid->id;
+}
+EXPORT_SYMBOL(acpi_device_hid);
 
-	/* Allocate new CID list with room for the new CID */
+static void acpi_add_id(struct acpi_device *device, const char *dev_id)
+{
+	struct acpi_hardware_id *id;
 
-	if (!new_cid)
-		new_cid_length = info->compatible_id_list.list_size;
-	else if (info->compatible_id_list.list_size)
-		new_cid_length = info->compatible_id_list.list_size +
-			new_cid->length + sizeof(struct acpica_device_id);
-	else
-		new_cid_length = sizeof(struct acpica_device_id_list) + new_cid->length;
+	id = kmalloc(sizeof(*id), GFP_KERNEL);
+	if (!id)
+		return;
 
-	cid = ACPI_ALLOCATE_ZEROED(new_cid_length);
-	if (!cid) {
-		return NULL;
+	id->id = kmalloc(strlen(dev_id) + 1, GFP_KERNEL);
+	if (!id->id) {
+		kfree(id);
+		return;
 	}
 
-	cid->list_size = new_cid_length;
-	cid->count = info->compatible_id_list.count;
-	if (new_cid)
-		cid->count++;
-	next_id_string = (char *) cid->ids + (cid->count * sizeof(struct acpica_device_id));
-
-	/* Copy all existing CIDs */
-
-	for (i = 0; i < info->compatible_id_list.count; i++) {
-		cid_length = info->compatible_id_list.ids[i].length;
-		cid->ids[i].string = next_id_string;
-		cid->ids[i].length = cid_length;
-
-		ACPI_MEMCPY(next_id_string, info->compatible_id_list.ids[i].string,
-			cid_length);
-
-		next_id_string += cid_length;
-	}
-
-	/* Append the new CID */
-
-	if (new_cid) {
-		cid->ids[i].string = next_id_string;
-		cid->ids[i].length = new_cid->length;
-
-		ACPI_MEMCPY(next_id_string, new_cid->string, new_cid->length);
-	}
-
-	return cid;
+	strcpy(id->id, dev_id);
+	list_add_tail(&id->list, &device->pnp.ids);
 }
 
-static void acpi_device_set_id(struct acpi_device *device,
-			       struct acpi_device *parent, acpi_handle handle,
-			       int type)
+static void acpi_device_set_id(struct acpi_device *device)
 {
-	struct acpi_device_info *info = NULL;
-	char *hid = NULL;
-	char *uid = NULL;
-	struct acpica_device_id_list *cid_list = NULL;
-	char *cid_add = NULL;
 	acpi_status status;
+	struct acpi_device_info *info;
+	struct acpica_device_id_list *cid_list;
+	int i;
 
-	switch (type) {
+	switch (device->device_type) {
 	case ACPI_BUS_TYPE_DEVICE:
-		status = acpi_get_object_info(handle, &info);
+		if (ACPI_IS_ROOT_DEVICE(device)) {
+			acpi_add_id(device, ACPI_SYSTEM_HID);
+			break;
+		} else if (ACPI_IS_ROOT_DEVICE(device->parent)) {
+			/* \_SB_, the only root-level namespace device */
+			acpi_add_id(device, ACPI_BUS_HID);
+			strcpy(device->pnp.device_name, ACPI_BUS_DEVICE_NAME);
+			strcpy(device->pnp.device_class, ACPI_BUS_CLASS);
+			break;
+		}
+
+		status = acpi_get_object_info(device->handle, &info);
 		if (ACPI_FAILURE(status)) {
 			printk(KERN_ERR PREFIX "%s: Error reading device info\n", __func__);
 			return;
 		}
 
 		if (info->valid & ACPI_VALID_HID)
-			hid = info->hardware_id.string;
-		if (info->valid & ACPI_VALID_UID)
-			uid = info->unique_id.string;
-		if (info->valid & ACPI_VALID_CID)
+			acpi_add_id(device, info->hardware_id.string);
+		if (info->valid & ACPI_VALID_CID) {
 			cid_list = &info->compatible_id_list;
+			for (i = 0; i < cid_list->count; i++)
+				acpi_add_id(device, cid_list->ids[i].string);
+		}
 		if (info->valid & ACPI_VALID_ADR) {
 			device->pnp.bus_address = info->address;
 			device->flags.bus_address = 1;
 		}
 
-		/* If we have a video/bay/dock device, add our selfdefined
-		   HID to the CID list. Like that the video/bay/dock drivers
-		   will get autoloaded and the device might still match
-		   against another driver.
-		*/
+		/*
+		 * Some devices don't reliably have _HIDs & _CIDs, so add
+		 * synthetic HIDs to make sure drivers can find them.
+		 */
 		if (acpi_is_video_device(device))
-			cid_add = ACPI_VIDEO_HID;
+			acpi_add_id(device, ACPI_VIDEO_HID);
 		else if (ACPI_SUCCESS(acpi_bay_match(device)))
-			cid_add = ACPI_BAY_HID;
+			acpi_add_id(device, ACPI_BAY_HID);
 		else if (ACPI_SUCCESS(acpi_dock_match(device)))
-			cid_add = ACPI_DOCK_HID;
+			acpi_add_id(device, ACPI_DOCK_HID);
 
 		break;
 	case ACPI_BUS_TYPE_POWER:
-		hid = ACPI_POWER_HID;
+		acpi_add_id(device, ACPI_POWER_HID);
 		break;
 	case ACPI_BUS_TYPE_PROCESSOR:
-		hid = ACPI_PROCESSOR_OBJECT_HID;
-		break;
-	case ACPI_BUS_TYPE_SYSTEM:
-		hid = ACPI_SYSTEM_HID;
+		acpi_add_id(device, ACPI_PROCESSOR_OBJECT_HID);
 		break;
 	case ACPI_BUS_TYPE_THERMAL:
-		hid = ACPI_THERMAL_HID;
+		acpi_add_id(device, ACPI_THERMAL_HID);
 		break;
 	case ACPI_BUS_TYPE_POWER_BUTTON:
-		hid = ACPI_BUTTON_HID_POWERF;
+		acpi_add_id(device, ACPI_BUTTON_HID_POWERF);
 		break;
 	case ACPI_BUS_TYPE_SLEEP_BUTTON:
-		hid = ACPI_BUTTON_HID_SLEEPF;
+		acpi_add_id(device, ACPI_BUTTON_HID_SLEEPF);
 		break;
 	}
 
 	/*
-	 * \_SB
-	 * ----
-	 * Fix for the system root bus device -- the only root-level device.
+	 * We build acpi_devices for some objects that don't have _HID or _CID,
+	 * e.g., PCI bridges and slots.  Drivers can't bind to these objects,
+	 * but we do use them indirectly by traversing the acpi_device tree.
+	 * This generic ID isn't useful for driver binding, but it provides
+	 * the useful property that "every acpi_device has an ID."
 	 */
-	if (((acpi_handle)parent == ACPI_ROOT_OBJECT) && (type == ACPI_BUS_TYPE_DEVICE)) {
-		hid = ACPI_BUS_HID;
-		strcpy(device->pnp.device_name, ACPI_BUS_DEVICE_NAME);
-		strcpy(device->pnp.device_class, ACPI_BUS_CLASS);
-	}
-
-	if (hid) {
-		device->pnp.hardware_id = ACPI_ALLOCATE_ZEROED(strlen (hid) + 1);
-		if (device->pnp.hardware_id) {
-			strcpy(device->pnp.hardware_id, hid);
-			device->flags.hardware_id = 1;
-		}
-	}
-	if (!device->flags.hardware_id)
-		device->pnp.hardware_id = "";
-
-	if (uid) {
-		device->pnp.unique_id = ACPI_ALLOCATE_ZEROED(strlen (uid) + 1);
-		if (device->pnp.unique_id) {
-			strcpy(device->pnp.unique_id, uid);
-			device->flags.unique_id = 1;
-		}
-	}
-	if (!device->flags.unique_id)
-		device->pnp.unique_id = "";
-
-	if (cid_list || cid_add) {
-		struct acpica_device_id_list *list;
-
-		if (cid_add) {
-			struct acpica_device_id cid;
-			cid.length = strlen (cid_add) + 1;
-			cid.string = cid_add;
-
-			list = acpi_add_cid(info, &cid);
-		} else {
-			list = acpi_add_cid(info, NULL);
-		}
-
-		if (list) {
-			device->pnp.cid_list = list;
-			if (cid_add)
-				device->flags.compatible_ids = 1;
-		}
-	}
-
-	kfree(info);
+	if (list_empty(&device->pnp.ids))
+		acpi_add_id(device, "device");
 }
 
-static int acpi_device_set_context(struct acpi_device *device, int type)
+static int acpi_device_set_context(struct acpi_device *device)
 {
-	acpi_status status = AE_OK;
-	int result = 0;
+	acpi_status status;
+
 	/*
 	 * Context
 	 * -------
 	 * Attach this 'struct acpi_device' to the ACPI object.  This makes
-	 * resolutions from handle->device very efficient.  Note that we need
-	 * to be careful with fixed-feature devices as they all attach to the
-	 * root object.
+	 * resolutions from handle->device very efficient.  Fixed hardware
+	 * devices have no handles, so we skip them.
 	 */
-	if (type != ACPI_BUS_TYPE_POWER_BUTTON &&
-	    type != ACPI_BUS_TYPE_SLEEP_BUTTON) {
-		status = acpi_attach_data(device->handle,
-					  acpi_bus_data_handler, device);
+	if (!device->handle)
+		return 0;
 
-		if (ACPI_FAILURE(status)) {
-			printk(KERN_ERR PREFIX "Error attaching device data\n");
-			result = -ENODEV;
-		}
-	}
-	return result;
+	status = acpi_attach_data(device->handle,
+				  acpi_bus_data_handler, device);
+	if (ACPI_SUCCESS(status))
+		return 0;
+
+	printk(KERN_ERR PREFIX "Error attaching device data\n");
+	return -ENODEV;
 }
 
 static int acpi_bus_remove(struct acpi_device *dev, int rmdevice)
@@ -1223,17 +1138,14 @@
 	return 0;
 }
 
-static int
-acpi_add_single_object(struct acpi_device **child,
-		       struct acpi_device *parent, acpi_handle handle, int type,
-			struct acpi_bus_ops *ops)
+static int acpi_add_single_object(struct acpi_device **child,
+				  acpi_handle handle, int type,
+				  unsigned long long sta,
+				  struct acpi_bus_ops *ops)
 {
-	int result = 0;
-	struct acpi_device *device = NULL;
-
-
-	if (!child)
-		return -EINVAL;
+	int result;
+	struct acpi_device *device;
+	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 
 	device = kzalloc(sizeof(struct acpi_device), GFP_KERNEL);
 	if (!device) {
@@ -1241,75 +1153,31 @@
 		return -ENOMEM;
 	}
 
+	INIT_LIST_HEAD(&device->pnp.ids);
+	device->device_type = type;
 	device->handle = handle;
-	device->parent = parent;
+	device->parent = acpi_bus_get_parent(handle);
 	device->bus_ops = *ops; /* workround for not call .start */
+	STRUCT_TO_INT(device->status) = sta;
 
-
-	acpi_device_get_busid(device, handle, type);
+	acpi_device_get_busid(device);
 
 	/*
 	 * Flags
 	 * -----
-	 * Get prior to calling acpi_bus_get_status() so we know whether
-	 * or not _STA is present.  Note that we only look for object
-	 * handles -- cannot evaluate objects until we know the device is
-	 * present and properly initialized.
+	 * Note that we only look for object handles -- cannot evaluate objects
+	 * until we know the device is present and properly initialized.
 	 */
 	result = acpi_bus_get_flags(device);
 	if (result)
 		goto end;
 
 	/*
-	 * Status
-	 * ------
-	 * See if the device is present.  We always assume that non-Device
-	 * and non-Processor objects (e.g. thermal zones, power resources,
-	 * etc.) are present, functioning, etc. (at least when parent object
-	 * is present).  Note that _STA has a different meaning for some
-	 * objects (e.g. power resources) so we need to be careful how we use
-	 * it.
-	 */
-	switch (type) {
-	case ACPI_BUS_TYPE_PROCESSOR:
-	case ACPI_BUS_TYPE_DEVICE:
-		result = acpi_bus_get_status(device);
-		if (ACPI_FAILURE(result)) {
-			result = -ENODEV;
-			goto end;
-		}
-		/*
-		 * When the device is neither present nor functional, the
-		 * device should not be added to Linux ACPI device tree.
-		 * When the status of the device is not present but functinal,
-		 * it should be added to Linux ACPI tree. For example : bay
-		 * device , dock device.
-		 * In such conditions it is unncessary to check whether it is
-		 * bay device or dock device.
-		 */
-		if (!device->status.present && !device->status.functional) {
-			result = -ENODEV;
-			goto end;
-		}
-		break;
-	default:
-		STRUCT_TO_INT(device->status) =
-		    ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_ENABLED |
-		    ACPI_STA_DEVICE_UI      | ACPI_STA_DEVICE_FUNCTIONING;
-		break;
-	}
-
-	/*
 	 * Initialize Device
 	 * -----------------
 	 * TBD: Synch with Core's enumeration/initialization process.
 	 */
-
-	/*
-	 * Hardware ID, Unique ID, & Bus Address
-	 * -------------------------------------
-	 */
-	acpi_device_set_id(device, parent, handle, type);
+	acpi_device_set_id(device);
 
 	/*
 	 * Power Management
@@ -1341,10 +1209,10 @@
 			goto end;
 	}
 
-	if ((result = acpi_device_set_context(device, type)))
+	if ((result = acpi_device_set_context(device)))
 		goto end;
 
-	result = acpi_device_register(device, parent);
+	result = acpi_device_register(device);
 
 	/*
 	 * Bind _ADR-Based Devices when hot add
@@ -1355,165 +1223,148 @@
 	}
 
 end:
-	if (!result)
+	if (!result) {
+		acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
+		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+			"Adding %s [%s] parent %s\n", dev_name(&device->dev),
+			 (char *) buffer.pointer,
+			 device->parent ? dev_name(&device->parent->dev) :
+					  "(null)"));
+		kfree(buffer.pointer);
 		*child = device;
-	else
+	} else
 		acpi_device_release(&device->dev);
 
 	return result;
 }
 
-static int acpi_bus_scan(struct acpi_device *start, struct acpi_bus_ops *ops)
+#define ACPI_STA_DEFAULT (ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_ENABLED | \
+			  ACPI_STA_DEVICE_UI      | ACPI_STA_DEVICE_FUNCTIONING)
+
+static int acpi_bus_type_and_status(acpi_handle handle, int *type,
+				    unsigned long long *sta)
 {
-	acpi_status status = AE_OK;
-	struct acpi_device *parent = NULL;
-	struct acpi_device *child = NULL;
-	acpi_handle phandle = NULL;
-	acpi_handle chandle = NULL;
-	acpi_object_type type = 0;
-	u32 level = 1;
+	acpi_status status;
+	acpi_object_type acpi_type;
 
+	status = acpi_get_type(handle, &acpi_type);
+	if (ACPI_FAILURE(status))
+		return -ENODEV;
 
-	if (!start)
-		return -EINVAL;
-
-	parent = start;
-	phandle = start->handle;
-
-	/*
-	 * Parse through the ACPI namespace, identify all 'devices', and
-	 * create a new 'struct acpi_device' for each.
-	 */
-	while ((level > 0) && parent) {
-
-		status = acpi_get_next_object(ACPI_TYPE_ANY, phandle,
-					      chandle, &chandle);
-
-		/*
-		 * If this scope is exhausted then move our way back up.
-		 */
-		if (ACPI_FAILURE(status)) {
-			level--;
-			chandle = phandle;
-			acpi_get_parent(phandle, &phandle);
-			if (parent->parent)
-				parent = parent->parent;
-			continue;
-		}
-
-		status = acpi_get_type(chandle, &type);
+	switch (acpi_type) {
+	case ACPI_TYPE_ANY:		/* for ACPI_ROOT_OBJECT */
+	case ACPI_TYPE_DEVICE:
+		*type = ACPI_BUS_TYPE_DEVICE;
+		status = acpi_bus_get_status_handle(handle, sta);
 		if (ACPI_FAILURE(status))
-			continue;
-
-		/*
-		 * If this is a scope object then parse it (depth-first).
-		 */
-		if (type == ACPI_TYPE_LOCAL_SCOPE) {
-			level++;
-			phandle = chandle;
-			chandle = NULL;
-			continue;
-		}
-
-		/*
-		 * We're only interested in objects that we consider 'devices'.
-		 */
-		switch (type) {
-		case ACPI_TYPE_DEVICE:
-			type = ACPI_BUS_TYPE_DEVICE;
-			break;
-		case ACPI_TYPE_PROCESSOR:
-			type = ACPI_BUS_TYPE_PROCESSOR;
-			break;
-		case ACPI_TYPE_THERMAL:
-			type = ACPI_BUS_TYPE_THERMAL;
-			break;
-		case ACPI_TYPE_POWER:
-			type = ACPI_BUS_TYPE_POWER;
-			break;
-		default:
-			continue;
-		}
-
-		if (ops->acpi_op_add)
-			status = acpi_add_single_object(&child, parent,
-				chandle, type, ops);
-		else
-			status = acpi_bus_get_device(chandle, &child);
-
+			return -ENODEV;
+		break;
+	case ACPI_TYPE_PROCESSOR:
+		*type = ACPI_BUS_TYPE_PROCESSOR;
+		status = acpi_bus_get_status_handle(handle, sta);
 		if (ACPI_FAILURE(status))
-			continue;
-
-		if (ops->acpi_op_start && !(ops->acpi_op_add)) {
-			status = acpi_start_single_object(child);
-			if (ACPI_FAILURE(status))
-				continue;
-		}
-
-		/*
-		 * If the device is present, enabled, and functioning then
-		 * parse its scope (depth-first).  Note that we need to
-		 * represent absent devices to facilitate PnP notifications
-		 * -- but only the subtree head (not all of its children,
-		 * which will be enumerated when the parent is inserted).
-		 *
-		 * TBD: Need notifications and other detection mechanisms
-		 *      in place before we can fully implement this.
-		 */
-		 /*
-		 * When the device is not present but functional, it is also
-		 * necessary to scan the children of this device.
-		 */
-		if (child->status.present || (!child->status.present &&
-					child->status.functional)) {
-			status = acpi_get_next_object(ACPI_TYPE_ANY, chandle,
-						      NULL, NULL);
-			if (ACPI_SUCCESS(status)) {
-				level++;
-				phandle = chandle;
-				chandle = NULL;
-				parent = child;
-			}
-		}
+			return -ENODEV;
+		break;
+	case ACPI_TYPE_THERMAL:
+		*type = ACPI_BUS_TYPE_THERMAL;
+		*sta = ACPI_STA_DEFAULT;
+		break;
+	case ACPI_TYPE_POWER:
+		*type = ACPI_BUS_TYPE_POWER;
+		*sta = ACPI_STA_DEFAULT;
+		break;
+	default:
+		return -ENODEV;
 	}
 
 	return 0;
 }
 
+static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl,
+				      void *context, void **return_value)
+{
+	struct acpi_bus_ops *ops = context;
+	int type;
+	unsigned long long sta;
+	struct acpi_device *device;
+	acpi_status status;
+	int result;
+
+	result = acpi_bus_type_and_status(handle, &type, &sta);
+	if (result)
+		return AE_OK;
+
+	if (!(sta & ACPI_STA_DEVICE_PRESENT) &&
+	    !(sta & ACPI_STA_DEVICE_FUNCTIONING))
+		return AE_CTRL_DEPTH;
+
+	/*
+	 * We may already have an acpi_device from a previous enumeration.  If
+	 * so, we needn't add it again, but we may still have to start it.
+	 */
+	device = NULL;
+	acpi_bus_get_device(handle, &device);
+	if (ops->acpi_op_add && !device)
+		acpi_add_single_object(&device, handle, type, sta, ops);
+
+	if (!device)
+		return AE_CTRL_DEPTH;
+
+	if (ops->acpi_op_start && !(ops->acpi_op_add)) {
+		status = acpi_start_single_object(device);
+		if (ACPI_FAILURE(status))
+			return AE_CTRL_DEPTH;
+	}
+
+	if (!*return_value)
+		*return_value = device;
+	return AE_OK;
+}
+
+static int acpi_bus_scan(acpi_handle handle, struct acpi_bus_ops *ops,
+			 struct acpi_device **child)
+{
+	acpi_status status;
+	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+	void *device = NULL;
+
+	acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
+	printk(KERN_INFO PREFIX "Enumerating devices from [%s]\n",
+	       (char *) buffer.pointer);
+
+	status = acpi_bus_check_add(handle, 0, ops, &device);
+	if (ACPI_SUCCESS(status))
+		acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
+				    acpi_bus_check_add, ops, &device);
+
+	if (child)
+		*child = device;
+	return 0;
+}
+
 int
 acpi_bus_add(struct acpi_device **child,
 	     struct acpi_device *parent, acpi_handle handle, int type)
 {
-	int result;
 	struct acpi_bus_ops ops;
 
 	memset(&ops, 0, sizeof(ops));
 	ops.acpi_op_add = 1;
 
-	result = acpi_add_single_object(child, parent, handle, type, &ops);
-	if (!result)
-		result = acpi_bus_scan(*child, &ops);
-
-	return result;
+	acpi_bus_scan(handle, &ops, child);
+	return 0;
 }
 EXPORT_SYMBOL(acpi_bus_add);
 
 int acpi_bus_start(struct acpi_device *device)
 {
-	int result;
 	struct acpi_bus_ops ops;
 
+	memset(&ops, 0, sizeof(ops));
+	ops.acpi_op_start = 1;
 
-	if (!device)
-		return -EINVAL;
-
-	result = acpi_start_single_object(device);
-	if (!result) {
-		memset(&ops, 0, sizeof(ops));
-		ops.acpi_op_start = 1;
-		result = acpi_bus_scan(device, &ops);
-	}
-	return result;
+	acpi_bus_scan(device->handle, &ops, NULL);
+	return 0;
 }
 EXPORT_SYMBOL(acpi_bus_start);
 
@@ -1572,15 +1423,12 @@
 }
 EXPORT_SYMBOL_GPL(acpi_bus_trim);
 
-static int acpi_bus_scan_fixed(struct acpi_device *root)
+static int acpi_bus_scan_fixed(void)
 {
 	int result = 0;
 	struct acpi_device *device = NULL;
 	struct acpi_bus_ops ops;
 
-	if (!root)
-		return -ENODEV;
-
 	memset(&ops, 0, sizeof(ops));
 	ops.acpi_op_add = 1;
 	ops.acpi_op_start = 1;
@@ -1589,16 +1437,16 @@
 	 * Enumerate all fixed-feature devices.
 	 */
 	if ((acpi_gbl_FADT.flags & ACPI_FADT_POWER_BUTTON) == 0) {
-		result = acpi_add_single_object(&device, acpi_root,
-						NULL,
+		result = acpi_add_single_object(&device, NULL,
 						ACPI_BUS_TYPE_POWER_BUTTON,
+						ACPI_STA_DEFAULT,
 						&ops);
 	}
 
 	if ((acpi_gbl_FADT.flags & ACPI_FADT_SLEEP_BUTTON) == 0) {
-		result = acpi_add_single_object(&device, acpi_root,
-						NULL,
+		result = acpi_add_single_object(&device, NULL,
 						ACPI_BUS_TYPE_SLEEP_BUTTON,
+						ACPI_STA_DEFAULT,
 						&ops);
 	}
 
@@ -1621,24 +1469,15 @@
 	}
 
 	/*
-	 * Create the root device in the bus's device tree
-	 */
-	result = acpi_add_single_object(&acpi_root, NULL, ACPI_ROOT_OBJECT,
-					ACPI_BUS_TYPE_SYSTEM, &ops);
-	if (result)
-		goto Done;
-
-	/*
 	 * Enumerate devices in the ACPI namespace.
 	 */
-	result = acpi_bus_scan_fixed(acpi_root);
+	result = acpi_bus_scan(ACPI_ROOT_OBJECT, &ops, &acpi_root);
 
 	if (!result)
-		result = acpi_bus_scan(acpi_root, &ops);
+		result = acpi_bus_scan_fixed();
 
 	if (result)
 		acpi_device_unregister(acpi_root, ACPI_BUS_REMOVAL_NORMAL);
 
-Done:
 	return result;
 }
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 94b1a4c..a4fddb2 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -1986,6 +1986,10 @@
 
 	result = acpi_video_device_lcd_set_level(device, level_next);
 
+	if (!result)
+		backlight_force_update(device->backlight,
+				       BACKLIGHT_UPDATE_HOTKEY);
+
 out:
 	if (result)
 		printk(KERN_ERR PREFIX "Failed to switch the brightness\n");
diff --git a/drivers/i2c/busses/i2c-scmi.c b/drivers/i2c/busses/i2c-scmi.c
index 276a046..b4a55d4 100644
--- a/drivers/i2c/busses/i2c-scmi.c
+++ b/drivers/i2c/busses/i2c-scmi.c
@@ -369,9 +369,8 @@
 		goto err;
 
 	snprintf(smbus_cmi->adapter.name, sizeof(smbus_cmi->adapter.name),
-		"SMBus CMI adapter %s (%s)",
-		acpi_device_name(device),
-		acpi_device_uid(device));
+		"SMBus CMI adapter %s",
+		acpi_device_name(device));
 	smbus_cmi->adapter.owner = THIS_MODULE;
 	smbus_cmi->adapter.algo = &acpi_smbus_cmi_algorithm;
 	smbus_cmi->adapter.algo_data = smbus_cmi;
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 7c8e712..e4f599f 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -150,9 +150,9 @@
 	tristate "LED Support for N.S. LP3944 (Fun Light) I2C chip"
 	depends on LEDS_CLASS && I2C
 	help
-    This option enables support for LEDs connected to the National
-    Semiconductor LP3944 Lighting Management Unit (LMU) also known as
-    Fun Light Chip.
+	  This option enables support for LEDs connected to the National
+	  Semiconductor LP3944 Lighting Management Unit (LMU) also known as
+	  Fun Light Chip.
 
 	  To compile this driver as a module, choose M here: the
 	  module will be called leds-lp3944.
@@ -195,6 +195,13 @@
 	  LED driver chips accessed via the I2C bus.  Supported
 	  devices include PCA9550, PCA9551, PCA9552, and PCA9553.
 
+config LEDS_WM831X_STATUS
+	tristate "LED support for status LEDs on WM831x PMICs"
+	depends on LEDS_CLASS && MFD_WM831X
+	help
+	  This option enables support for the status LEDs of the WM831x
+	  series of PMICs.
+
 config LEDS_WM8350
 	tristate "LED Support for WM8350 AudioPlus PMIC"
 	depends on LEDS_CLASS && MFD_WM8350
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index e8cdcf7..46d7270 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -26,6 +26,7 @@
 obj-$(CONFIG_LEDS_FSG)			+= leds-fsg.o
 obj-$(CONFIG_LEDS_PCA955X)		+= leds-pca955x.o
 obj-$(CONFIG_LEDS_DA903X)		+= leds-da903x.o
+obj-$(CONFIG_LEDS_WM831X_STATUS)	+= leds-wm831x-status.o
 obj-$(CONFIG_LEDS_WM8350)		+= leds-wm8350.o
 obj-$(CONFIG_LEDS_PWM)			+= leds-pwm.o
 
diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
index f2242db..a498135 100644
--- a/drivers/leds/leds-clevo-mail.c
+++ b/drivers/leds/leds-clevo-mail.c
@@ -153,7 +153,7 @@
 	.flags			= LED_CORE_SUSPENDRESUME,
 };
 
-static int __init clevo_mail_led_probe(struct platform_device *pdev)
+static int __devinit clevo_mail_led_probe(struct platform_device *pdev)
 {
 	return led_classdev_register(&pdev->dev, &clevo_mail_led);
 }
diff --git a/drivers/leds/leds-cobalt-qube.c b/drivers/leds/leds-cobalt-qube.c
index 059aa29..8816806 100644
--- a/drivers/leds/leds-cobalt-qube.c
+++ b/drivers/leds/leds-cobalt-qube.c
@@ -28,7 +28,7 @@
 }
 
 static struct led_classdev qube_front_led = {
-	.name			= "qube-front",
+	.name			= "qube::front",
 	.brightness		= LED_FULL,
 	.brightness_set		= qube_front_led_set,
 	.default_trigger	= "ide-disk",
diff --git a/drivers/leds/leds-cobalt-raq.c b/drivers/leds/leds-cobalt-raq.c
index 5f1ce81..defc212 100644
--- a/drivers/leds/leds-cobalt-raq.c
+++ b/drivers/leds/leds-cobalt-raq.c
@@ -49,7 +49,7 @@
 }
 
 static struct led_classdev raq_web_led = {
-	.name		= "raq-web",
+	.name		= "raq::web",
 	.brightness_set	= raq_web_led_set,
 };
 
@@ -70,7 +70,7 @@
 }
 
 static struct led_classdev raq_power_off_led = {
-	.name			= "raq-power-off",
+	.name			= "raq::power-off",
 	.brightness_set		= raq_power_off_led_set,
 	.default_trigger	= "power-off",
 };
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index 6b06638..7467980 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -80,7 +80,7 @@
 
 	/* skip leds that aren't available */
 	if (!gpio_is_valid(template->gpio)) {
-		printk(KERN_INFO "Skipping unavilable LED gpio %d (%s)\n", 
+		printk(KERN_INFO "Skipping unavailable LED gpio %d (%s)\n",
 				template->gpio, template->name);
 		return 0;
 	}
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index dba8921..708a801 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -34,7 +34,7 @@
 	struct i2c_client *client;
 	struct pca9532_led leds[16];
 	struct mutex update_lock;
-	struct input_dev    *idev;
+	struct input_dev *idev;
 	struct work_struct work;
 	u8 pwm[2];
 	u8 psc[2];
@@ -53,9 +53,9 @@
 
 static struct i2c_driver pca9532_driver = {
 	.driver = {
-		.name   = "pca9532",
+		.name = "pca9532",
 	},
-	.probe  = pca9532_probe,
+	.probe = pca9532_probe,
 	.remove = pca9532_remove,
 	.id_table = pca9532_id,
 };
@@ -149,7 +149,7 @@
 
 	if (*delay_on == 0 && *delay_off == 0) {
 	/* led subsystem ask us for a blink rate */
-		*delay_on  = 1000;
+		*delay_on = 1000;
 		*delay_off = 1000;
 	}
 	if (*delay_on != *delay_off || *delay_on > 1690 || *delay_on < 6)
@@ -227,7 +227,7 @@
 			break;
 		case PCA9532_TYPE_LED:
 			led->state = pled->state;
-			led->name =  pled->name;
+			led->name = pled->name;
 			led->ldev.name = led->name;
 			led->ldev.brightness = LED_OFF;
 			led->ldev.brightness_set = pca9532_set_brightness;
@@ -254,7 +254,7 @@
 			data->idev->name = pled->name;
 			data->idev->phys = "i2c/pca9532";
 			data->idev->id.bustype = BUS_HOST;
-			data->idev->id.vendor  = 0x001f;
+			data->idev->id.vendor = 0x001f;
 			data->idev->id.product = 0x0001;
 			data->idev->id.version = 0x0100;
 			data->idev->evbit[0] = BIT_MASK(EV_SND);
diff --git a/drivers/leds/leds-wm831x-status.c b/drivers/leds/leds-wm831x-status.c
new file mode 100644
index 0000000..c586d05
--- /dev/null
+++ b/drivers/leds/leds-wm831x-status.c
@@ -0,0 +1,341 @@
+/*
+ * LED driver for WM831x status LEDs
+ *
+ * Copyright(C) 2009 Wolfson Microelectronics PLC.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/leds.h>
+#include <linux/err.h>
+#include <linux/mfd/wm831x/core.h>
+#include <linux/mfd/wm831x/pdata.h>
+#include <linux/mfd/wm831x/status.h>
+
+
+struct wm831x_status {
+	struct led_classdev cdev;
+	struct wm831x *wm831x;
+	struct work_struct work;
+	struct mutex mutex;
+
+	spinlock_t value_lock;
+	int reg;     /* Control register */
+	int reg_val; /* Control register value */
+
+	int blink;
+	int blink_time;
+	int blink_cyc;
+	int src;
+	enum led_brightness brightness;
+};
+
+#define to_wm831x_status(led_cdev) \
+	container_of(led_cdev, struct wm831x_status, cdev)
+
+static void wm831x_status_work(struct work_struct *work)
+{
+	struct wm831x_status *led = container_of(work, struct wm831x_status,
+						 work);
+	unsigned long flags;
+
+	mutex_lock(&led->mutex);
+
+	led->reg_val &= ~(WM831X_LED_SRC_MASK | WM831X_LED_MODE_MASK |
+			  WM831X_LED_DUTY_CYC_MASK | WM831X_LED_DUR_MASK);
+
+	spin_lock_irqsave(&led->value_lock, flags);
+
+	led->reg_val |= led->src << WM831X_LED_SRC_SHIFT;
+	if (led->blink) {
+		led->reg_val |= 2 << WM831X_LED_MODE_SHIFT;
+		led->reg_val |= led->blink_time << WM831X_LED_DUR_SHIFT;
+		led->reg_val |= led->blink_cyc;
+	} else {
+		if (led->brightness != LED_OFF)
+			led->reg_val |= 1 << WM831X_LED_MODE_SHIFT;
+	}
+
+	spin_unlock_irqrestore(&led->value_lock, flags);
+
+	wm831x_reg_write(led->wm831x, led->reg, led->reg_val);
+
+	mutex_unlock(&led->mutex);
+}
+
+static void wm831x_status_set(struct led_classdev *led_cdev,
+			   enum led_brightness value)
+{
+	struct wm831x_status *led = to_wm831x_status(led_cdev);
+	unsigned long flags;
+
+	spin_lock_irqsave(&led->value_lock, flags);
+	led->brightness = value;
+	if (value == LED_OFF)
+		led->blink = 0;
+	schedule_work(&led->work);
+	spin_unlock_irqrestore(&led->value_lock, flags);
+}
+
+static int wm831x_status_blink_set(struct led_classdev *led_cdev,
+				   unsigned long *delay_on,
+				   unsigned long *delay_off)
+{
+	struct wm831x_status *led = to_wm831x_status(led_cdev);
+	unsigned long flags;
+	int ret = 0;
+
+	/* Pick some defaults if we've not been given times */
+	if (*delay_on == 0 && *delay_off == 0) {
+		*delay_on = 250;
+		*delay_off = 250;
+	}
+
+	spin_lock_irqsave(&led->value_lock, flags);
+
+	/* We only have a limited selection of settings; see if we can
+	 * support the configuration we're being given */
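+	/* The switches below cover on times of 1s, 250ms, 125ms and 62.5ms
+	 * and off/on ratios of 1, 3, 4 and 8. */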
+	switch (*delay_on) {
+	case 1000:
+		led->blink_time = 0;
+		break;
+	case 250:
+		led->blink_time = 1;
+		break;
+	case 125:
+		led->blink_time = 2;
+		break;
+	case 62:
+	case 63:
+		/* Actually 62.5ms */
+		led->blink_time = 3;
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	if (ret == 0) {
+		switch (*delay_off / *delay_on) {
+		case 1:
+			led->blink_cyc = 0;
+			break;
+		case 3:
+			led->blink_cyc = 1;
+			break;
+		case 4:
+			led->blink_cyc = 2;
+			break;
+		case 8:
+			led->blink_cyc = 3;
+			break;
+		default:
+			ret = -EINVAL;
+			break;
+		}
+	}
+
+	if (ret == 0)
+		led->blink = 1;
+	else
+		led->blink = 0;
+
+	/* Always update; if we fail turn off blinking since we expect
+	 * a software fallback. */
+	schedule_work(&led->work);
+
+	spin_unlock_irqrestore(&led->value_lock, flags);
+
+	return ret;
+}
+
+static const char *led_src_texts[] = {
+	"otp",
+	"power",
+	"charger",
+	"soft",
+};
+
+static ssize_t wm831x_status_src_show(struct device *dev,
+				      struct device_attribute *attr, char *buf)
+{
+	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+	struct wm831x_status *led = to_wm831x_status(led_cdev);
+	int i;
+	ssize_t ret = 0;
+
+	mutex_lock(&led->mutex);
+
+	for (i = 0; i < ARRAY_SIZE(led_src_texts); i++)
+		if (i == led->src)
+			ret += sprintf(&buf[ret], "[%s] ", led_src_texts[i]);
+		else
+			ret += sprintf(&buf[ret], "%s ", led_src_texts[i]);
+
+	mutex_unlock(&led->mutex);
+
+	ret += sprintf(&buf[ret], "\n");
+
+	return ret;
+}
+
+static ssize_t wm831x_status_src_store(struct device *dev,
+				       struct device_attribute *attr,
+				       const char *buf, size_t size)
+{
+	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+	struct wm831x_status *led = to_wm831x_status(led_cdev);
+	char name[20];
+	int i;
+	size_t len;
+
+	name[sizeof(name) - 1] = '\0';
+	strncpy(name, buf, sizeof(name) - 1);
+	len = strlen(name);
+
+	if (len && name[len - 1] == '\n')
+		name[len - 1] = '\0';
+
+	for (i = 0; i < ARRAY_SIZE(led_src_texts); i++) {
+		if (!strcmp(name, led_src_texts[i])) {
+			mutex_lock(&led->mutex);
+
+			led->src = i;
+			schedule_work(&led->work);
+
+			mutex_unlock(&led->mutex);
+		}
+	}
+
+	return size;
+}
+
+static DEVICE_ATTR(src, 0644, wm831x_status_src_show, wm831x_status_src_store);
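+/*
+ * The attribute shows up as /sys/class/leds/<name>/src; writing one of
+ * the source names above (e.g. "soft") selects that source.
+ */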
+
+static int wm831x_status_probe(struct platform_device *pdev)
+{
+	struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
+	struct wm831x_pdata *chip_pdata;
+	struct wm831x_status_pdata pdata;
+	struct wm831x_status *drvdata;
+	struct resource *res;
+	int id = pdev->id % ARRAY_SIZE(chip_pdata->status);
+	int ret;
+
+	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+	if (res == NULL) {
+		dev_err(&pdev->dev, "No I/O resource\n");
+		ret = -EINVAL;
+		goto err;
+	}
+
+	drvdata = kzalloc(sizeof(struct wm831x_status), GFP_KERNEL);
+	if (!drvdata)
+		return -ENOMEM;
+	dev_set_drvdata(&pdev->dev, drvdata);
+
+	drvdata->wm831x = wm831x;
+	drvdata->reg = res->start;
+
+	if (wm831x->dev->platform_data)
+		chip_pdata = wm831x->dev->platform_data;
+	else
+		chip_pdata = NULL;
+
+	memset(&pdata, 0, sizeof(pdata));
+	if (chip_pdata && chip_pdata->status[id])
+		memcpy(&pdata, chip_pdata->status[id], sizeof(pdata));
+	else
+		pdata.name = dev_name(&pdev->dev);
+
+	mutex_init(&drvdata->mutex);
+	INIT_WORK(&drvdata->work, wm831x_status_work);
+	spin_lock_init(&drvdata->value_lock);
+
+	/* We cache the configuration register and read startup values
+	 * from it. */
+	drvdata->reg_val = wm831x_reg_read(wm831x, drvdata->reg);
+
+	if (drvdata->reg_val & WM831X_LED_MODE_MASK)
+		drvdata->brightness = LED_FULL;
+	else
+		drvdata->brightness = LED_OFF;
+
+	/* Set a default source if configured, otherwise leave the
+	 * current hardware setting.
+	 */
+	if (pdata.default_src == WM831X_STATUS_PRESERVE) {
+		drvdata->src = drvdata->reg_val;
+		drvdata->src &= WM831X_LED_SRC_MASK;
+		drvdata->src >>= WM831X_LED_SRC_SHIFT;
+	} else {
+		drvdata->src = pdata.default_src - 1;
+	}
+
+	drvdata->cdev.name = pdata.name;
+	drvdata->cdev.default_trigger = pdata.default_trigger;
+	drvdata->cdev.brightness_set = wm831x_status_set;
+	drvdata->cdev.blink_set = wm831x_status_blink_set;
+
+	ret = led_classdev_register(wm831x->dev, &drvdata->cdev);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "Failed to register LED: %d\n", ret);
+		goto err_free;
+	}
+
+	ret = device_create_file(drvdata->cdev.dev, &dev_attr_src);
+	if (ret != 0)
+		dev_err(&pdev->dev,
+			"No source control for LED: %d\n", ret);
+
+	return 0;
+
+err_free:
+	kfree(drvdata);
+err:
+	return ret;
+}
+
+static int wm831x_status_remove(struct platform_device *pdev)
+{
+	struct wm831x_status *drvdata = platform_get_drvdata(pdev);
+
+	device_remove_file(drvdata->cdev.dev, &dev_attr_src);
+	led_classdev_unregister(&drvdata->cdev);
+	kfree(drvdata);
+
+	return 0;
+}
+
+static struct platform_driver wm831x_status_driver = {
+	.driver = {
+		   .name = "wm831x-status",
+		   .owner = THIS_MODULE,
+		   },
+	.probe = wm831x_status_probe,
+	.remove = wm831x_status_remove,
+};
+
+static int __init wm831x_status_init(void)
+{
+	return platform_driver_register(&wm831x_status_driver);
+}
+module_init(wm831x_status_init);
+
+static void wm831x_status_exit(void)
+{
+	platform_driver_unregister(&wm831x_status_driver);
+}
+module_exit(wm831x_status_exit);
+
+MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
+MODULE_DESCRIPTION("WM831x status LED driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:wm831x-status");
diff --git a/drivers/leds/ledtrig-gpio.c b/drivers/leds/ledtrig-gpio.c
index 1bc5db4..f591337 100644
--- a/drivers/leds/ledtrig-gpio.c
+++ b/drivers/leds/ledtrig-gpio.c
@@ -44,22 +44,22 @@
 			struct gpio_trig_data, work);
 	int tmp;
 
-       if (!gpio_data->gpio)
-	       return;
+	if (!gpio_data->gpio)
+		return;
 
-       tmp = gpio_get_value(gpio_data->gpio);
-       if (gpio_data->inverted)
-	       tmp = !tmp;
+	tmp = gpio_get_value(gpio_data->gpio);
+	if (gpio_data->inverted)
+		tmp = !tmp;
 
-       if (tmp) {
-               if (gpio_data->desired_brightness)
-                       led_set_brightness(gpio_data->led,
-                                       gpio_data->desired_brightness);
-               else
-                       led_set_brightness(gpio_data->led, LED_FULL);
-       } else {
-               led_set_brightness(gpio_data->led, LED_OFF);
-       }
+	if (tmp) {
+		if (gpio_data->desired_brightness)
+			led_set_brightness(gpio_data->led,
+					   gpio_data->desired_brightness);
+		else
+			led_set_brightness(gpio_data->led, LED_FULL);
+	} else {
+		led_set_brightness(gpio_data->led, LED_OFF);
+	}
 }
 
 static ssize_t gpio_trig_brightness_show(struct device *dev,
diff --git a/drivers/macintosh/via-pmu-led.c b/drivers/macintosh/via-pmu-led.c
index 55ad956..d242976 100644
--- a/drivers/macintosh/via-pmu-led.c
+++ b/drivers/macintosh/via-pmu-led.c
@@ -72,7 +72,7 @@
 }
 
 static struct led_classdev pmu_led = {
-	.name = "pmu-front-led",
+	.name = "pmu-led::front",
 #ifdef CONFIG_ADB_PMU_LED_IDE
 	.default_trigger = "ide-disk",
 #endif
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
new file mode 100644
index 0000000..f67ae28
--- /dev/null
+++ b/drivers/net/can/at91_can.c
@@ -0,0 +1,1186 @@
+/*
+ * at91_can.c - CAN network driver for AT91 SoC CAN controller
+ *
+ * (C) 2007 by Hans J. Koch <hjk@linutronix.de>
+ * (C) 2008, 2009 by Marc Kleine-Budde <kernel@pengutronix.de>
+ *
+ * This software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2 as distributed in the 'COPYING'
+ * file from the main directory of the linux kernel source.
+ *
+ * Send feedback to <socketcan-users@lists.berlios.de>
+ *
+ *
+ * Your platform definition file should specify something like:
+ *
+ * static struct at91_can_data ek_can_data = {
+ *	transceiver_switch = sam9263ek_transceiver_switch,
+ * };
+ *
+ * at91_add_device_can(&ek_can_data);
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/errno.h>
+#include <linux/if_arp.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+
+#include <mach/board.h>
+
+#define DRV_NAME		"at91_can"
+#define AT91_NAPI_WEIGHT	12
+
+/*
+ * RX/TX Mailbox split
+ * don't dare to touch
+ */
+#define AT91_MB_RX_NUM		12
+#define AT91_MB_TX_SHIFT	2
+
+#define AT91_MB_RX_FIRST	0
+#define AT91_MB_RX_LAST		(AT91_MB_RX_FIRST + AT91_MB_RX_NUM - 1)
+
+#define AT91_MB_RX_MASK(i)	((1 << (i)) - 1)
+#define AT91_MB_RX_SPLIT	8
+#define AT91_MB_RX_LOW_LAST	(AT91_MB_RX_SPLIT - 1)
+#define AT91_MB_RX_LOW_MASK	(AT91_MB_RX_MASK(AT91_MB_RX_SPLIT))
+
+#define AT91_MB_TX_NUM		(1 << AT91_MB_TX_SHIFT)
+#define AT91_MB_TX_FIRST	(AT91_MB_RX_LAST + 1)
+#define AT91_MB_TX_LAST		(AT91_MB_TX_FIRST + AT91_MB_TX_NUM - 1)
+
+#define AT91_NEXT_PRIO_SHIFT	(AT91_MB_TX_SHIFT)
+#define AT91_NEXT_PRIO_MASK	(0xf << AT91_MB_TX_SHIFT)
+#define AT91_NEXT_MB_MASK	(AT91_MB_TX_NUM - 1)
+#define AT91_NEXT_MASK		((AT91_MB_TX_NUM - 1) | AT91_NEXT_PRIO_MASK)
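+/*
+ * With the values above this works out to: RX mailboxes 0..11 (low
+ * group 0..7, high group 8..11) and TX mailboxes 12..15, so
+ * AT91_MB_RX_LOW_MASK is 0xff and AT91_NEXT_MASK is 0x3f.
+ */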
+
+/* Common registers */
+enum at91_reg {
+	AT91_MR		= 0x000,
+	AT91_IER	= 0x004,
+	AT91_IDR	= 0x008,
+	AT91_IMR	= 0x00C,
+	AT91_SR		= 0x010,
+	AT91_BR		= 0x014,
+	AT91_TIM	= 0x018,
+	AT91_TIMESTP	= 0x01C,
+	AT91_ECR	= 0x020,
+	AT91_TCR	= 0x024,
+	AT91_ACR	= 0x028,
+};
+
+/* Mailbox registers (0 <= i <= 15) */
+#define AT91_MMR(i)		(enum at91_reg)(0x200 + ((i) * 0x20))
+#define AT91_MAM(i)		(enum at91_reg)(0x204 + ((i) * 0x20))
+#define AT91_MID(i)		(enum at91_reg)(0x208 + ((i) * 0x20))
+#define AT91_MFID(i)		(enum at91_reg)(0x20C + ((i) * 0x20))
+#define AT91_MSR(i)		(enum at91_reg)(0x210 + ((i) * 0x20))
+#define AT91_MDL(i)		(enum at91_reg)(0x214 + ((i) * 0x20))
+#define AT91_MDH(i)		(enum at91_reg)(0x218 + ((i) * 0x20))
+#define AT91_MCR(i)		(enum at91_reg)(0x21C + ((i) * 0x20))
+
+/* Register bits */
+#define AT91_MR_CANEN		BIT(0)
+#define AT91_MR_LPM		BIT(1)
+#define AT91_MR_ABM		BIT(2)
+#define AT91_MR_OVL		BIT(3)
+#define AT91_MR_TEOF		BIT(4)
+#define AT91_MR_TTM		BIT(5)
+#define AT91_MR_TIMFRZ		BIT(6)
+#define AT91_MR_DRPT		BIT(7)
+
+#define AT91_SR_RBSY		BIT(29)
+
+#define AT91_MMR_PRIO_SHIFT	(16)
+
+#define AT91_MID_MIDE		BIT(29)
+
+#define AT91_MSR_MRTR		BIT(20)
+#define AT91_MSR_MABT		BIT(22)
+#define AT91_MSR_MRDY		BIT(23)
+#define AT91_MSR_MMI		BIT(24)
+
+#define AT91_MCR_MRTR		BIT(20)
+#define AT91_MCR_MTCR		BIT(23)
+
+/* Mailbox Modes */
+enum at91_mb_mode {
+	AT91_MB_MODE_DISABLED	= 0,
+	AT91_MB_MODE_RX		= 1,
+	AT91_MB_MODE_RX_OVRWR	= 2,
+	AT91_MB_MODE_TX		= 3,
+	AT91_MB_MODE_CONSUMER	= 4,
+	AT91_MB_MODE_PRODUCER	= 5,
+};
+
+/* Interrupt mask bits */
+#define AT91_IRQ_MB_RX		((1 << (AT91_MB_RX_LAST + 1)) \
+				 - (1 << AT91_MB_RX_FIRST))
+#define AT91_IRQ_MB_TX		((1 << (AT91_MB_TX_LAST + 1)) \
+				 - (1 << AT91_MB_TX_FIRST))
+#define AT91_IRQ_MB_ALL		(AT91_IRQ_MB_RX | AT91_IRQ_MB_TX)
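+/* i.e. AT91_IRQ_MB_RX = 0x00000fff, AT91_IRQ_MB_TX = 0x0000f000 */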
+
+#define AT91_IRQ_ERRA		(1 << 16)
+#define AT91_IRQ_WARN		(1 << 17)
+#define AT91_IRQ_ERRP		(1 << 18)
+#define AT91_IRQ_BOFF		(1 << 19)
+#define AT91_IRQ_SLEEP		(1 << 20)
+#define AT91_IRQ_WAKEUP		(1 << 21)
+#define AT91_IRQ_TOVF		(1 << 22)
+#define AT91_IRQ_TSTP		(1 << 23)
+#define AT91_IRQ_CERR		(1 << 24)
+#define AT91_IRQ_SERR		(1 << 25)
+#define AT91_IRQ_AERR		(1 << 26)
+#define AT91_IRQ_FERR		(1 << 27)
+#define AT91_IRQ_BERR		(1 << 28)
+
+#define AT91_IRQ_ERR_ALL	(0x1fff0000)
+#define AT91_IRQ_ERR_FRAME	(AT91_IRQ_CERR | AT91_IRQ_SERR | \
+				 AT91_IRQ_AERR | AT91_IRQ_FERR | AT91_IRQ_BERR)
+#define AT91_IRQ_ERR_LINE	(AT91_IRQ_ERRA | AT91_IRQ_WARN | \
+				 AT91_IRQ_ERRP | AT91_IRQ_BOFF)
+
+#define AT91_IRQ_ALL		(0x1fffffff)
+
+struct at91_priv {
+	struct can_priv		can;	   /* must be the first member! */
+	struct net_device	*dev;
+	struct napi_struct	napi;
+
+	void __iomem		*reg_base;
+
+	u32			reg_sr;
+	unsigned int		tx_next;
+	unsigned int		tx_echo;
+	unsigned int		rx_next;
+
+	struct clk		*clk;
+	struct at91_can_data	*pdata;
+};
+
+static struct can_bittiming_const at91_bittiming_const = {
+	.tseg1_min	= 4,
+	.tseg1_max	= 16,
+	.tseg2_min	= 2,
+	.tseg2_max	= 8,
+	.sjw_max	= 4,
+	.brp_min	= 2,
+	.brp_max	= 128,
+	.brp_inc	= 1,
+};
+
+static inline int get_tx_next_mb(const struct at91_priv *priv)
+{
+	return (priv->tx_next & AT91_NEXT_MB_MASK) + AT91_MB_TX_FIRST;
+}
+
+static inline int get_tx_next_prio(const struct at91_priv *priv)
+{
+	return (priv->tx_next >> AT91_NEXT_PRIO_SHIFT) & 0xf;
+}
+
+static inline int get_tx_echo_mb(const struct at91_priv *priv)
+{
+	return (priv->tx_echo & AT91_NEXT_MB_MASK) + AT91_MB_TX_FIRST;
+}
+
+static inline u32 at91_read(const struct at91_priv *priv, enum at91_reg reg)
+{
+	return readl(priv->reg_base + reg);
+}
+
+static inline void at91_write(const struct at91_priv *priv, enum at91_reg reg,
+		u32 value)
+{
+	writel(value, priv->reg_base + reg);
+}
+
+static inline void set_mb_mode_prio(const struct at91_priv *priv,
+		unsigned int mb, enum at91_mb_mode mode, int prio)
+{
+	at91_write(priv, AT91_MMR(mb), (mode << 24) | (prio << 16));
+}
+
+static inline void set_mb_mode(const struct at91_priv *priv, unsigned int mb,
+		enum at91_mb_mode mode)
+{
+	set_mb_mode_prio(priv, mb, mode, 0);
+}
+
+static struct sk_buff *alloc_can_skb(struct net_device *dev,
+		struct can_frame **cf)
+{
+	struct sk_buff *skb;
+
+	skb = netdev_alloc_skb(dev, sizeof(struct can_frame));
+	if (unlikely(!skb))
+		return NULL;
+
+	skb->protocol = htons(ETH_P_CAN);
+	skb->ip_summed = CHECKSUM_UNNECESSARY;
+	*cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
+
+	return skb;
+}
+
+static struct sk_buff *alloc_can_err_skb(struct net_device *dev,
+		struct can_frame **cf)
+{
+	struct sk_buff *skb;
+
+	skb = alloc_can_skb(dev, cf);
+	if (unlikely(!skb))
+		return NULL;
+
+	memset(*cf, 0, sizeof(struct can_frame));
+	(*cf)->can_id = CAN_ERR_FLAG;
+	(*cf)->can_dlc = CAN_ERR_DLC;
+
+	return skb;
+}
+
+/*
+ * Switch transceiver on or off
+ */
+static void at91_transceiver_switch(const struct at91_priv *priv, int on)
+{
+	if (priv->pdata && priv->pdata->transceiver_switch)
+		priv->pdata->transceiver_switch(on);
+}
+
+static void at91_setup_mailboxes(struct net_device *dev)
+{
+	struct at91_priv *priv = netdev_priv(dev);
+	unsigned int i;
+
+	/*
+	 * The first 12 mailboxes are used as a reception FIFO. The
+	 * last mailbox is configured with the overwrite option. The
+	 * overwrite flag indicates a FIFO overflow.
+	 */
+	for (i = AT91_MB_RX_FIRST; i < AT91_MB_RX_LAST; i++)
+		set_mb_mode(priv, i, AT91_MB_MODE_RX);
+	set_mb_mode(priv, AT91_MB_RX_LAST, AT91_MB_MODE_RX_OVRWR);
+
+	/* The last 4 mailboxes are used for transmitting. */
+	for (i = AT91_MB_TX_FIRST; i <= AT91_MB_TX_LAST; i++)
+		set_mb_mode_prio(priv, i, AT91_MB_MODE_TX, 0);
+
+	/* Reset tx and rx helper pointers */
+	priv->tx_next = priv->tx_echo = priv->rx_next = 0;
+}
+
+static int at91_set_bittiming(struct net_device *dev)
+{
+	const struct at91_priv *priv = netdev_priv(dev);
+	const struct can_bittiming *bt = &priv->can.bittiming;
+	u32 reg_br;
+
+	reg_br = ((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) << 24) |
+		((bt->brp - 1) << 16) |	((bt->sjw - 1) << 12) |
+		((bt->prop_seg - 1) << 8) | ((bt->phase_seg1 - 1) << 4) |
+		((bt->phase_seg2 - 1) << 0);
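+	/*
+	 * Purely illustrative example: brp=4, sjw=1, prop_seg=2,
+	 * phase_seg1=7, phase_seg2=2 and one-sample mode yield
+	 * reg_br = 0x00030161.
+	 */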
+
+	dev_info(dev->dev.parent, "writing AT91_BR: 0x%08x\n", reg_br);
+
+	at91_write(priv, AT91_BR, reg_br);
+
+	return 0;
+}
+
+static void at91_chip_start(struct net_device *dev)
+{
+	struct at91_priv *priv = netdev_priv(dev);
+	u32 reg_mr, reg_ier;
+
+	/* disable interrupts */
+	at91_write(priv, AT91_IDR, AT91_IRQ_ALL);
+
+	/* disable chip */
+	reg_mr = at91_read(priv, AT91_MR);
+	at91_write(priv, AT91_MR, reg_mr & ~AT91_MR_CANEN);
+
+	at91_setup_mailboxes(dev);
+	at91_transceiver_switch(priv, 1);
+
+	/* enable chip */
+	at91_write(priv, AT91_MR, AT91_MR_CANEN);
+
+	priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+	/* Enable interrupts */
+	reg_ier = AT91_IRQ_MB_RX | AT91_IRQ_ERRP | AT91_IRQ_ERR_FRAME;
+	at91_write(priv, AT91_IDR, AT91_IRQ_ALL);
+	at91_write(priv, AT91_IER, reg_ier);
+}
+
+static void at91_chip_stop(struct net_device *dev, enum can_state state)
+{
+	struct at91_priv *priv = netdev_priv(dev);
+	u32 reg_mr;
+
+	/* disable interrupts */
+	at91_write(priv, AT91_IDR, AT91_IRQ_ALL);
+
+	reg_mr = at91_read(priv, AT91_MR);
+	at91_write(priv, AT91_MR, reg_mr & ~AT91_MR_CANEN);
+
+	at91_transceiver_switch(priv, 0);
+	priv->can.state = state;
+}
+
+/*
+ * theory of operation:
+ *
+ * According to the datasheet priority 0 is the highest priority, 15
+ * is the lowest. If two mailboxes have the same priority level the
+ * message of the mailbox with the lowest number is sent first.
+ *
+ * We use the first TX mailbox (AT91_MB_TX_FIRST) with prio 0, then
+ * the next mailbox with prio 0, and so on, until all mailboxes are
+ * used. Then we start from the beginning with mailbox
+ * AT91_MB_TX_FIRST, but with prio 1, then mailbox AT91_MB_TX_FIRST + 1
+ * with prio 1, and so on. When we reach the last mailbox with prio 15,
+ * we have to stop sending, wait for all messages to be delivered, then
+ * start again with mailbox AT91_MB_TX_FIRST, prio 0.
+ *
+ * We use priv->tx_next as a counter for the next transmission
+ * mailbox, but without the offset AT91_MB_TX_FIRST. The lower bits
+ * encode the mailbox number, the upper 4 bits the mailbox priority:
+ *
+ * priv->tx_next = (prio << AT91_NEXT_PRIO_SHIFT) |
+ *                 (mb - AT91_MB_TX_FIRST);
+ *
+ */
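+/*
+ * A short worked example of this encoding, using the values defined
+ * above (AT91_MB_TX_SHIFT = 2, AT91_MB_TX_FIRST = 12):
+ *
+ *   tx_next 0x00 -> mailbox 12, prio 0
+ *   tx_next 0x03 -> mailbox 15, prio 0
+ *   tx_next 0x04 -> mailbox 12, prio 1
+ *   tx_next 0x3f -> mailbox 15, prio 15 (queue must drain before wrapping)
+ */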
+static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct at91_priv *priv = netdev_priv(dev);
+	struct net_device_stats *stats = &dev->stats;
+	struct can_frame *cf = (struct can_frame *)skb->data;
+	unsigned int mb, prio;
+	u32 reg_mid, reg_mcr;
+
+	mb = get_tx_next_mb(priv);
+	prio = get_tx_next_prio(priv);
+
+	if (unlikely(!(at91_read(priv, AT91_MSR(mb)) & AT91_MSR_MRDY))) {
+		netif_stop_queue(dev);
+
+		dev_err(dev->dev.parent,
+			"BUG! TX buffer full when queue awake!\n");
+		return NETDEV_TX_BUSY;
+	}
+
+	if (cf->can_id & CAN_EFF_FLAG)
+		reg_mid = (cf->can_id & CAN_EFF_MASK) | AT91_MID_MIDE;
+	else
+		reg_mid = (cf->can_id & CAN_SFF_MASK) << 18;
+
+	reg_mcr = ((cf->can_id & CAN_RTR_FLAG) ? AT91_MCR_MRTR : 0) |
+		(cf->can_dlc << 16) | AT91_MCR_MTCR;
+
+	/* disable MB while writing ID (see datasheet) */
+	set_mb_mode(priv, mb, AT91_MB_MODE_DISABLED);
+	at91_write(priv, AT91_MID(mb), reg_mid);
+	set_mb_mode_prio(priv, mb, AT91_MB_MODE_TX, prio);
+
+	at91_write(priv, AT91_MDL(mb), *(u32 *)(cf->data + 0));
+	at91_write(priv, AT91_MDH(mb), *(u32 *)(cf->data + 4));
+
+	/* This triggers transmission */
+	at91_write(priv, AT91_MCR(mb), reg_mcr);
+
+	stats->tx_bytes += cf->can_dlc;
+	dev->trans_start = jiffies;
+
+	/* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */
+	can_put_echo_skb(skb, dev, mb - AT91_MB_TX_FIRST);
+
+	/*
+	 * we have to stop the queue and deliver all messages in case
+	 * of a prio+mb counter wrap around. This is the case if the
+	 * tx_next buffer's prio and mailbox both equal 0.
+	 *
+	 * also stop the queue if next buffer is still in use
+	 * (== not ready)
+	 */
+	priv->tx_next++;
+	if (!(at91_read(priv, AT91_MSR(get_tx_next_mb(priv))) &
+	      AT91_MSR_MRDY) ||
+	    (priv->tx_next & AT91_NEXT_MASK) == 0)
+		netif_stop_queue(dev);
+
+	/* Enable interrupt for this mailbox */
+	at91_write(priv, AT91_IER, 1 << mb);
+
+	return NETDEV_TX_OK;
+}
+
+/**
+ * at91_activate_rx_low - activate lower rx mailboxes
+ * @priv: at91 context
+ *
+ * Reenables the lower mailboxes for reception of new CAN messages
+ */
+static inline void at91_activate_rx_low(const struct at91_priv *priv)
+{
+	u32 mask = AT91_MB_RX_LOW_MASK;
+	at91_write(priv, AT91_TCR, mask);
+}
+
+/**
+ * at91_activate_rx_mb - reactivate a single rx mailbox
+ * @priv: at91 context
+ * @mb: mailbox to reactivate
+ *
+ * Reenables given mailbox for reception of new CAN messages
+ */
+static inline void at91_activate_rx_mb(const struct at91_priv *priv,
+		unsigned int mb)
+{
+	u32 mask = 1 << mb;
+	at91_write(priv, AT91_TCR, mask);
+}
+
+/**
+ * at91_rx_overflow_err - send error frame due to rx overflow
+ * @dev: net device
+ */
+static void at91_rx_overflow_err(struct net_device *dev)
+{
+	struct net_device_stats *stats = &dev->stats;
+	struct sk_buff *skb;
+	struct can_frame *cf;
+
+	dev_dbg(dev->dev.parent, "RX buffer overflow\n");
+	stats->rx_over_errors++;
+	stats->rx_errors++;
+
+	skb = alloc_can_err_skb(dev, &cf);
+	if (unlikely(!skb))
+		return;
+
+	cf->can_id |= CAN_ERR_CRTL;
+	cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+	netif_receive_skb(skb);
+
+	stats->rx_packets++;
+	stats->rx_bytes += cf->can_dlc;
+}
+
+/**
+ * at91_read_mb - read CAN msg from mailbox (lowlevel impl)
+ * @dev: net device
+ * @mb: mailbox number to read from
+ * @cf: can frame where to store message
+ *
+ * Reads a CAN message from the given mailbox and stores data into
+ * given can frame. "mb" and "cf" must be valid.
+ */
+static void at91_read_mb(struct net_device *dev, unsigned int mb,
+		struct can_frame *cf)
+{
+	const struct at91_priv *priv = netdev_priv(dev);
+	u32 reg_msr, reg_mid;
+
+	reg_mid = at91_read(priv, AT91_MID(mb));
+	if (reg_mid & AT91_MID_MIDE)
+		cf->can_id = ((reg_mid >> 0) & CAN_EFF_MASK) | CAN_EFF_FLAG;
+	else
+		cf->can_id = (reg_mid >> 18) & CAN_SFF_MASK;
+
+	reg_msr = at91_read(priv, AT91_MSR(mb));
+	if (reg_msr & AT91_MSR_MRTR)
+		cf->can_id |= CAN_RTR_FLAG;
+	cf->can_dlc = min_t(__u8, (reg_msr >> 16) & 0xf, 8);
+
+	*(u32 *)(cf->data + 0) = at91_read(priv, AT91_MDL(mb));
+	*(u32 *)(cf->data + 4) = at91_read(priv, AT91_MDH(mb));
+
+	if (unlikely(mb == AT91_MB_RX_LAST && reg_msr & AT91_MSR_MMI))
+		at91_rx_overflow_err(dev);
+}
+
+/**
+ * at91_read_msg - read CAN message from mailbox
+ * @dev: net device
+ * @mb: mail box to read from
+ *
+ * Reads a CAN message from the given mailbox, puts it into the linux
+ * network RX queue and does all housekeeping chores (stats, ...)
+ */
+static void at91_read_msg(struct net_device *dev, unsigned int mb)
+{
+	struct net_device_stats *stats = &dev->stats;
+	struct can_frame *cf;
+	struct sk_buff *skb;
+
+	skb = alloc_can_skb(dev, &cf);
+	if (unlikely(!skb)) {
+		stats->rx_dropped++;
+		return;
+	}
+
+	at91_read_mb(dev, mb, cf);
+	netif_receive_skb(skb);
+
+	stats->rx_packets++;
+	stats->rx_bytes += cf->can_dlc;
+}
+
+/**
+ * at91_poll_rx - read multiple CAN messages from mailboxes
+ * @dev: net device
+ * @quota: max number of packets we're allowed to receive
+ *
+ * Theory of Operation:
+ *
+ * 12 of the 16 mailboxes on the chip are reserved for RX. We split
+ * them into 2 groups: the lower group holds 8 mailboxes, the upper
+ * group 4.
+ *
+ * Like it or not, the chip always saves a received CAN message
+ * into the first free mailbox it finds (starting with the
+ * lowest). This makes it very difficult to read the messages in the
+ * right order from the chip. This is how we work around that problem:
+ *
+ * The first message goes into mb nr. 0 and issues an interrupt. All
+ * rx ints are disabled in the interrupt handler and a napi poll is
+ * scheduled. We read the mailbox, but do _not_ reenable the mb (to
+ * receive another message).
+ *
+ *    lower mbxs      upper
+ *   ______^______    __^__
+ *  /             \  /     \
+ * +-+-+-+-+-+-+-+-++-+-+-+-+
+ * |x|x|x|x|x|x|x|x|| | | | |
+ * +-+-+-+-+-+-+-+-++-+-+-+-+
+ *  0 0 0 0 0 0  0 0 0 0 1 1  \ mail
+ *  0 1 2 3 4 5  6 7 8 9 0 1  / box
+ *
+ * The variable priv->rx_next points to the next mailbox to read a
+ * message from. As long as we're in the lower mailboxes, we just read
+ * each mailbox but do not reenable it.
+ *
+ * With completion of the last of the lower mailboxes, we reenable the
+ * whole first group, but continue to look for filled mailboxes in the
+ * upper mailboxes. Think of the second group as overflow mailboxes,
+ * which take CAN messages when the lower group is full. While in the
+ * upper group, we reenable each mailbox right after reading it, giving
+ * the chip more room to store messages.
+ *
+ * After finishing, we look at the lower group again if we still have
+ * quota left.
+ *
+ */
+static int at91_poll_rx(struct net_device *dev, int quota)
+{
+	struct at91_priv *priv = netdev_priv(dev);
+	u32 reg_sr = at91_read(priv, AT91_SR);
+	const unsigned long *addr = (unsigned long *)&reg_sr;
+	unsigned int mb;
+	int received = 0;
+
+	if (priv->rx_next > AT91_MB_RX_LOW_LAST &&
+	    reg_sr & AT91_MB_RX_LOW_MASK)
+		dev_info(dev->dev.parent,
+			 "order of incoming frames cannot be guaranteed\n");
+
+ again:
+	for (mb = find_next_bit(addr, AT91_MB_RX_NUM, priv->rx_next);
+	     mb < AT91_MB_RX_NUM && quota > 0;
+	     reg_sr = at91_read(priv, AT91_SR),
+	     mb = find_next_bit(addr, AT91_MB_RX_NUM, ++priv->rx_next)) {
+		at91_read_msg(dev, mb);
+
+		/* reactivate mailboxes */
+		if (mb == AT91_MB_RX_LOW_LAST)
+			/* all lower mailboxes, we just finished the group */
+			at91_activate_rx_low(priv);
+		else if (mb > AT91_MB_RX_LOW_LAST)
+			/* only the mailbox we read */
+			at91_activate_rx_mb(priv, mb);
+
+		received++;
+		quota--;
+	}
+
+	/* upper group completed, look again in lower */
+	if (priv->rx_next > AT91_MB_RX_LOW_LAST &&
+	    quota > 0 && mb >= AT91_MB_RX_NUM) {
+		priv->rx_next = 0;
+		goto again;
+	}
+
+	return received;
+}
+
+static void at91_poll_err_frame(struct net_device *dev,
+		struct can_frame *cf, u32 reg_sr)
+{
+	struct at91_priv *priv = netdev_priv(dev);
+
+	/* CRC error */
+	if (reg_sr & AT91_IRQ_CERR) {
+		dev_dbg(dev->dev.parent, "CERR irq\n");
+		dev->stats.rx_errors++;
+		priv->can.can_stats.bus_error++;
+		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+	}
+
+	/* Stuffing Error */
+	if (reg_sr & AT91_IRQ_SERR) {
+		dev_dbg(dev->dev.parent, "SERR irq\n");
+		dev->stats.rx_errors++;
+		priv->can.can_stats.bus_error++;
+		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+		cf->data[2] |= CAN_ERR_PROT_STUFF;
+	}
+
+	/* Acknowledgement Error */
+	if (reg_sr & AT91_IRQ_AERR) {
+		dev_dbg(dev->dev.parent, "AERR irq\n");
+		dev->stats.tx_errors++;
+		cf->can_id |= CAN_ERR_ACK;
+	}
+
+	/* Form error */
+	if (reg_sr & AT91_IRQ_FERR) {
+		dev_dbg(dev->dev.parent, "FERR irq\n");
+		dev->stats.rx_errors++;
+		priv->can.can_stats.bus_error++;
+		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+		cf->data[2] |= CAN_ERR_PROT_FORM;
+	}
+
+	/* Bit Error */
+	if (reg_sr & AT91_IRQ_BERR) {
+		dev_dbg(dev->dev.parent, "BERR irq\n");
+		dev->stats.tx_errors++;
+		priv->can.can_stats.bus_error++;
+		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+		cf->data[2] |= CAN_ERR_PROT_BIT;
+	}
+}
+
+static int at91_poll_err(struct net_device *dev, int quota, u32 reg_sr)
+{
+	struct sk_buff *skb;
+	struct can_frame *cf;
+
+	if (quota == 0)
+		return 0;
+
+	skb = alloc_can_err_skb(dev, &cf);
+	if (unlikely(!skb))
+		return 0;
+
+	at91_poll_err_frame(dev, cf, reg_sr);
+	netif_receive_skb(skb);
+
+	dev->last_rx = jiffies;
+	dev->stats.rx_packets++;
+	dev->stats.rx_bytes += cf->can_dlc;
+
+	return 1;
+}
+
+static int at91_poll(struct napi_struct *napi, int quota)
+{
+	struct net_device *dev = napi->dev;
+	const struct at91_priv *priv = netdev_priv(dev);
+	u32 reg_sr = at91_read(priv, AT91_SR);
+	int work_done = 0;
+
+	if (reg_sr & AT91_IRQ_MB_RX)
+		work_done += at91_poll_rx(dev, quota - work_done);
+
+	/*
+	 * The error bits are clear on read,
+	 * so use saved value from irq handler.
+	 */
+	reg_sr |= priv->reg_sr;
+	if (reg_sr & AT91_IRQ_ERR_FRAME)
+		work_done += at91_poll_err(dev, quota - work_done, reg_sr);
+
+	if (work_done < quota) {
+		/* enable IRQs for frame errors and all mailboxes >= rx_next */
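+		/* e.g. rx_next = 3: re-enable RX IRQs for mailboxes 3..11 only */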
+		u32 reg_ier = AT91_IRQ_ERR_FRAME;
+		reg_ier |= AT91_IRQ_MB_RX & ~AT91_MB_RX_MASK(priv->rx_next);
+
+		napi_complete(napi);
+		at91_write(priv, AT91_IER, reg_ier);
+	}
+
+	return work_done;
+}
+
+/*
+ * theory of operation:
+ *
+ * priv->tx_echo holds the number of the oldest can_frame put for
+ * transmission into the hardware, but not yet ACKed by the CAN tx
+ * complete IRQ.
+ *
+ * We iterate from priv->tx_echo to priv->tx_next; for each packet that
+ * has been transmitted we echo it back to the CAN framework. If we
+ * discover a not yet transmitted packet, we stop looking for more.
+ *
+ */
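+/*
+ * Example: with tx_echo = 2 and tx_next = 5, mailboxes 14, 15 and 12
+ * (tx_echo values 2, 3 and 4) are checked in turn until one is found
+ * that has not been sent yet.
+ */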
+static void at91_irq_tx(struct net_device *dev, u32 reg_sr)
+{
+	struct at91_priv *priv = netdev_priv(dev);
+	u32 reg_msr;
+	unsigned int mb;
+
+	/* masking of reg_sr not needed, already done by at91_irq */
+
+	for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
+		mb = get_tx_echo_mb(priv);
+
+		/* no event in mailbox? */
+		if (!(reg_sr & (1 << mb)))
+			break;
+
+		/* Disable irq for this TX mailbox */
+		at91_write(priv, AT91_IDR, 1 << mb);
+
+		/*
+		 * only echo if the mailbox signals us a transfer
+		 * complete (MSR_MRDY). Otherwise it's a transfer
+		 * abort. "can_bus_off()" takes care of the skbs
+		 * parked in the echo queue.
+		 */
+		reg_msr = at91_read(priv, AT91_MSR(mb));
+		if (likely(reg_msr & AT91_MSR_MRDY &&
+			   ~reg_msr & AT91_MSR_MABT)) {
+			/* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */
+			can_get_echo_skb(dev, mb - AT91_MB_TX_FIRST);
+			dev->stats.tx_packets++;
+		}
+	}
+
+	/*
+	 * restart the queue if we don't have a wrap around, but also
+	 * restart if we got a TX int for the last can frame directly
+	 * before a wrap around.
+	 */
+	if ((priv->tx_next & AT91_NEXT_MASK) != 0 ||
+	    (priv->tx_echo & AT91_NEXT_MASK) == 0)
+		netif_wake_queue(dev);
+}
+
+static void at91_irq_err_state(struct net_device *dev,
+		struct can_frame *cf, enum can_state new_state)
+{
+	struct at91_priv *priv = netdev_priv(dev);
+	u32 reg_idr, reg_ier, reg_ecr;
+	u8 tec, rec;
+
+	reg_ecr = at91_read(priv, AT91_ECR);
+	rec = reg_ecr & 0xff;
+	tec = reg_ecr >> 16;
+
+	switch (priv->can.state) {
+	case CAN_STATE_ERROR_ACTIVE:
+		/*
+		 * from: ERROR_ACTIVE
+		 * to  : ERROR_WARNING, ERROR_PASSIVE, BUS_OFF
+		 * =>  : there was a warning int
+		 */
+		if (new_state >= CAN_STATE_ERROR_WARNING &&
+		    new_state <= CAN_STATE_BUS_OFF) {
+			dev_dbg(dev->dev.parent, "Error Warning IRQ\n");
+			priv->can.can_stats.error_warning++;
+
+			cf->can_id |= CAN_ERR_CRTL;
+			cf->data[1] = (tec > rec) ?
+				CAN_ERR_CRTL_TX_WARNING :
+				CAN_ERR_CRTL_RX_WARNING;
+		}
+	case CAN_STATE_ERROR_WARNING:	/* fallthrough */
+		/*
+		 * from: ERROR_ACTIVE, ERROR_WARNING
+		 * to  : ERROR_PASSIVE, BUS_OFF
+		 * =>  : error passive int
+		 */
+		if (new_state >= CAN_STATE_ERROR_PASSIVE &&
+		    new_state <= CAN_STATE_BUS_OFF) {
+			dev_dbg(dev->dev.parent, "Error Passive IRQ\n");
+			priv->can.can_stats.error_passive++;
+
+			cf->can_id |= CAN_ERR_CRTL;
+			cf->data[1] = (tec > rec) ?
+				CAN_ERR_CRTL_TX_PASSIVE :
+				CAN_ERR_CRTL_RX_PASSIVE;
+		}
+		break;
+	case CAN_STATE_BUS_OFF:
+		/*
+		 * from: BUS_OFF
+		 * to  : ERROR_ACTIVE, ERROR_WARNING, ERROR_PASSIVE
+		 */
+		if (new_state <= CAN_STATE_ERROR_PASSIVE) {
+			cf->can_id |= CAN_ERR_RESTARTED;
+
+			dev_dbg(dev->dev.parent, "restarted\n");
+			priv->can.can_stats.restarts++;
+
+			netif_carrier_on(dev);
+			netif_wake_queue(dev);
+		}
+		break;
+	default:
+		break;
+	}
+
+
+	/* process state changes depending on the new state */
+	switch (new_state) {
+	case CAN_STATE_ERROR_ACTIVE:
+		/*
+		 * actually we want to enable AT91_IRQ_WARN here, but
+		 * it screws up the system under certain
+		 * circumstances. so just enable AT91_IRQ_ERRP, thus
+		 * the "fallthrough"
+		 */
+		dev_dbg(dev->dev.parent, "Error Active\n");
+		cf->can_id |= CAN_ERR_PROT;
+		cf->data[2] = CAN_ERR_PROT_ACTIVE;
+	case CAN_STATE_ERROR_WARNING:	/* fallthrough */
+		reg_idr = AT91_IRQ_ERRA | AT91_IRQ_WARN | AT91_IRQ_BOFF;
+		reg_ier = AT91_IRQ_ERRP;
+		break;
+	case CAN_STATE_ERROR_PASSIVE:
+		reg_idr = AT91_IRQ_ERRA | AT91_IRQ_WARN | AT91_IRQ_ERRP;
+		reg_ier = AT91_IRQ_BOFF;
+		break;
+	case CAN_STATE_BUS_OFF:
+		reg_idr = AT91_IRQ_ERRA | AT91_IRQ_ERRP |
+			AT91_IRQ_WARN | AT91_IRQ_BOFF;
+		reg_ier = 0;
+
+		cf->can_id |= CAN_ERR_BUSOFF;
+
+		dev_dbg(dev->dev.parent, "bus-off\n");
+		netif_carrier_off(dev);
+		priv->can.can_stats.bus_off++;
+
+		/* turn off chip, if restart is disabled */
+		if (!priv->can.restart_ms) {
+			at91_chip_stop(dev, CAN_STATE_BUS_OFF);
+			return;
+		}
+		break;
+	default:
+		break;
+	}
+
+	at91_write(priv, AT91_IDR, reg_idr);
+	at91_write(priv, AT91_IER, reg_ier);
+}
+
+static void at91_irq_err(struct net_device *dev)
+{
+	struct at91_priv *priv = netdev_priv(dev);
+	struct sk_buff *skb;
+	struct can_frame *cf;
+	enum can_state new_state;
+	u32 reg_sr;
+
+	reg_sr = at91_read(priv, AT91_SR);
+
+	/* we need to look at the unmasked reg_sr */
+	if (unlikely(reg_sr & AT91_IRQ_BOFF))
+		new_state = CAN_STATE_BUS_OFF;
+	else if (unlikely(reg_sr & AT91_IRQ_ERRP))
+		new_state = CAN_STATE_ERROR_PASSIVE;
+	else if (unlikely(reg_sr & AT91_IRQ_WARN))
+		new_state = CAN_STATE_ERROR_WARNING;
+	else if (likely(reg_sr & AT91_IRQ_ERRA))
+		new_state = CAN_STATE_ERROR_ACTIVE;
+	else {
+		dev_err(dev->dev.parent, "BUG! hardware in undefined state\n");
+		return;
+	}
+
+	/* state hasn't changed */
+	if (likely(new_state == priv->can.state))
+		return;
+
+	skb = alloc_can_err_skb(dev, &cf);
+	if (unlikely(!skb))
+		return;
+
+	at91_irq_err_state(dev, cf, new_state);
+	netif_rx(skb);
+
+	dev->last_rx = jiffies;
+	dev->stats.rx_packets++;
+	dev->stats.rx_bytes += cf->can_dlc;
+
+	priv->can.state = new_state;
+}
+
+/*
+ * interrupt handler
+ */
+static irqreturn_t at91_irq(int irq, void *dev_id)
+{
+	struct net_device *dev = dev_id;
+	struct at91_priv *priv = netdev_priv(dev);
+	irqreturn_t handled = IRQ_NONE;
+	u32 reg_sr, reg_imr;
+
+	reg_sr = at91_read(priv, AT91_SR);
+	reg_imr = at91_read(priv, AT91_IMR);
+
+	/* Ignore masked interrupts */
+	reg_sr &= reg_imr;
+	if (!reg_sr)
+		goto exit;
+
+	handled = IRQ_HANDLED;
+
+	/* Receive or error interrupt? -> napi */
+	if (reg_sr & (AT91_IRQ_MB_RX | AT91_IRQ_ERR_FRAME)) {
+		/*
+		 * The error bits are clear on read,
+		 * save for later use.
+		 */
+		priv->reg_sr = reg_sr;
+		at91_write(priv, AT91_IDR,
+			   AT91_IRQ_MB_RX | AT91_IRQ_ERR_FRAME);
+		napi_schedule(&priv->napi);
+	}
+
+	/* Transmission complete interrupt */
+	if (reg_sr & AT91_IRQ_MB_TX)
+		at91_irq_tx(dev, reg_sr);
+
+	at91_irq_err(dev);
+
+ exit:
+	return handled;
+}
+
+static int at91_open(struct net_device *dev)
+{
+	struct at91_priv *priv = netdev_priv(dev);
+	int err;
+
+	clk_enable(priv->clk);
+
+	/* check or determine and set bittime */
+	err = open_candev(dev);
+	if (err)
+		goto out;
+
+	/* register interrupt handler */
+	if (request_irq(dev->irq, at91_irq, IRQF_SHARED,
+			dev->name, dev)) {
+		err = -EAGAIN;
+		goto out_close;
+	}
+
+	/* start chip and queuing */
+	at91_chip_start(dev);
+	napi_enable(&priv->napi);
+	netif_start_queue(dev);
+
+	return 0;
+
+ out_close:
+	close_candev(dev);
+ out:
+	clk_disable(priv->clk);
+
+	return err;
+}
+
+/*
+ * stop CAN bus activity
+ */
+static int at91_close(struct net_device *dev)
+{
+	struct at91_priv *priv = netdev_priv(dev);
+
+	netif_stop_queue(dev);
+	napi_disable(&priv->napi);
+	at91_chip_stop(dev, CAN_STATE_STOPPED);
+
+	free_irq(dev->irq, dev);
+	clk_disable(priv->clk);
+
+	close_candev(dev);
+
+	return 0;
+}
+
+static int at91_set_mode(struct net_device *dev, enum can_mode mode)
+{
+	switch (mode) {
+	case CAN_MODE_START:
+		at91_chip_start(dev);
+		netif_wake_queue(dev);
+		break;
+
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+static const struct net_device_ops at91_netdev_ops = {
+	.ndo_open	= at91_open,
+	.ndo_stop	= at91_close,
+	.ndo_start_xmit	= at91_start_xmit,
+};
+
+static int __devinit at91_can_probe(struct platform_device *pdev)
+{
+	struct net_device *dev;
+	struct at91_priv *priv;
+	struct resource *res;
+	struct clk *clk;
+	void __iomem *addr;
+	int err, irq;
+
+	clk = clk_get(&pdev->dev, "can_clk");
+	if (IS_ERR(clk)) {
+		dev_err(&pdev->dev, "no clock defined\n");
+		err = -ENODEV;
+		goto exit;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	irq = platform_get_irq(pdev, 0);
+	if (!res || !irq) {
+		err = -ENODEV;
+		goto exit_put;
+	}
+
+	if (!request_mem_region(res->start,
+				resource_size(res),
+				pdev->name)) {
+		err = -EBUSY;
+		goto exit_put;
+	}
+
+	addr = ioremap_nocache(res->start, resource_size(res));
+	if (!addr) {
+		err = -ENOMEM;
+		goto exit_release;
+	}
+
+	dev = alloc_candev(sizeof(struct at91_priv));
+	if (!dev) {
+		err = -ENOMEM;
+		goto exit_iounmap;
+	}
+
+	dev->netdev_ops	= &at91_netdev_ops;
+	dev->irq = irq;
+	dev->flags |= IFF_ECHO;
+
+	priv = netdev_priv(dev);
+	priv->can.clock.freq = clk_get_rate(clk);
+	priv->can.bittiming_const = &at91_bittiming_const;
+	priv->can.do_set_bittiming = at91_set_bittiming;
+	priv->can.do_set_mode = at91_set_mode;
+	priv->reg_base = addr;
+	priv->dev = dev;
+	priv->clk = clk;
+	priv->pdata = pdev->dev.platform_data;
+
+	netif_napi_add(dev, &priv->napi, at91_poll, AT91_NAPI_WEIGHT);
+
+	dev_set_drvdata(&pdev->dev, dev);
+	SET_NETDEV_DEV(dev, &pdev->dev);
+
+	err = register_candev(dev);
+	if (err) {
+		dev_err(&pdev->dev, "registering netdev failed\n");
+		goto exit_free;
+	}
+
+	dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%d)\n",
+		 priv->reg_base, dev->irq);
+
+	return 0;
+
+ exit_free:
+	free_netdev(dev);
+ exit_iounmap:
+	iounmap(addr);
+ exit_release:
+	release_mem_region(res->start, resource_size(res));
+ exit_put:
+	clk_put(clk);
+ exit:
+	return err;
+}
+
+static int __devexit at91_can_remove(struct platform_device *pdev)
+{
+	struct net_device *dev = platform_get_drvdata(pdev);
+	struct at91_priv *priv = netdev_priv(dev);
+	struct resource *res;
+
+	unregister_netdev(dev);
+
+	platform_set_drvdata(pdev, NULL);
+
+	free_netdev(dev);
+
+	iounmap(priv->reg_base);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	release_mem_region(res->start, resource_size(res));
+
+	clk_put(priv->clk);
+
+	return 0;
+}
+
+static struct platform_driver at91_can_driver = {
+	.probe		= at91_can_probe,
+	.remove		= __devexit_p(at91_can_remove),
+	.driver		= {
+		.name	= DRV_NAME,
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init at91_can_module_init(void)
+{
+	printk(KERN_INFO "%s netdevice driver\n", DRV_NAME);
+	return platform_driver_register(&at91_can_driver);
+}
+
+static void __exit at91_can_module_exit(void)
+{
+	platform_driver_unregister(&at91_can_driver);
+	printk(KERN_INFO "%s: driver removed\n", DRV_NAME);
+}
+
+module_init(at91_can_module_init);
+module_exit(at91_can_module_exit);
+
+MODULE_AUTHOR("Marc Kleine-Budde <mkl@pengutronix.de>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION(DRV_NAME " CAN netdevice driver");
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index d465eaa..65a2d0b 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -200,6 +200,9 @@
 /** NOTE:: For DM646x the IN_VECTOR has changed */
 #define EMAC_DM646X_MAC_IN_VECTOR_RX_INT_VEC	BIT(EMAC_DEF_RX_CH)
 #define EMAC_DM646X_MAC_IN_VECTOR_TX_INT_VEC	BIT(16 + EMAC_DEF_TX_CH)
+#define EMAC_DM646X_MAC_IN_VECTOR_HOST_INT	BIT(26)
+#define EMAC_DM646X_MAC_IN_VECTOR_STATPEND_INT	BIT(27)
+
 
 /* CPPI bit positions */
 #define EMAC_CPPI_SOP_BIT		BIT(31)
@@ -2167,7 +2170,11 @@
 		emac_int_enable(priv);
 	}
 
-	if (unlikely(status & EMAC_DM644X_MAC_IN_VECTOR_HOST_INT)) {
+	mask = EMAC_DM644X_MAC_IN_VECTOR_HOST_INT;
+	if (priv->version == EMAC_VERSION_2)
+		mask = EMAC_DM646X_MAC_IN_VECTOR_HOST_INT;
+
+	if (unlikely(status & mask)) {
 		u32 ch, cause;
 		dev_err(emac_dev, "DaVinci EMAC: Fatal Hardware Error\n");
 		netif_stop_queue(ndev);
diff --git a/drivers/net/irda/kingsun-sir.c b/drivers/net/irda/kingsun-sir.c
index 2fc30b4..cb90d64 100644
--- a/drivers/net/irda/kingsun-sir.c
+++ b/drivers/net/irda/kingsun-sir.c
@@ -66,7 +66,6 @@
 #include <linux/errno.h>
 #include <linux/init.h>
 #include <linux/slab.h>
-#include <linux/kref.h>
 #include <linux/usb.h>
 #include <linux/device.h>
 #include <linux/crc32.h>
diff --git a/drivers/net/irda/ks959-sir.c b/drivers/net/irda/ks959-sir.c
index f4d13fc..b54d3b4 100644
--- a/drivers/net/irda/ks959-sir.c
+++ b/drivers/net/irda/ks959-sir.c
@@ -118,7 +118,6 @@
 #include <linux/errno.h>
 #include <linux/init.h>
 #include <linux/slab.h>
-#include <linux/kref.h>
 #include <linux/usb.h>
 #include <linux/device.h>
 #include <linux/crc32.h>
diff --git a/drivers/net/irda/ksdazzle-sir.c b/drivers/net/irda/ksdazzle-sir.c
index 5f9d733..8d713eb 100644
--- a/drivers/net/irda/ksdazzle-sir.c
+++ b/drivers/net/irda/ksdazzle-sir.c
@@ -82,7 +82,6 @@
 #include <linux/errno.h>
 #include <linux/init.h>
 #include <linux/slab.h>
-#include <linux/kref.h>
 #include <linux/usb.h>
 #include <linux/device.h>
 #include <linux/crc32.h>
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
index b3d30bc..c0e0bb9 100644
--- a/drivers/net/irda/mcs7780.c
+++ b/drivers/net/irda/mcs7780.c
@@ -50,7 +50,6 @@
 #include <linux/errno.h>
 #include <linux/init.h>
 #include <linux/slab.h>
-#include <linux/kref.h>
 #include <linux/usb.h>
 #include <linux/device.h>
 #include <linux/crc32.h>
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 5c498d2..d445845 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1,4 +1,4 @@
-/* A simple network driver using virtio.
+/* A network driver using virtio.
  *
  * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
  *
@@ -48,19 +48,9 @@
 	struct napi_struct napi;
 	unsigned int status;
 
-	/* The skb we couldn't send because buffers were full. */
-	struct sk_buff *last_xmit_skb;
-
-	/* If we need to free in a timer, this is it. */
-	struct timer_list xmit_free_timer;
-
 	/* Number of input buffers, and max we've ever had. */
 	unsigned int num, max;
 
-	/* For cleaning up after transmission. */
-	struct tasklet_struct tasklet;
-	bool free_in_tasklet;
-
 	/* I like... big packets and I cannot lie! */
 	bool big_packets;
 
@@ -78,9 +68,17 @@
 	struct page *pages;
 };
 
-static inline void *skb_vnet_hdr(struct sk_buff *skb)
+struct skb_vnet_hdr {
+	union {
+		struct virtio_net_hdr hdr;
+		struct virtio_net_hdr_mrg_rxbuf mhdr;
+	};
+	unsigned int num_sg;
+};
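+/* Both layouts overlay the same storage; the whole struct lives in skb->cb. */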
+
+static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb)
 {
-	return (struct virtio_net_hdr *)skb->cb;
+	return (struct skb_vnet_hdr *)skb->cb;
 }
 
 static void give_a_page(struct virtnet_info *vi, struct page *page)
@@ -119,17 +117,13 @@
 
 	/* We were probably waiting for more output buffers. */
 	netif_wake_queue(vi->dev);
-
-	/* Make sure we re-xmit last_xmit_skb: if there are no more packets
-	 * queued, start_xmit won't be called. */
-	tasklet_schedule(&vi->tasklet);
 }
 
 static void receive_skb(struct net_device *dev, struct sk_buff *skb,
 			unsigned len)
 {
 	struct virtnet_info *vi = netdev_priv(dev);
-	struct virtio_net_hdr *hdr = skb_vnet_hdr(skb);
+	struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
 	int err;
 	int i;
 
@@ -140,7 +134,6 @@
 	}
 
 	if (vi->mergeable_rx_bufs) {
-		struct virtio_net_hdr_mrg_rxbuf *mhdr = skb_vnet_hdr(skb);
 		unsigned int copy;
 		char *p = page_address(skb_shinfo(skb)->frags[0].page);
 
@@ -148,8 +141,8 @@
 			len = PAGE_SIZE;
 		len -= sizeof(struct virtio_net_hdr_mrg_rxbuf);
 
-		memcpy(hdr, p, sizeof(*mhdr));
-		p += sizeof(*mhdr);
+		memcpy(&hdr->mhdr, p, sizeof(hdr->mhdr));
+		p += sizeof(hdr->mhdr);
 
 		copy = len;
 		if (copy > skb_tailroom(skb))
@@ -164,13 +157,13 @@
 			skb_shinfo(skb)->nr_frags--;
 		} else {
 			skb_shinfo(skb)->frags[0].page_offset +=
-				sizeof(*mhdr) + copy;
+				sizeof(hdr->mhdr) + copy;
 			skb_shinfo(skb)->frags[0].size = len;
 			skb->data_len += len;
 			skb->len += len;
 		}
 
-		while (--mhdr->num_buffers) {
+		while (--hdr->mhdr.num_buffers) {
 			struct sk_buff *nskb;
 
 			i = skb_shinfo(skb)->nr_frags;
@@ -184,7 +177,7 @@
 			nskb = vi->rvq->vq_ops->get_buf(vi->rvq, &len);
 			if (!nskb) {
 				pr_debug("%s: rx error: %d buffers missing\n",
-					 dev->name, mhdr->num_buffers);
+					 dev->name, hdr->mhdr.num_buffers);
 				dev->stats.rx_length_errors++;
 				goto drop;
 			}
@@ -205,7 +198,7 @@
 			skb->len += len;
 		}
 	} else {
-		len -= sizeof(struct virtio_net_hdr);
+		len -= sizeof(hdr->hdr);
 
 		if (len <= MAX_PACKET_LEN)
 			trim_pages(vi, skb);
@@ -223,9 +216,11 @@
 	dev->stats.rx_bytes += skb->len;
 	dev->stats.rx_packets++;
 
-	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
+	if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
 		pr_debug("Needs csum!\n");
-		if (!skb_partial_csum_set(skb,hdr->csum_start,hdr->csum_offset))
+		if (!skb_partial_csum_set(skb,
+					  hdr->hdr.csum_start,
+					  hdr->hdr.csum_offset))
 			goto frame_err;
 	}
 
@@ -233,9 +228,9 @@
 	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
 		 ntohs(skb->protocol), skb->len, skb->pkt_type);
 
-	if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
+	if (hdr->hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
 		pr_debug("GSO!\n");
-		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
+		switch (hdr->hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
 		case VIRTIO_NET_HDR_GSO_TCPV4:
 			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
 			break;
@@ -248,14 +243,14 @@
 		default:
 			if (net_ratelimit())
 				printk(KERN_WARNING "%s: bad gso type %u.\n",
-				       dev->name, hdr->gso_type);
+				       dev->name, hdr->hdr.gso_type);
 			goto frame_err;
 		}
 
-		if (hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN)
+		if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
 			skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
 
-		skb_shinfo(skb)->gso_size = hdr->gso_size;
+		skb_shinfo(skb)->gso_size = hdr->hdr.gso_size;
 		if (skb_shinfo(skb)->gso_size == 0) {
 			if (net_ratelimit())
 				printk(KERN_WARNING "%s: zero gso size.\n",
@@ -285,8 +280,8 @@
 	bool oom = false;
 
 	sg_init_table(sg, 2+MAX_SKB_FRAGS);
-	for (;;) {
-		struct virtio_net_hdr *hdr;
+	do {
+		struct skb_vnet_hdr *hdr;
 
 		skb = netdev_alloc_skb(vi->dev, MAX_PACKET_LEN + NET_IP_ALIGN);
 		if (unlikely(!skb)) {
@@ -298,7 +293,7 @@
 		skb_put(skb, MAX_PACKET_LEN);
 
 		hdr = skb_vnet_hdr(skb);
-		sg_set_buf(sg, hdr, sizeof(*hdr));
+		sg_set_buf(sg, &hdr->hdr, sizeof(hdr->hdr));
 
 		if (vi->big_packets) {
 			for (i = 0; i < MAX_SKB_FRAGS; i++) {
@@ -328,7 +323,7 @@
 			break;
 		}
 		vi->num++;
-	}
+	} while (err >= num);
 	if (unlikely(vi->num > vi->max))
 		vi->max = vi->num;
 	vi->rvq->vq_ops->kick(vi->rvq);
@@ -346,7 +341,7 @@
 	if (!vi->mergeable_rx_bufs)
 		return try_fill_recv_maxbufs(vi, gfp);
 
-	for (;;) {
+	do {
 		skb_frag_t *f;
 
 		skb = netdev_alloc_skb(vi->dev, GOOD_COPY_LEN + NET_IP_ALIGN);
@@ -380,7 +375,7 @@
 			break;
 		}
 		vi->num++;
-	}
+	} while (err > 0);
 	if (unlikely(vi->num > vi->max))
 		vi->max = vi->num;
 	vi->rvq->vq_ops->kick(vi->rvq);
@@ -448,42 +443,26 @@
 	return received;
 }
 
-static void free_old_xmit_skbs(struct virtnet_info *vi)
+static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
 {
 	struct sk_buff *skb;
-	unsigned int len;
+	unsigned int len, tot_sgs = 0;
 
 	while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) {
 		pr_debug("Sent skb %p\n", skb);
 		__skb_unlink(skb, &vi->send);
 		vi->dev->stats.tx_bytes += skb->len;
 		vi->dev->stats.tx_packets++;
+		tot_sgs += skb_vnet_hdr(skb)->num_sg;
 		kfree_skb(skb);
 	}
-}
-
-/* If the virtio transport doesn't always notify us when all in-flight packets
- * are consumed, we fall back to using this function on a timer to free them. */
-static void xmit_free(unsigned long data)
-{
-	struct virtnet_info *vi = (void *)data;
-
-	netif_tx_lock(vi->dev);
-
-	free_old_xmit_skbs(vi);
-
-	if (!skb_queue_empty(&vi->send))
-		mod_timer(&vi->xmit_free_timer, jiffies + (HZ/10));
-
-	netif_tx_unlock(vi->dev);
+	return tot_sgs;
 }
 
 static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
 {
-	int num, err;
 	struct scatterlist sg[2+MAX_SKB_FRAGS];
-	struct virtio_net_hdr_mrg_rxbuf *mhdr = skb_vnet_hdr(skb);
-	struct virtio_net_hdr *hdr = skb_vnet_hdr(skb);
+	struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
 	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
 
 	sg_init_table(sg, 2+MAX_SKB_FRAGS);
@@ -491,108 +470,89 @@
 	pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
 
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
-		hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
-		hdr->csum_start = skb->csum_start - skb_headroom(skb);
-		hdr->csum_offset = skb->csum_offset;
+		hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+		hdr->hdr.csum_start = skb->csum_start - skb_headroom(skb);
+		hdr->hdr.csum_offset = skb->csum_offset;
 	} else {
-		hdr->flags = 0;
-		hdr->csum_offset = hdr->csum_start = 0;
+		hdr->hdr.flags = 0;
+		hdr->hdr.csum_offset = hdr->hdr.csum_start = 0;
 	}
 
 	if (skb_is_gso(skb)) {
-		hdr->hdr_len = skb_headlen(skb);
-		hdr->gso_size = skb_shinfo(skb)->gso_size;
+		hdr->hdr.hdr_len = skb_headlen(skb);
+		hdr->hdr.gso_size = skb_shinfo(skb)->gso_size;
 		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
-			hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
+			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
 		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
-			hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
+			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
 		else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
-			hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
+			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
 		else
 			BUG();
 		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
-			hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
+			hdr->hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
 	} else {
-		hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;
-		hdr->gso_size = hdr->hdr_len = 0;
+		hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
+		hdr->hdr.gso_size = hdr->hdr.hdr_len = 0;
 	}
 
-	mhdr->num_buffers = 0;
+	hdr->mhdr.num_buffers = 0;
 
 	/* Encode metadata header at front. */
 	if (vi->mergeable_rx_bufs)
-		sg_set_buf(sg, mhdr, sizeof(*mhdr));
+		sg_set_buf(sg, &hdr->mhdr, sizeof(hdr->mhdr));
 	else
-		sg_set_buf(sg, hdr, sizeof(*hdr));
+		sg_set_buf(sg, &hdr->hdr, sizeof(hdr->hdr));
 
-	num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
-
-	err = vi->svq->vq_ops->add_buf(vi->svq, sg, num, 0, skb);
-	if (err >= 0 && !vi->free_in_tasklet)
-		mod_timer(&vi->xmit_free_timer, jiffies + (HZ/10));
-
-	return err;
-}
-
-static void xmit_tasklet(unsigned long data)
-{
-	struct virtnet_info *vi = (void *)data;
-
-	netif_tx_lock_bh(vi->dev);
-	if (vi->last_xmit_skb && xmit_skb(vi, vi->last_xmit_skb) >= 0) {
-		vi->svq->vq_ops->kick(vi->svq);
-		vi->last_xmit_skb = NULL;
-	}
-	if (vi->free_in_tasklet)
-		free_old_xmit_skbs(vi);
-	netif_tx_unlock_bh(vi->dev);
+	hdr->num_sg = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
+	return vi->svq->vq_ops->add_buf(vi->svq, sg, hdr->num_sg, 0, skb);
 }
 
 static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct virtnet_info *vi = netdev_priv(dev);
+	int capacity;
 
 again:
 	/* Free up any pending old buffers before queueing new ones. */
 	free_old_xmit_skbs(vi);
 
-	/* If we has a buffer left over from last time, send it now. */
-	if (unlikely(vi->last_xmit_skb) &&
-	    xmit_skb(vi, vi->last_xmit_skb) < 0)
-		goto stop_queue;
-
-	vi->last_xmit_skb = NULL;
-
 	/* Put new one in send queue and do transmit */
-	if (likely(skb)) {
-		__skb_queue_head(&vi->send, skb);
-		if (xmit_skb(vi, skb) < 0) {
-			vi->last_xmit_skb = skb;
-			skb = NULL;
-			goto stop_queue;
+	__skb_queue_head(&vi->send, skb);
+	capacity = xmit_skb(vi, skb);
+
+	/* This can happen with OOM and indirect buffers. */
+	if (unlikely(capacity < 0)) {
+		netif_stop_queue(dev);
+		dev_warn(&dev->dev, "Unexpected full queue\n");
+		if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
+			vi->svq->vq_ops->disable_cb(vi->svq);
+			netif_start_queue(dev);
+			goto again;
+		}
+		return NETDEV_TX_BUSY;
+	}
+
+	vi->svq->vq_ops->kick(vi->svq);
+	/* Don't wait up for transmitted skbs to be freed. */
+	skb_orphan(skb);
+	nf_reset(skb);
+
+	/* Apparently nice girls don't return TX_BUSY; stop the queue
+	 * before it gets out of hand.  Naturally, this wastes entries. */
+	if (capacity < 2+MAX_SKB_FRAGS) {
+		netif_stop_queue(dev);
+		if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
+			/* More just got used, free them then recheck. */
+			capacity += free_old_xmit_skbs(vi);
+			if (capacity >= 2+MAX_SKB_FRAGS) {
+				netif_start_queue(dev);
+				vi->svq->vq_ops->disable_cb(vi->svq);
+			}
 		}
 	}
-done:
-	vi->svq->vq_ops->kick(vi->svq);
+
 	return NETDEV_TX_OK;
-
-stop_queue:
-	pr_debug("%s: virtio not prepared to send\n", dev->name);
-	netif_stop_queue(dev);
-
-	/* Activate callback for using skbs: if this returns false it
-	 * means some were used in the meantime. */
-	if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
-		vi->svq->vq_ops->disable_cb(vi->svq);
-		netif_start_queue(dev);
-		goto again;
-	}
-	if (skb) {
-		/* Drop this skb: we only queue one. */
-		vi->dev->stats.tx_dropped++;
-		kfree_skb(skb);
-	}
-	goto done;
 }
 
 static int virtnet_set_mac_address(struct net_device *dev, void *p)
@@ -925,10 +885,6 @@
 	vi->pages = NULL;
 	INIT_DELAYED_WORK(&vi->refill, refill_work);
 
-	/* If they give us a callback when all buffers are done, we don't need
-	 * the timer. */
-	vi->free_in_tasklet = virtio_has_feature(vdev,VIRTIO_F_NOTIFY_ON_EMPTY);
-
 	/* If we can receive ANY GSO packets, we must allocate large ones. */
 	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4)
 	    || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6)
@@ -960,11 +916,6 @@
 	skb_queue_head_init(&vi->recv);
 	skb_queue_head_init(&vi->send);
 
-	tasklet_init(&vi->tasklet, xmit_tasklet, (unsigned long)vi);
-
-	if (!vi->free_in_tasklet)
-		setup_timer(&vi->xmit_free_timer, xmit_free, (unsigned long)vi);
-
 	err = register_netdev(dev);
 	if (err) {
 		pr_debug("virtio_net: registering device failed\n");
@@ -1005,9 +956,6 @@
 	/* Stop all the virtqueues. */
 	vdev->config->reset(vdev);
 
-	if (!vi->free_in_tasklet)
-		del_timer_sync(&vi->xmit_free_timer);
-
 	/* Free our skbs in send and recv queues, if any. */
 	while ((skb = __skb_dequeue(&vi->recv)) != NULL) {
 		kfree_skb(skb);
@@ -1041,7 +989,6 @@
 	VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
 	VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
 	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
-	VIRTIO_F_NOTIFY_ON_EMPTY,
 };
 
 static struct virtio_driver virtio_net = {
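
A standalone userspace sketch of the queue-stop heuristic that the start_xmit() rework above introduces: stop the queue as soon as fewer than 2+MAX_SKB_FRAGS descriptors remain, reclaim completed buffers, and only restart if a worst-case packet would fit again. RING_SIZE, model_add_buf() and model_reclaim() are invented stand-ins for illustration, not virtio APIs, and the constants are arbitrary.

#include <stdbool.h>
#include <stdio.h>

#define MAX_SKB_FRAGS	17		/* illustrative value only */
#define RING_SIZE	64		/* pretend descriptor ring size */

static int ring_free = RING_SIZE;	/* free descriptors in the model ring */
static bool queue_stopped;

/* stand-in for add_buf(): consume descriptors, return remaining capacity */
static int model_add_buf(int num_sg)
{
	if (ring_free < num_sg)
		return -1;
	ring_free -= num_sg;
	return ring_free;
}

/* stand-in for free_old_xmit_skbs(): the host consumed some buffers */
static int model_reclaim(int sgs)
{
	ring_free += sgs;
	return sgs;
}

static void model_start_xmit(int num_sg)
{
	int capacity = model_add_buf(num_sg);

	if (capacity < 0) {		/* ring completely full: unexpected */
		queue_stopped = true;
		return;
	}

	/* Stop early so the next packet can never fail: a worst-case skb
	 * needs up to 2 + MAX_SKB_FRAGS descriptors. */
	if (capacity < 2 + MAX_SKB_FRAGS) {
		queue_stopped = true;
		/* mirror the enable_cb()/disable_cb() recheck */
		capacity += model_reclaim(num_sg);
		if (capacity >= 2 + MAX_SKB_FRAGS)
			queue_stopped = false;
	}
}

int main(void)
{
	int i;

	for (i = 0; i < 8; i++)
		model_start_xmit(10);
	printf("free=%d stopped=%d\n", ring_free, queue_stopped);
	return 0;
}
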
diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
index a9d926b..e7be66d 100644
--- a/drivers/pci/hotplug/acpiphp_ibm.c
+++ b/drivers/pci/hotplug/acpiphp_ibm.c
@@ -406,7 +406,6 @@
 			__func__, status);
 		return retval;
 	}
-	info->hardware_id.string[sizeof(info->hardware_id.length) - 1] = '\0';
 
 	if (info->current_status && (info->valid & ACPI_VALID_HID) &&
 			(!strcmp(info->hardware_id.string, IBM_HARDWARE_ID1) ||
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index da3c08b..749e210 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -624,7 +624,7 @@
 	struct backlight_device *bd = eeepc_backlight_device;
 	if (bd) {
 		int old = bd->props.brightness;
-		bd->props.brightness = read_brightness(bd);
+		backlight_force_update(bd, BACKLIGHT_UPDATE_HOTKEY);
 		return old;
 	}
 	return -1;
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index f78d275..3910f2f 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -22,7 +22,7 @@
  */
 
 #define TPACPI_VERSION "0.23"
-#define TPACPI_SYSFS_VERSION 0x020400
+#define TPACPI_SYSFS_VERSION 0x020500
 
 /*
  *  Changelog:
@@ -145,6 +145,51 @@
 	TP_ACPI_WGSV_STATE_UWBPWR	= 0x0020, /* UWB radio enabled */
 };
 
+/* HKEY events */
+enum tpacpi_hkey_event_t {
+	/* Hotkey-related */
+	TP_HKEY_EV_HOTKEY_BASE		= 0x1001, /* first hotkey (FN+F1) */
+	TP_HKEY_EV_BRGHT_UP		= 0x1010, /* Brightness up */
+	TP_HKEY_EV_BRGHT_DOWN		= 0x1011, /* Brightness down */
+	TP_HKEY_EV_VOL_UP		= 0x1015, /* Volume up or unmute */
+	TP_HKEY_EV_VOL_DOWN		= 0x1016, /* Volume down or unmute */
+	TP_HKEY_EV_VOL_MUTE		= 0x1017, /* Mixer output mute */
+
+	/* Reasons for waking up from S3/S4 */
+	TP_HKEY_EV_WKUP_S3_UNDOCK	= 0x2304, /* undock requested, S3 */
+	TP_HKEY_EV_WKUP_S4_UNDOCK	= 0x2404, /* undock requested, S4 */
+	TP_HKEY_EV_WKUP_S3_BAYEJ	= 0x2305, /* bay ejection req, S3 */
+	TP_HKEY_EV_WKUP_S4_BAYEJ	= 0x2405, /* bay ejection req, S4 */
+	TP_HKEY_EV_WKUP_S3_BATLOW	= 0x2313, /* battery empty, S3 */
+	TP_HKEY_EV_WKUP_S4_BATLOW	= 0x2413, /* battery empty, S4 */
+
+	/* Auto-sleep after eject request */
+	TP_HKEY_EV_BAYEJ_ACK		= 0x3003, /* bay ejection complete */
+	TP_HKEY_EV_UNDOCK_ACK		= 0x4003, /* undock complete */
+
+	/* Misc bay events */
+	TP_HKEY_EV_OPTDRV_EJ		= 0x3006, /* opt. drive tray ejected */
+
+	/* User-interface events */
+	TP_HKEY_EV_LID_CLOSE		= 0x5001, /* laptop lid closed */
+	TP_HKEY_EV_LID_OPEN		= 0x5002, /* laptop lid opened */
+	TP_HKEY_EV_TABLET_TABLET	= 0x5009, /* tablet swivel up */
+	TP_HKEY_EV_TABLET_NOTEBOOK	= 0x500a, /* tablet swivel down */
+	TP_HKEY_EV_PEN_INSERTED		= 0x500b, /* tablet pen inserted */
+	TP_HKEY_EV_PEN_REMOVED		= 0x500c, /* tablet pen removed */
+	TP_HKEY_EV_BRGHT_CHANGED	= 0x5010, /* backlight control event */
+
+	/* Thermal events */
+	TP_HKEY_EV_ALARM_BAT_HOT	= 0x6011, /* battery too hot */
+	TP_HKEY_EV_ALARM_BAT_XHOT	= 0x6012, /* battery critically hot */
+	TP_HKEY_EV_ALARM_SENSOR_HOT	= 0x6021, /* sensor too hot */
+	TP_HKEY_EV_ALARM_SENSOR_XHOT	= 0x6022, /* sensor critically hot */
+	TP_HKEY_EV_THM_TABLE_CHANGED	= 0x6030, /* thermal table changed */
+
+	/* Misc */
+	TP_HKEY_EV_RFKILL_CHANGED	= 0x7000, /* rfkill switch changed */
+};
+
 /****************************************************************************
  * Main driver
  */
@@ -1848,6 +1893,27 @@
  * Hotkey subdriver
  */
 
+/*
+ * ThinkPad firmware event model
+ *
+ * The ThinkPad firmware has two main event interfaces: normal ACPI
+ * notifications (which follow the ACPI standard), and a private event
+ * interface.
+ *
+ * The private event interface also issues events for the hotkeys.  As
+ * the driver gained features, the event handling code ended up being
+ * built around the hotkey subdriver.  This will need to be refactored
+ * to a more formal event API eventually.
+ *
+ * Some "hotkeys" are actually supposed to be used as event reports,
+ * such as "brightness has changed", "volume has changed", depending on
+ * the ThinkPad model and how the firmware is operating.
+ *
+ * Unlike other classes, hotkey-class events have mask/unmask control on
+ * non-ancient firmware.  However, how it behaves changes a lot with the
+ * firmware model and version.
+ */
+
 enum {	/* hot key scan codes (derived from ACPI DSDT) */
 	TP_ACPI_HOTKEYSCAN_FNF1		= 0,
 	TP_ACPI_HOTKEYSCAN_FNF2,
@@ -1875,7 +1941,7 @@
 	TP_ACPI_HOTKEYSCAN_THINKPAD,
 };
 
-enum {	/* Keys available through NVRAM polling */
+enum {	/* Keys/events available through NVRAM polling */
 	TPACPI_HKEY_NVRAM_KNOWN_MASK = 0x00fb88c0U,
 	TPACPI_HKEY_NVRAM_GOOD_MASK  = 0x00fb8000U,
 };
@@ -1930,8 +1996,11 @@
 static struct mutex hotkey_thread_mutex;
 
 /*
- * Acquire mutex to write poller control variables.
- * Increment hotkey_config_change when changing them.
+ * Acquire mutex to write poller control variables as an
+ * atomic block.
+ *
+ * Increment hotkey_config_change when changing them if you
+ * want the kthread to forget old state.
  *
  * See HOTKEY_CONFIG_CRITICAL_START/HOTKEY_CONFIG_CRITICAL_END
  */
@@ -1942,6 +2011,11 @@
  * hotkey poller control variables
  *
  * Must be atomic or readers will also need to acquire mutex
+ *
+ * HOTKEY_CONFIG_CRITICAL_START/HOTKEY_CONFIG_CRITICAL_END
+ * should be used only when the changes need to be taken as
+ * a block, OR when one needs to force the kthread to forget
+ * old state.
  */
 static u32 hotkey_source_mask;		/* bit mask 0=ACPI,1=NVRAM */
 static unsigned int hotkey_poll_freq = 10; /* Hz */
@@ -1972,10 +2046,12 @@
 
 static int hotkey_autosleep_ack;
 
-static u32 hotkey_orig_mask;
-static u32 hotkey_all_mask;
-static u32 hotkey_reserved_mask;
-static u32 hotkey_mask;
+static u32 hotkey_orig_mask;		/* events the BIOS had enabled */
+static u32 hotkey_all_mask;		/* all events supported in fw */
+static u32 hotkey_reserved_mask;	/* events better left disabled */
+static u32 hotkey_driver_mask;		/* events needed by the driver */
+static u32 hotkey_user_mask;		/* events visible to userspace */
+static u32 hotkey_acpi_mask;		/* events enabled in firmware */
 
 static unsigned int hotkey_report_mode;
 
@@ -1983,6 +2059,9 @@
 
 static struct attribute_set *hotkey_dev_attributes;
 
+static void tpacpi_driver_event(const unsigned int hkey_event);
+static void hotkey_driver_event(const unsigned int scancode);
+
 /* HKEY.MHKG() return bits */
 #define TP_HOTKEY_TABLET_MASK (1 << 3)
 
@@ -2017,24 +2096,53 @@
 }
 
 /*
+ * Reads current event mask from firmware, and updates
+ * hotkey_acpi_mask accordingly.  Also resets any bits
+ * from hotkey_user_mask that are unavailable to be
+ * delivered (shadow requirement of the userspace ABI).
+ *
  * Call with hotkey_mutex held
  */
 static int hotkey_mask_get(void)
 {
-	u32 m = 0;
-
 	if (tp_features.hotkey_mask) {
+		u32 m = 0;
+
 		if (!acpi_evalf(hkey_handle, &m, "DHKN", "d"))
 			return -EIO;
+
+		hotkey_acpi_mask = m;
+	} else {
+		/* no mask support doesn't mean no event support... */
+		hotkey_acpi_mask = hotkey_all_mask;
 	}
-	HOTKEY_CONFIG_CRITICAL_START
-	hotkey_mask = m | (hotkey_source_mask & hotkey_mask);
-	HOTKEY_CONFIG_CRITICAL_END
+
+	/* sync userspace-visible mask */
+	hotkey_user_mask &= (hotkey_acpi_mask | hotkey_source_mask);
 
 	return 0;
 }
 
+static void hotkey_mask_warn_incomplete_mask(void)
+{
+	/* log only what the user can fix... */
+	const u32 wantedmask = hotkey_driver_mask &
+		~(hotkey_acpi_mask | hotkey_source_mask) &
+		(hotkey_all_mask | TPACPI_HKEY_NVRAM_KNOWN_MASK);
+
+	if (wantedmask)
+		printk(TPACPI_NOTICE
+			"required events 0x%08x not enabled!\n",
+			wantedmask);
+}
+
 /*
+ * Set the firmware mask when supported
+ *
+ * Also calls hotkey_mask_get to update hotkey_acpi_mask.
+ *
+ * NOTE: does not set bits in hotkey_user_mask, but may reset them.
+ *
  * Call with hotkey_mutex held
  */
 static int hotkey_mask_set(u32 mask)
@@ -2042,66 +2150,98 @@
 	int i;
 	int rc = 0;
 
-	if (tp_features.hotkey_mask) {
-		if (!tp_warned.hotkey_mask_ff &&
-		    (mask == 0xffff || mask == 0xffffff ||
-		     mask == 0xffffffff)) {
-			tp_warned.hotkey_mask_ff = 1;
-			printk(TPACPI_NOTICE
-			       "setting the hotkey mask to 0x%08x is likely "
-			       "not the best way to go about it\n", mask);
-			printk(TPACPI_NOTICE
-			       "please consider using the driver defaults, "
-			       "and refer to up-to-date thinkpad-acpi "
-			       "documentation\n");
-		}
+	const u32 fwmask = mask & ~hotkey_source_mask;
 
-		HOTKEY_CONFIG_CRITICAL_START
+	if (tp_features.hotkey_mask) {
 		for (i = 0; i < 32; i++) {
-			u32 m = 1 << i;
-			/* enable in firmware mask only keys not in NVRAM
-			 * mode, but enable the key in the cached hotkey_mask
-			 * regardless of mode, or the key will end up
-			 * disabled by hotkey_mask_get() */
 			if (!acpi_evalf(hkey_handle,
 					NULL, "MHKM", "vdd", i + 1,
-					!!((mask & ~hotkey_source_mask) & m))) {
+					!!(mask & (1 << i)))) {
 				rc = -EIO;
 				break;
-			} else {
-				hotkey_mask = (hotkey_mask & ~m) | (mask & m);
 			}
 		}
-		HOTKEY_CONFIG_CRITICAL_END
-
-		/* hotkey_mask_get must be called unconditionally below */
-		if (!hotkey_mask_get() && !rc &&
-		    (hotkey_mask & ~hotkey_source_mask) !=
-		     (mask & ~hotkey_source_mask)) {
-			printk(TPACPI_NOTICE
-			       "requested hot key mask 0x%08x, but "
-			       "firmware forced it to 0x%08x\n",
-			       mask, hotkey_mask);
-		}
-	} else {
-#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
-		HOTKEY_CONFIG_CRITICAL_START
-		hotkey_mask = mask & hotkey_source_mask;
-		HOTKEY_CONFIG_CRITICAL_END
-		hotkey_mask_get();
-		if (hotkey_mask != mask) {
-			printk(TPACPI_NOTICE
-			       "requested hot key mask 0x%08x, "
-			       "forced to 0x%08x (NVRAM poll mask is "
-			       "0x%08x): no firmware mask support\n",
-			       mask, hotkey_mask, hotkey_source_mask);
-		}
-#else
-		hotkey_mask_get();
-		rc = -ENXIO;
-#endif /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */
 	}
 
+	/*
+	 * We *must* make an unconditional call to hotkey_mask_get to
+	 * refresh hotkey_acpi_mask and update hotkey_user_mask
+	 *
+	 * Take the opportunity to also log when we cannot _enable_
+	 * a given event.
+	 */
+	if (!hotkey_mask_get() && !rc && (fwmask & ~hotkey_acpi_mask)) {
+		printk(TPACPI_NOTICE
+		       "asked for hotkey mask 0x%08x, but "
+		       "firmware forced it to 0x%08x\n",
+		       fwmask, hotkey_acpi_mask);
+	}
+
+	hotkey_mask_warn_incomplete_mask();
+
+	return rc;
+}
+
+/*
+ * Sets hotkey_user_mask and tries to set the firmware mask
+ *
+ * Call with hotkey_mutex held
+ */
+static int hotkey_user_mask_set(const u32 mask)
+{
+	int rc;
+
+	/* Give people a chance to notice they are doing something that
+	 * is bound to go boom on their users sooner or later */
+	if (!tp_warned.hotkey_mask_ff &&
+	    (mask == 0xffff || mask == 0xffffff ||
+	     mask == 0xffffffff)) {
+		tp_warned.hotkey_mask_ff = 1;
+		printk(TPACPI_NOTICE
+		       "setting the hotkey mask to 0x%08x is likely "
+		       "not the best way to go about it\n", mask);
+		printk(TPACPI_NOTICE
+		       "please consider using the driver defaults, "
+		       "and refer to up-to-date thinkpad-acpi "
+		       "documentation\n");
+	}
+
+	/* Try to enable what the user asked for, plus whatever we need.
+	 * This syncs everything, but won't enable bits in hotkey_user_mask */
+	rc = hotkey_mask_set((mask | hotkey_driver_mask) & ~hotkey_source_mask);
+
+	/* Enable the available bits in hotkey_user_mask */
+	hotkey_user_mask = mask & (hotkey_acpi_mask | hotkey_source_mask);
+
+	return rc;
+}
+
+/*
+ * Sets the driver hotkey mask.
+ *
+ * Can be called even if the hotkey subdriver is inactive
+ */
+static int tpacpi_hotkey_driver_mask_set(const u32 mask)
+{
+	int rc;
+
+	/* Do the right thing if hotkey_init has not been called yet */
+	if (!tp_features.hotkey) {
+		hotkey_driver_mask = mask;
+		return 0;
+	}
+
+	mutex_lock(&hotkey_mutex);
+
+	HOTKEY_CONFIG_CRITICAL_START
+	hotkey_driver_mask = mask;
+	hotkey_source_mask |= (mask & ~hotkey_all_mask);
+	HOTKEY_CONFIG_CRITICAL_END
+
+	rc = hotkey_mask_set((hotkey_acpi_mask | hotkey_driver_mask) &
+							~hotkey_source_mask);
+	mutex_unlock(&hotkey_mutex);
+
 	return rc;
 }
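
To make the new mask bookkeeping easier to follow, here is a small userspace model of the arithmetic performed by hotkey_mask_get(), hotkey_mask_set() and hotkey_user_mask_set() above. It is a distillation for illustration only; fw_accepts stands in for whatever subset of bits the firmware actually honours, and the masks in main() are made up.

#include <stdint.h>
#include <stdio.h>

static uint32_t hotkey_source_mask;	/* events polled from NVRAM, never sent to fw */
static uint32_t hotkey_driver_mask;	/* events the driver itself needs */
static uint32_t hotkey_acpi_mask;	/* events the firmware actually enabled */
static uint32_t hotkey_user_mask;	/* events userspace will see */

/* model of hotkey_mask_set() + hotkey_mask_get(): the firmware may refuse bits */
static void model_mask_set(uint32_t wanted, uint32_t fw_accepts)
{
	hotkey_acpi_mask = (wanted & ~hotkey_source_mask) & fw_accepts;
	/* shadow requirement: userspace keeps only deliverable bits */
	hotkey_user_mask &= hotkey_acpi_mask | hotkey_source_mask;
}

/* model of hotkey_user_mask_set() */
static void model_user_mask_set(uint32_t mask, uint32_t fw_accepts)
{
	model_mask_set(mask | hotkey_driver_mask, fw_accepts);
	hotkey_user_mask = mask & (hotkey_acpi_mask | hotkey_source_mask);
}

int main(void)
{
	hotkey_source_mask = 0x00700000u;	/* pretend these bits need NVRAM polling */
	hotkey_driver_mask = 0x00010000u;	/* e.g. an event the driver must watch */

	model_user_mask_set(0x0070000fu, /* fw_accepts = */ 0x0000ffffu);
	printf("user=0x%08x acpi=0x%08x\n", hotkey_user_mask, hotkey_acpi_mask);
	return 0;
}
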
 
@@ -2137,11 +2277,10 @@
 	}
 }
 
-static void tpacpi_input_send_key(unsigned int scancode)
+/* Do NOT call without validating scancode first */
+static void tpacpi_input_send_key(const unsigned int scancode)
 {
-	unsigned int keycode;
-
-	keycode = hotkey_keycode_map[scancode];
+	const unsigned int keycode = hotkey_keycode_map[scancode];
 
 	if (keycode != KEY_RESERVED) {
 		mutex_lock(&tpacpi_inputdev_send_mutex);
@@ -2162,19 +2301,28 @@
 	}
 }
 
+/* Do NOT call without validating scancode first */
+static void tpacpi_input_send_key_masked(const unsigned int scancode)
+{
+	hotkey_driver_event(scancode);
+	if (hotkey_user_mask & (1 << scancode))
+		tpacpi_input_send_key(scancode);
+}
+
 #ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
 static struct tp_acpi_drv_struct ibm_hotkey_acpidriver;
 
+/* Do NOT call without validating scancode first */
 static void tpacpi_hotkey_send_key(unsigned int scancode)
 {
-	tpacpi_input_send_key(scancode);
+	tpacpi_input_send_key_masked(scancode);
 	if (hotkey_report_mode < 2) {
 		acpi_bus_generate_proc_event(ibm_hotkey_acpidriver.device,
-						0x80, 0x1001 + scancode);
+				0x80, TP_HKEY_EV_HOTKEY_BASE + scancode);
 	}
 }
 
-static void hotkey_read_nvram(struct tp_nvram_state *n, u32 m)
+static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
 {
 	u8 d;
 
@@ -2210,21 +2358,24 @@
 	}
 }
 
+static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
+					   struct tp_nvram_state *newn,
+					   const u32 event_mask)
+{
+
 #define TPACPI_COMPARE_KEY(__scancode, __member) \
 	do { \
-		if ((mask & (1 << __scancode)) && \
+		if ((event_mask & (1 << __scancode)) && \
 		    oldn->__member != newn->__member) \
-		tpacpi_hotkey_send_key(__scancode); \
+			tpacpi_hotkey_send_key(__scancode); \
 	} while (0)
 
 #define TPACPI_MAY_SEND_KEY(__scancode) \
-	do { if (mask & (1 << __scancode)) \
-		tpacpi_hotkey_send_key(__scancode); } while (0)
+	do { \
+		if (event_mask & (1 << __scancode)) \
+			tpacpi_hotkey_send_key(__scancode); \
+	} while (0)
 
-static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
-					   struct tp_nvram_state *newn,
-					   u32 mask)
-{
 	TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
 	TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
 	TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
@@ -2270,15 +2421,22 @@
 			}
 		}
 	}
-}
 
 #undef TPACPI_COMPARE_KEY
 #undef TPACPI_MAY_SEND_KEY
+}
 
+/*
+ * Polling driver
+ *
+ * We track all events in hotkey_source_mask all the time, since
+ * most of them are edge-based.  We only issue those requested by
+ * hotkey_user_mask or hotkey_driver_mask, though.
+ */
 static int hotkey_kthread(void *data)
 {
 	struct tp_nvram_state s[2];
-	u32 mask;
+	u32 poll_mask, event_mask;
 	unsigned int si, so;
 	unsigned long t;
 	unsigned int change_detector, must_reset;
@@ -2298,10 +2456,12 @@
 	/* Initial state for compares */
 	mutex_lock(&hotkey_thread_data_mutex);
 	change_detector = hotkey_config_change;
-	mask = hotkey_source_mask & hotkey_mask;
+	poll_mask = hotkey_source_mask;
+	event_mask = hotkey_source_mask &
+			(hotkey_driver_mask | hotkey_user_mask);
 	poll_freq = hotkey_poll_freq;
 	mutex_unlock(&hotkey_thread_data_mutex);
-	hotkey_read_nvram(&s[so], mask);
+	hotkey_read_nvram(&s[so], poll_mask);
 
 	while (!kthread_should_stop()) {
 		if (t == 0) {
@@ -2324,15 +2484,17 @@
 			t = 0;
 			change_detector = hotkey_config_change;
 		}
-		mask = hotkey_source_mask & hotkey_mask;
+		poll_mask = hotkey_source_mask;
+		event_mask = hotkey_source_mask &
+				(hotkey_driver_mask | hotkey_user_mask);
 		poll_freq = hotkey_poll_freq;
 		mutex_unlock(&hotkey_thread_data_mutex);
 
-		if (likely(mask)) {
-			hotkey_read_nvram(&s[si], mask);
+		if (likely(poll_mask)) {
+			hotkey_read_nvram(&s[si], poll_mask);
 			if (likely(si != so)) {
 				hotkey_compare_and_issue_event(&s[so], &s[si],
-								mask);
+								event_mask);
 			}
 		}
 
@@ -2364,10 +2526,12 @@
 /* call with hotkey_mutex held */
 static void hotkey_poll_setup(bool may_warn)
 {
-	u32 hotkeys_to_poll = hotkey_source_mask & hotkey_mask;
+	const u32 poll_driver_mask = hotkey_driver_mask & hotkey_source_mask;
+	const u32 poll_user_mask = hotkey_user_mask & hotkey_source_mask;
 
-	if (hotkeys_to_poll != 0 && hotkey_poll_freq > 0 &&
-	    (tpacpi_inputdev->users > 0 || hotkey_report_mode < 2)) {
+	if (hotkey_poll_freq > 0 &&
+	    (poll_driver_mask ||
+	     (poll_user_mask && tpacpi_inputdev->users > 0))) {
 		if (!tpacpi_hotkey_task) {
 			tpacpi_hotkey_task = kthread_run(hotkey_kthread,
 					NULL, TPACPI_NVRAM_KTHREAD_NAME);
@@ -2380,12 +2544,13 @@
 		}
 	} else {
 		hotkey_poll_stop_sync();
-		if (may_warn && hotkeys_to_poll != 0 &&
+		if (may_warn && (poll_driver_mask || poll_user_mask) &&
 		    hotkey_poll_freq == 0) {
 			printk(TPACPI_NOTICE
-				"hot keys 0x%08x require polling, "
-				"which is currently disabled\n",
-				hotkeys_to_poll);
+				"hot keys 0x%08x and/or events 0x%08x "
+				"require polling, which is currently "
+				"disabled\n",
+				poll_user_mask, poll_driver_mask);
 		}
 	}
 }
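
A minimal userspace model of the polling loop shaped by the hotkey_kthread()/hotkey_poll_setup() changes above: sample NVRAM state at poll_freq Hz using poll_mask, but only report edges selected by event_mask. read_nvram_state() is an invented stand-in for the real NVRAM accessor and the bit values are arbitrary.

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static uint32_t read_nvram_state(void)
{
	static uint32_t fake;		/* stand-in: toggles one bit per call */
	return fake ^= 0x2;
}

static void poll_loop(uint32_t poll_mask, uint32_t event_mask,
		      unsigned int poll_freq, int iterations)
{
	uint32_t prev = read_nvram_state() & poll_mask;

	while (iterations-- > 0) {
		uint32_t cur = read_nvram_state() & poll_mask;
		uint32_t changed = (cur ^ prev) & event_mask;
		int bit;

		for (bit = 0; bit < 32; bit++)
			if (changed & (1u << bit))
				printf("event for scancode %d\n", bit);

		prev = cur;
		usleep(1000000 / poll_freq);
	}
}

int main(void)
{
	/* track everything in poll_mask, report only what user/driver asked for */
	poll_loop(/* poll_mask */ 0xffffffffu, /* event_mask */ 0x2, /* Hz */ 10, 3);
	return 0;
}
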
@@ -2403,9 +2568,7 @@
 	if (!freq)
 		hotkey_poll_stop_sync();
 
-	HOTKEY_CONFIG_CRITICAL_START
 	hotkey_poll_freq = freq;
-	HOTKEY_CONFIG_CRITICAL_END
 }
 
 #else /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */
@@ -2440,7 +2603,8 @@
 static void hotkey_inputdev_close(struct input_dev *dev)
 {
 	/* disable hotkey polling when possible */
-	if (tpacpi_lifecycle == TPACPI_LIFE_RUNNING)
+	if (tpacpi_lifecycle == TPACPI_LIFE_RUNNING &&
+	    !(hotkey_source_mask & hotkey_driver_mask))
 		hotkey_poll_setup_safe(false);
 }
 
@@ -2488,15 +2652,7 @@
 			   struct device_attribute *attr,
 			   char *buf)
 {
-	int res;
-
-	if (mutex_lock_killable(&hotkey_mutex))
-		return -ERESTARTSYS;
-	res = hotkey_mask_get();
-	mutex_unlock(&hotkey_mutex);
-
-	return (res)?
-		res : snprintf(buf, PAGE_SIZE, "0x%08x\n", hotkey_mask);
+	return snprintf(buf, PAGE_SIZE, "0x%08x\n", hotkey_user_mask);
 }
 
 static ssize_t hotkey_mask_store(struct device *dev,
@@ -2512,7 +2668,7 @@
 	if (mutex_lock_killable(&hotkey_mutex))
 		return -ERESTARTSYS;
 
-	res = hotkey_mask_set(t);
+	res = hotkey_user_mask_set(t);
 
 #ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
 	hotkey_poll_setup(true);
@@ -2594,6 +2750,8 @@
 			    const char *buf, size_t count)
 {
 	unsigned long t;
+	u32 r_ev;
+	int rc;
 
 	if (parse_strtoul(buf, 0xffffffffUL, &t) ||
 		((t & ~TPACPI_HKEY_NVRAM_KNOWN_MASK) != 0))
@@ -2606,14 +2764,28 @@
 	hotkey_source_mask = t;
 	HOTKEY_CONFIG_CRITICAL_END
 
+	rc = hotkey_mask_set((hotkey_user_mask | hotkey_driver_mask) &
+			~hotkey_source_mask);
 	hotkey_poll_setup(true);
-	hotkey_mask_set(hotkey_mask);
+
+	/* check if events needed by the driver got disabled */
+	r_ev = hotkey_driver_mask & ~(hotkey_acpi_mask & hotkey_all_mask)
+		& ~hotkey_source_mask & TPACPI_HKEY_NVRAM_KNOWN_MASK;
 
 	mutex_unlock(&hotkey_mutex);
 
+	if (rc < 0)
+		printk(TPACPI_ERR "hotkey_source_mask: failed to update the "
+			"firmware event mask!\n");
+
+	if (r_ev)
+		printk(TPACPI_NOTICE "hotkey_source_mask: "
+			"some important events were disabled: "
+			"0x%04x\n", r_ev);
+
 	tpacpi_disclose_usertask("hotkey_source_mask", "set to 0x%08lx\n", t);
 
-	return count;
+	return (rc < 0) ? rc : count;
 }
 
 static struct device_attribute dev_attr_hotkey_source_mask =
@@ -2731,9 +2903,8 @@
 
 static void hotkey_wakeup_reason_notify_change(void)
 {
-	if (tp_features.hotkey_mask)
-		sysfs_notify(&tpacpi_pdev->dev.kobj, NULL,
-			     "wakeup_reason");
+	sysfs_notify(&tpacpi_pdev->dev.kobj, NULL,
+		     "wakeup_reason");
 }
 
 /* sysfs wakeup hotunplug_complete (pollable) -------------------------- */
@@ -2750,9 +2921,8 @@
 
 static void hotkey_wakeup_hotunplug_complete_notify_change(void)
 {
-	if (tp_features.hotkey_mask)
-		sysfs_notify(&tpacpi_pdev->dev.kobj, NULL,
-			     "wakeup_hotunplug_complete");
+	sysfs_notify(&tpacpi_pdev->dev.kobj, NULL,
+		     "wakeup_hotunplug_complete");
 }
 
 /* --------------------------------------------------------------------- */
@@ -2760,27 +2930,19 @@
 static struct attribute *hotkey_attributes[] __initdata = {
 	&dev_attr_hotkey_enable.attr,
 	&dev_attr_hotkey_bios_enabled.attr,
+	&dev_attr_hotkey_bios_mask.attr,
 	&dev_attr_hotkey_report_mode.attr,
-#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
+	&dev_attr_hotkey_wakeup_reason.attr,
+	&dev_attr_hotkey_wakeup_hotunplug_complete.attr,
 	&dev_attr_hotkey_mask.attr,
 	&dev_attr_hotkey_all_mask.attr,
 	&dev_attr_hotkey_recommended_mask.attr,
+#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
 	&dev_attr_hotkey_source_mask.attr,
 	&dev_attr_hotkey_poll_freq.attr,
 #endif
 };
 
-static struct attribute *hotkey_mask_attributes[] __initdata = {
-	&dev_attr_hotkey_bios_mask.attr,
-#ifndef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
-	&dev_attr_hotkey_mask.attr,
-	&dev_attr_hotkey_all_mask.attr,
-	&dev_attr_hotkey_recommended_mask.attr,
-#endif
-	&dev_attr_hotkey_wakeup_reason.attr,
-	&dev_attr_hotkey_wakeup_hotunplug_complete.attr,
-};
-
 /*
  * Sync both the hw and sw blocking state of all switches
  */
@@ -2843,16 +3005,16 @@
 
 	kfree(hotkey_keycode_map);
 
-	if (tp_features.hotkey) {
-		dbg_printk(TPACPI_DBG_EXIT | TPACPI_DBG_HKEY,
-			   "restoring original hot key mask\n");
-		/* no short-circuit boolean operator below! */
-		if ((hotkey_mask_set(hotkey_orig_mask) |
-		     hotkey_status_set(false)) != 0)
-			printk(TPACPI_ERR
-			       "failed to restore hot key mask "
-			       "to BIOS defaults\n");
-	}
+	dbg_printk(TPACPI_DBG_EXIT | TPACPI_DBG_HKEY,
+		   "restoring original HKEY status and mask\n");
+	/* yes, there is a bitwise or below, we want the
+	 * functions to be called even if one of them fails */
+	if (((tp_features.hotkey_mask &&
+	      hotkey_mask_set(hotkey_orig_mask)) |
+	     hotkey_status_set(false)) != 0)
+		printk(TPACPI_ERR
+		       "failed to restore hot key mask "
+		       "to BIOS defaults\n");
 }
 
 static void __init hotkey_unmap(const unsigned int scancode)
@@ -2864,6 +3026,35 @@
 	}
 }
 
+/*
+ * HKEY quirks:
+ *   TPACPI_HK_Q_INIMASK:	Supports FN+F3,FN+F4,FN+F12
+ */
+
+#define	TPACPI_HK_Q_INIMASK	0x0001
+
+static const struct tpacpi_quirk tpacpi_hotkey_qtable[] __initconst = {
+	TPACPI_Q_IBM('I', 'H', TPACPI_HK_Q_INIMASK), /* 600E */
+	TPACPI_Q_IBM('I', 'N', TPACPI_HK_Q_INIMASK), /* 600E */
+	TPACPI_Q_IBM('I', 'D', TPACPI_HK_Q_INIMASK), /* 770, 770E, 770ED */
+	TPACPI_Q_IBM('I', 'W', TPACPI_HK_Q_INIMASK), /* A20m */
+	TPACPI_Q_IBM('I', 'V', TPACPI_HK_Q_INIMASK), /* A20p */
+	TPACPI_Q_IBM('1', '0', TPACPI_HK_Q_INIMASK), /* A21e, A22e */
+	TPACPI_Q_IBM('K', 'U', TPACPI_HK_Q_INIMASK), /* A21e */
+	TPACPI_Q_IBM('K', 'X', TPACPI_HK_Q_INIMASK), /* A21m, A22m */
+	TPACPI_Q_IBM('K', 'Y', TPACPI_HK_Q_INIMASK), /* A21p, A22p */
+	TPACPI_Q_IBM('1', 'B', TPACPI_HK_Q_INIMASK), /* A22e */
+	TPACPI_Q_IBM('1', '3', TPACPI_HK_Q_INIMASK), /* A22m */
+	TPACPI_Q_IBM('1', 'E', TPACPI_HK_Q_INIMASK), /* A30/p (0) */
+	TPACPI_Q_IBM('1', 'C', TPACPI_HK_Q_INIMASK), /* R30 */
+	TPACPI_Q_IBM('1', 'F', TPACPI_HK_Q_INIMASK), /* R31 */
+	TPACPI_Q_IBM('I', 'Y', TPACPI_HK_Q_INIMASK), /* T20 */
+	TPACPI_Q_IBM('K', 'Z', TPACPI_HK_Q_INIMASK), /* T21 */
+	TPACPI_Q_IBM('1', '6', TPACPI_HK_Q_INIMASK), /* T22 */
+	TPACPI_Q_IBM('I', 'Z', TPACPI_HK_Q_INIMASK), /* X20, X21 */
+	TPACPI_Q_IBM('1', 'D', TPACPI_HK_Q_INIMASK), /* X22, X23, X24 */
+};
+
 static int __init hotkey_init(struct ibm_init_struct *iibm)
 {
 	/* Requirements for changing the default keymaps:
@@ -2906,9 +3097,7 @@
 		KEY_UNKNOWN,	/* 0x0D: FN+INSERT */
 		KEY_UNKNOWN,	/* 0x0E: FN+DELETE */
 
-		/* brightness: firmware always reacts to them, unless
-		 * X.org did some tricks in the radeon BIOS scratch
-		 * registers of *some* models */
+		/* brightness: firmware always reacts to them */
 		KEY_RESERVED,	/* 0x0F: FN+HOME (brightness up) */
 		KEY_RESERVED,	/* 0x10: FN+END (brightness down) */
 
@@ -2983,6 +3172,8 @@
 	int status;
 	int hkeyv;
 
+	unsigned long quirks;
+
 	vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
 			"initializing hotkey subdriver\n");
 
@@ -3008,9 +3199,16 @@
 	if (!tp_features.hotkey)
 		return 1;
 
+	quirks = tpacpi_check_quirks(tpacpi_hotkey_qtable,
+				     ARRAY_SIZE(tpacpi_hotkey_qtable));
+
 	tpacpi_disable_brightness_delay();
 
-	hotkey_dev_attributes = create_attr_set(13, NULL);
+	/* MUST have enough space for all attributes to be added to
+	 * hotkey_dev_attributes */
+	hotkey_dev_attributes = create_attr_set(
+					ARRAY_SIZE(hotkey_attributes) + 2,
+					NULL);
 	if (!hotkey_dev_attributes)
 		return -ENOMEM;
 	res = add_many_to_attr_set(hotkey_dev_attributes,
@@ -3019,7 +3217,7 @@
 	if (res)
 		goto err_exit;
 
-	/* mask not supported on 570, 600e/x, 770e, 770x, A21e, A2xm/p,
+	/* mask not supported on 600e/x, 770e, 770x, A21e, A2xm/p,
 	   A30, R30, R31, T20-22, X20-21, X22-24.  Detected by checking
 	   for HKEY interface version 0x100 */
 	if (acpi_evalf(hkey_handle, &hkeyv, "MHKV", "qd")) {
@@ -3033,10 +3231,22 @@
 			 * MHKV 0x100 in A31, R40, R40e,
 			 * T4x, X31, and later
 			 */
-			tp_features.hotkey_mask = 1;
 			vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
 				"firmware HKEY interface version: 0x%x\n",
 				hkeyv);
+
+			/* Paranoia check AND init hotkey_all_mask */
+			if (!acpi_evalf(hkey_handle, &hotkey_all_mask,
+					"MHKA", "qd")) {
+				printk(TPACPI_ERR
+				       "missing MHKA handler, "
+				       "please report this to %s\n",
+				       TPACPI_MAIL);
+				/* Fallback: pre-init for FN+F3,F4,F12 */
+				hotkey_all_mask = 0x080cU;
+			} else {
+				tp_features.hotkey_mask = 1;
+			}
 		}
 	}
 
@@ -3044,32 +3254,23 @@
 		"hotkey masks are %s\n",
 		str_supported(tp_features.hotkey_mask));
 
-	if (tp_features.hotkey_mask) {
-		if (!acpi_evalf(hkey_handle, &hotkey_all_mask,
-				"MHKA", "qd")) {
-			printk(TPACPI_ERR
-			       "missing MHKA handler, "
-			       "please report this to %s\n",
-			       TPACPI_MAIL);
-			/* FN+F12, FN+F4, FN+F3 */
-			hotkey_all_mask = 0x080cU;
-		}
-	}
+	/* Init hotkey_all_mask if not initialized yet */
+	if (!tp_features.hotkey_mask && !hotkey_all_mask &&
+	    (quirks & TPACPI_HK_Q_INIMASK))
+		hotkey_all_mask = 0x080cU;  /* FN+F12, FN+F4, FN+F3 */
 
-	/* hotkey_source_mask *must* be zero for
-	 * the first hotkey_mask_get */
+	/* Init hotkey_acpi_mask and hotkey_orig_mask */
 	if (tp_features.hotkey_mask) {
+		/* hotkey_source_mask *must* be zero for
+		 * the first hotkey_mask_get to return hotkey_orig_mask */
 		res = hotkey_mask_get();
 		if (res)
 			goto err_exit;
 
-		hotkey_orig_mask = hotkey_mask;
-		res = add_many_to_attr_set(
-				hotkey_dev_attributes,
-				hotkey_mask_attributes,
-				ARRAY_SIZE(hotkey_mask_attributes));
-		if (res)
-			goto err_exit;
+		hotkey_orig_mask = hotkey_acpi_mask;
+	} else {
+		hotkey_orig_mask = hotkey_all_mask;
+		hotkey_acpi_mask = hotkey_all_mask;
 	}
 
 #ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
@@ -3183,14 +3384,9 @@
 	}
 
 #ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
-	if (tp_features.hotkey_mask) {
-		hotkey_source_mask = TPACPI_HKEY_NVRAM_GOOD_MASK
-					& ~hotkey_all_mask
-					& ~hotkey_reserved_mask;
-	} else {
-		hotkey_source_mask = TPACPI_HKEY_NVRAM_GOOD_MASK
-					& ~hotkey_reserved_mask;
-	}
+	hotkey_source_mask = TPACPI_HKEY_NVRAM_GOOD_MASK
+				& ~hotkey_all_mask
+				& ~hotkey_reserved_mask;
 
 	vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
 		    "hotkey source mask 0x%08x, polling freq %u\n",
@@ -3204,13 +3400,18 @@
 		hotkey_exit();
 		return res;
 	}
-	res = hotkey_mask_set(((hotkey_all_mask | hotkey_source_mask)
-				& ~hotkey_reserved_mask)
-				| hotkey_orig_mask);
+	res = hotkey_mask_set(((hotkey_all_mask & ~hotkey_reserved_mask)
+			       | hotkey_driver_mask)
+			      & ~hotkey_source_mask);
 	if (res < 0 && res != -ENXIO) {
 		hotkey_exit();
 		return res;
 	}
+	hotkey_user_mask = (hotkey_acpi_mask | hotkey_source_mask)
+				& ~hotkey_reserved_mask;
+	vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
+		"initial masks: user=0x%08x, fw=0x%08x, poll=0x%08x\n",
+		hotkey_user_mask, hotkey_acpi_mask, hotkey_source_mask);
 
 	dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
 			"legacy ibm/hotkey event reporting over procfs %s\n",
@@ -3245,7 +3446,7 @@
 	if (scancode > 0 && scancode < 0x21) {
 		scancode--;
 		if (!(hotkey_source_mask & (1 << scancode))) {
-			tpacpi_input_send_key(scancode);
+			tpacpi_input_send_key_masked(scancode);
 			*send_acpi_ev = false;
 		} else {
 			*ignore_acpi_ev = true;
@@ -3264,20 +3465,20 @@
 	*ignore_acpi_ev = false;
 
 	switch (hkey) {
-	case 0x2304: /* suspend, undock */
-	case 0x2404: /* hibernation, undock */
+	case TP_HKEY_EV_WKUP_S3_UNDOCK: /* suspend, undock */
+	case TP_HKEY_EV_WKUP_S4_UNDOCK: /* hibernation, undock */
 		hotkey_wakeup_reason = TP_ACPI_WAKEUP_UNDOCK;
 		*ignore_acpi_ev = true;
 		break;
 
-	case 0x2305: /* suspend, bay eject */
-	case 0x2405: /* hibernation, bay eject */
+	case TP_HKEY_EV_WKUP_S3_BAYEJ: /* suspend, bay eject */
+	case TP_HKEY_EV_WKUP_S4_BAYEJ: /* hibernation, bay eject */
 		hotkey_wakeup_reason = TP_ACPI_WAKEUP_BAYEJ;
 		*ignore_acpi_ev = true;
 		break;
 
-	case 0x2313: /* Battery on critical low level (S3) */
-	case 0x2413: /* Battery on critical low level (S4) */
+	case TP_HKEY_EV_WKUP_S3_BATLOW: /* Battery on critical low level/S3 */
+	case TP_HKEY_EV_WKUP_S4_BATLOW: /* Battery on critical low level/S4 */
 		printk(TPACPI_ALERT
 			"EMERGENCY WAKEUP: battery almost empty\n");
 		/* how to auto-heal: */
@@ -3307,21 +3508,21 @@
 	*ignore_acpi_ev = false;
 
 	switch (hkey) {
-	case 0x5010: /* Lenovo new BIOS: brightness changed */
-	case 0x500b: /* X61t: tablet pen inserted into bay */
-	case 0x500c: /* X61t: tablet pen removed from bay */
+	case TP_HKEY_EV_PEN_INSERTED:  /* X61t: tablet pen inserted into bay */
+	case TP_HKEY_EV_PEN_REMOVED:   /* X61t: tablet pen removed from bay */
 		return true;
 
-	case 0x5009: /* X41t-X61t: swivel up (tablet mode) */
-	case 0x500a: /* X41t-X61t: swivel down (normal mode) */
+	case TP_HKEY_EV_TABLET_TABLET:   /* X41t-X61t: tablet mode */
+	case TP_HKEY_EV_TABLET_NOTEBOOK: /* X41t-X61t: normal mode */
 		tpacpi_input_send_tabletsw();
 		hotkey_tablet_mode_notify_change();
 		*send_acpi_ev = false;
 		return true;
 
-	case 0x5001:
-	case 0x5002:
-		/* LID switch events.  Do not propagate */
+	case TP_HKEY_EV_LID_CLOSE:	/* Lid closed */
+	case TP_HKEY_EV_LID_OPEN:	/* Lid opened */
+	case TP_HKEY_EV_BRGHT_CHANGED:	/* brightness changed */
+		/* do not propagate these events */
 		*ignore_acpi_ev = true;
 		return true;
 
@@ -3339,30 +3540,30 @@
 	*ignore_acpi_ev = false;
 
 	switch (hkey) {
-	case 0x6011:
+	case TP_HKEY_EV_ALARM_BAT_HOT:
 		printk(TPACPI_CRIT
 			"THERMAL ALARM: battery is too hot!\n");
 		/* recommended action: warn user through gui */
 		return true;
-	case 0x6012:
+	case TP_HKEY_EV_ALARM_BAT_XHOT:
 		printk(TPACPI_ALERT
 			"THERMAL EMERGENCY: battery is extremely hot!\n");
 		/* recommended action: immediate sleep/hibernate */
 		return true;
-	case 0x6021:
+	case TP_HKEY_EV_ALARM_SENSOR_HOT:
 		printk(TPACPI_CRIT
 			"THERMAL ALARM: "
 			"a sensor reports something is too hot!\n");
 		/* recommended action: warn user through gui, that */
 		/* some internal component is too hot */
 		return true;
-	case 0x6022:
+	case TP_HKEY_EV_ALARM_SENSOR_XHOT:
 		printk(TPACPI_ALERT
 			"THERMAL EMERGENCY: "
 			"a sensor reports something is extremely hot!\n");
 		/* recommended action: immediate sleep/hibernate */
 		return true;
-	case 0x6030:
+	case TP_HKEY_EV_THM_TABLE_CHANGED:
 		printk(TPACPI_INFO
 			"EC reports that Thermal Table has changed\n");
 		/* recommended action: do nothing, we don't have
@@ -3420,7 +3621,7 @@
 			break;
 		case 3:
 			/* 0x3000-0x3FFF: bay-related wakeups */
-			if (hkey == 0x3003) {
+			if (hkey == TP_HKEY_EV_BAYEJ_ACK) {
 				hotkey_autosleep_ack = 1;
 				printk(TPACPI_INFO
 				       "bay ejected\n");
@@ -3432,7 +3633,7 @@
 			break;
 		case 4:
 			/* 0x4000-0x4FFF: dock-related wakeups */
-			if (hkey == 0x4003) {
+			if (hkey == TP_HKEY_EV_UNDOCK_ACK) {
 				hotkey_autosleep_ack = 1;
 				printk(TPACPI_INFO
 				       "undocked\n");
@@ -3454,7 +3655,8 @@
 			break;
 		case 7:
 			/* 0x7000-0x7FFF: misc */
-			if (tp_features.hotkey_wlsw && hkey == 0x7000) {
+			if (tp_features.hotkey_wlsw &&
+					hkey == TP_HKEY_EV_RFKILL_CHANGED) {
 				tpacpi_send_radiosw_update();
 				send_acpi_ev = 0;
 				known_ev = true;
@@ -3500,10 +3702,12 @@
 {
 	tpacpi_disable_brightness_delay();
 
-	if (hotkey_mask_get())
+	if (hotkey_status_set(true) < 0 ||
+	    hotkey_mask_set(hotkey_acpi_mask) < 0)
 		printk(TPACPI_ERR
-		       "error while trying to read hot key mask "
-		       "from firmware\n");
+		       "error while attempting to reset the event "
+		       "firmware interface\n");
+
 	tpacpi_send_radiosw_update();
 	hotkey_tablet_mode_notify_change();
 	hotkey_wakeup_reason_notify_change();
@@ -3532,8 +3736,8 @@
 		return res;
 
 	len += sprintf(p + len, "status:\t\t%s\n", enabled(status, 0));
-	if (tp_features.hotkey_mask) {
-		len += sprintf(p + len, "mask:\t\t0x%08x\n", hotkey_mask);
+	if (hotkey_all_mask) {
+		len += sprintf(p + len, "mask:\t\t0x%08x\n", hotkey_user_mask);
 		len += sprintf(p + len,
 			       "commands:\tenable, disable, reset, <mask>\n");
 	} else {
@@ -3570,7 +3774,7 @@
 	if (mutex_lock_killable(&hotkey_mutex))
 		return -ERESTARTSYS;
 
-	mask = hotkey_mask;
+	mask = hotkey_user_mask;
 
 	res = 0;
 	while ((cmd = next_cmd(&buf))) {
@@ -3592,12 +3796,11 @@
 		}
 	}
 
-	if (!res)
+	if (!res) {
 		tpacpi_disclose_usertask("procfs hotkey",
 			"set mask to 0x%08x\n", mask);
-
-	if (!res && mask != hotkey_mask)
-		res = hotkey_mask_set(mask);
+		res = hotkey_user_mask_set(mask);
+	}
 
 errexit:
 	mutex_unlock(&hotkey_mutex);
@@ -6010,8 +6213,10 @@
 					TPACPI_BACKLIGHT_DEV_NAME, NULL, NULL,
 					&ibm_backlight_data);
 	if (IS_ERR(ibm_backlight_device)) {
+		int rc = PTR_ERR(ibm_backlight_device);
+		ibm_backlight_device = NULL;
 		printk(TPACPI_ERR "Could not register backlight device\n");
-		return PTR_ERR(ibm_backlight_device);
+		return rc;
 	}
 	vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_BRGHT,
 			"brightness is supported\n");
@@ -7499,6 +7704,21 @@
  ****************************************************************************
  ****************************************************************************/
 
+/*
+ * HKEY event callouts for other subdrivers go here
+ * (yes, it is ugly, but it is quick, safe, and gets the job done)
+ */
+static void tpacpi_driver_event(const unsigned int hkey_event)
+{
+}
+
+static void hotkey_driver_event(const unsigned int scancode)
+{
+	tpacpi_driver_event(TP_HKEY_EV_HOTKEY_BASE + scancode);
+}
+
 /* sysfs name ---------------------------------------------------------- */
 static ssize_t thinkpad_acpi_pdev_name_show(struct device *dev,
 			   struct device_attribute *attr,
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index c07fdb9..83b8b5a 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -153,6 +153,7 @@
 	acpi_handle temp = NULL;
 	acpi_status status;
 	struct pnp_dev *dev;
+	struct acpi_hardware_id *id;
 
 	/*
 	 * If a PnPacpi device is not present , the device
@@ -193,15 +194,12 @@
 	if (dev->capabilities & PNP_CONFIGURABLE)
 		pnpacpi_parse_resource_option_data(dev);
 
-	if (device->flags.compatible_ids) {
-		struct acpica_device_id_list *cid_list = device->pnp.cid_list;
-		int i;
-
-		for (i = 0; i < cid_list->count; i++) {
-			if (!ispnpidacpi(cid_list->ids[i].string))
-				continue;
-			pnp_add_id(dev, cid_list->ids[i].string);
-		}
+	list_for_each_entry(id, &device->pnp.ids, list) {
+		if (!strcmp(id->id, acpi_device_hid(device)))
+			continue;
+		if (!ispnpidacpi(id->id))
+			continue;
+		pnp_add_id(dev, id->id);
 	}
 
 	/* clear out the damaged flags */
@@ -232,9 +230,8 @@
 	struct pnp_dev *pnp = _pnp;
 
 	/* true means it matched */
-	return acpi->flags.hardware_id
-	    && !acpi_get_physical_device(acpi->handle)
-	    && compare_pnp_id(pnp->id, acpi->pnp.hardware_id);
+	return !acpi_get_physical_device(acpi->handle)
+	    && compare_pnp_id(pnp->id, acpi_device_hid(acpi));
 }
 
 static int __init acpi_pnp_find_device(struct device *dev, acpi_handle * handle)
diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
index 614b3a7..3441b3f 100644
--- a/drivers/scsi/pmcraid.h
+++ b/drivers/scsi/pmcraid.h
@@ -26,7 +26,6 @@
 #include <linux/completion.h>
 #include <linux/list.h>
 #include <scsi/scsi.h>
-#include <linux/kref.h>
 #include <scsi/scsi_cmnd.h>
 #include <linux/cdev.h>
 #include <net/netlink.h>
diff --git a/drivers/usb/misc/sisusbvga/sisusb_init.c b/drivers/usb/misc/sisusbvga/sisusb_init.c
index 273de5d..0ab9907 100644
--- a/drivers/usb/misc/sisusbvga/sisusb_init.c
+++ b/drivers/usb/misc/sisusbvga/sisusb_init.c
@@ -43,7 +43,6 @@
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
-#include <linux/kref.h>
 
 #include "sisusb.h"
 
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index 90861cd..09bfa96 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -31,6 +31,13 @@
 	  Say y here to support the LCD panels usually found on SHARP
 	  corgi (C7x0) and spitz (Cxx00) models.
 
+config LCD_LMS283GF05
+	tristate "Samsung LMS283GF05 LCD"
+	depends on LCD_CLASS_DEVICE && SPI_MASTER && GENERIC_GPIO
+	help
+	  SPI driver for Samsung LMS283GF05. This provides basic support
+	  for powering the LCD up/down through a sysfs interface.
+
 config LCD_LTV350QV
 	tristate "Samsung LTV350QV LCD Panel"
 	depends on LCD_CLASS_DEVICE && SPI_MASTER
@@ -229,3 +236,29 @@
 	help
 	  If you have a Tabletkiosk Sahara Touch-iT, say y to enable the
 	  backlight driver.
+
+config BACKLIGHT_WM831X
+	tristate "WM831x PMIC Backlight Driver"
+	depends on BACKLIGHT_CLASS_DEVICE && MFD_WM831X
+	help
+	  If you have a backlight driven by the ISINK and DCDC of a
+	  WM831x PMIC, say y to enable the backlight driver for it.
+
+config BACKLIGHT_ADX
+	tristate "Avionic Design Xanthos Backlight Driver"
+	depends on BACKLIGHT_CLASS_DEVICE && ARCH_PXA_ADX
+	default y
+	help
+	  Say Y to enable the backlight driver on Avionic Design Xanthos-based
+	  boards.
+
+config BACKLIGHT_ADP5520
+	tristate "Backlight Driver for ADP5520/ADP5501 using WLED"
+	depends on BACKLIGHT_CLASS_DEVICE && PMIC_ADP5520
+	help
+	  If you have an LCD backlight connected to the BST/BL_SNK output of
+	  ADP5520 or ADP5501, say Y here to enable this driver.
+
+	  To compile this driver as a module, choose M here: the module will
+	  be called adp5520_bl.
+
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index 4eb178c..9a40554 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -3,6 +3,7 @@
 obj-$(CONFIG_LCD_CLASS_DEVICE)     += lcd.o
 obj-$(CONFIG_LCD_CORGI)		   += corgi_lcd.o
 obj-$(CONFIG_LCD_HP700)		   += jornada720_lcd.o
+obj-$(CONFIG_LCD_LMS283GF05)	   += lms283gf05.o
 obj-$(CONFIG_LCD_LTV350QV)	   += ltv350qv.o
 obj-$(CONFIG_LCD_ILI9320)	   += ili9320.o
 obj-$(CONFIG_LCD_PLATFORM)	   += platform_lcd.o
@@ -24,4 +25,7 @@
 obj-$(CONFIG_BACKLIGHT_MBP_NVIDIA) += mbp_nvidia_bl.o
 obj-$(CONFIG_BACKLIGHT_TOSA)	+= tosa_bl.o
 obj-$(CONFIG_BACKLIGHT_SAHARA)	+= kb3886_bl.o
+obj-$(CONFIG_BACKLIGHT_WM831X)	+= wm831x_bl.o
+obj-$(CONFIG_BACKLIGHT_ADX)    += adx_bl.o
+obj-$(CONFIG_BACKLIGHT_ADP5520)	+= adp5520_bl.o
 
diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c
new file mode 100644
index 0000000..ad05da5
--- /dev/null
+++ b/drivers/video/backlight/adp5520_bl.c
@@ -0,0 +1,377 @@
+/*
+ * Backlight driver for Analog Devices ADP5520/ADP5501 MFD PMICs
+ *
+ * Copyright 2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/fb.h>
+#include <linux/backlight.h>
+#include <linux/mfd/adp5520.h>
+
+struct adp5520_bl {
+	struct device *master;
+	struct adp5520_backlight_platfrom_data *pdata;
+	struct mutex lock;
+	unsigned long cached_daylight_max;
+	int id;
+	int current_brightness;
+};
+
+static int adp5520_bl_set(struct backlight_device *bl, int brightness)
+{
+	struct adp5520_bl *data = bl_get_data(bl);
+	struct device *master = data->master;
+	int ret = 0;
+
+	if (data->pdata->en_ambl_sens) {
+		if ((brightness > 0) && (brightness < ADP5020_MAX_BRIGHTNESS)) {
+			/* Disable Ambient Light auto adjust */
+			ret |= adp5520_clr_bits(master, BL_CONTROL,
+					BL_AUTO_ADJ);
+			ret |= adp5520_write(master, DAYLIGHT_MAX, brightness);
+		} else {
+			/*
+			 * MAX_BRIGHTNESS -> Enable Ambient Light auto adjust
+			 * restore daylight l3 sysfs brightness
+			 */
+			ret |= adp5520_write(master, DAYLIGHT_MAX,
+					 data->cached_daylight_max);
+			ret |= adp5520_set_bits(master, BL_CONTROL,
+					 BL_AUTO_ADJ);
+		}
+	} else {
+		ret |= adp5520_write(master, DAYLIGHT_MAX, brightness);
+	}
+
+	if (data->current_brightness && brightness == 0)
+		ret |= adp5520_set_bits(master,
+				MODE_STATUS, DIM_EN);
+	else if (data->current_brightness == 0 && brightness)
+		ret |= adp5520_clr_bits(master,
+				MODE_STATUS, DIM_EN);
+
+	if (!ret)
+		data->current_brightness = brightness;
+
+	return ret;
+}
+
+static int adp5520_bl_update_status(struct backlight_device *bl)
+{
+	int brightness = bl->props.brightness;
+	if (bl->props.power != FB_BLANK_UNBLANK)
+		brightness = 0;
+
+	if (bl->props.fb_blank != FB_BLANK_UNBLANK)
+		brightness = 0;
+
+	return adp5520_bl_set(bl, brightness);
+}
+
+static int adp5520_bl_get_brightness(struct backlight_device *bl)
+{
+	struct adp5520_bl *data = bl_get_data(bl);
+	int error;
+	uint8_t reg_val;
+
+	error = adp5520_read(data->master, BL_VALUE, &reg_val);
+
+	return error ? data->current_brightness : reg_val;
+}
+
+static struct backlight_ops adp5520_bl_ops = {
+	.update_status	= adp5520_bl_update_status,
+	.get_brightness	= adp5520_bl_get_brightness,
+};
+
+static int adp5520_bl_setup(struct backlight_device *bl)
+{
+	struct adp5520_bl *data = bl_get_data(bl);
+	struct device *master = data->master;
+	struct adp5520_backlight_platfrom_data *pdata = data->pdata;
+	int ret = 0;
+
+	ret |= adp5520_write(master, DAYLIGHT_MAX, pdata->l1_daylight_max);
+	ret |= adp5520_write(master, DAYLIGHT_DIM, pdata->l1_daylight_dim);
+
+	if (pdata->en_ambl_sens) {
+		data->cached_daylight_max = pdata->l1_daylight_max;
+		ret |= adp5520_write(master, OFFICE_MAX, pdata->l2_office_max);
+		ret |= adp5520_write(master, OFFICE_DIM, pdata->l2_office_dim);
+		ret |= adp5520_write(master, DARK_MAX, pdata->l3_dark_max);
+		ret |= adp5520_write(master, DARK_DIM, pdata->l3_dark_dim);
+		ret |= adp5520_write(master, L2_TRIP, pdata->l2_trip);
+		ret |= adp5520_write(master, L2_HYS, pdata->l2_hyst);
+		ret |= adp5520_write(master, L3_TRIP, pdata->l3_trip);
+		ret |= adp5520_write(master, L3_HYS, pdata->l3_hyst);
+		ret |= adp5520_write(master, ALS_CMPR_CFG,
+			ALS_CMPR_CFG_VAL(pdata->abml_filt, L3_EN));
+	}
+
+	ret |= adp5520_write(master, BL_CONTROL,
+			BL_CTRL_VAL(pdata->fade_led_law, pdata->en_ambl_sens));
+
+	ret |= adp5520_write(master, BL_FADE, FADE_VAL(pdata->fade_in,
+			pdata->fade_out));
+
+	ret |= adp5520_set_bits(master, MODE_STATUS, BL_EN | DIM_EN);
+
+	return ret;
+}
+
+static ssize_t adp5520_show(struct device *dev, char *buf, int reg)
+{
+	struct adp5520_bl *data = dev_get_drvdata(dev);
+	int error;
+	uint8_t reg_val;
+
+	mutex_lock(&data->lock);
+	error = adp5520_read(data->master, reg, &reg_val);
+	mutex_unlock(&data->lock);
+
+	if (error < 0)
+		return error;
+
+	return sprintf(buf, "%u\n", reg_val);
+}
+
+static ssize_t adp5520_store(struct device *dev, const char *buf,
+			 size_t count, int reg)
+{
+	struct adp5520_bl *data = dev_get_drvdata(dev);
+	unsigned long val;
+	int ret;
+
+	ret = strict_strtoul(buf, 10, &val);
+	if (ret)
+		return ret;
+
+	mutex_lock(&data->lock);
+	adp5520_write(data->master, reg, val);
+	mutex_unlock(&data->lock);
+
+	return count;
+}
+
+static ssize_t adp5520_bl_dark_max_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return adp5520_show(dev, buf, DARK_MAX);
+}
+
+static ssize_t adp5520_bl_dark_max_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	return adp5520_store(dev, buf, count, DARK_MAX);
+}
+static DEVICE_ATTR(dark_max, 0664, adp5520_bl_dark_max_show,
+			adp5520_bl_dark_max_store);
+
+static ssize_t adp5520_bl_office_max_show(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	return adp5520_show(dev, buf, OFFICE_MAX);
+}
+
+static ssize_t adp5520_bl_office_max_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	return adp5520_store(dev, buf, count, OFFICE_MAX);
+}
+static DEVICE_ATTR(office_max, 0664, adp5520_bl_office_max_show,
+			adp5520_bl_office_max_store);
+
+static ssize_t adp5520_bl_daylight_max_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	return adp5520_show(dev, buf, DAYLIGHT_MAX);
+}
+
+static ssize_t adp5520_bl_daylight_max_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct adp5520_bl *data = dev_get_drvdata(dev);
+	int ret;
+
+	ret = strict_strtoul(buf, 10, &data->cached_daylight_max);
+	if (ret < 0)
+		return ret;
+
+	return adp5520_store(dev, buf, count, DAYLIGHT_MAX);
+}
+static DEVICE_ATTR(daylight_max, 0664, adp5520_bl_daylight_max_show,
+			adp5520_bl_daylight_max_store);
+
+static ssize_t adp5520_bl_dark_dim_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	return adp5520_show(dev, buf, DARK_DIM);
+}
+
+static ssize_t adp5520_bl_dark_dim_store(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t count)
+{
+	return adp5520_store(dev, buf, count, DARK_DIM);
+}
+static DEVICE_ATTR(dark_dim, 0664, adp5520_bl_dark_dim_show,
+			adp5520_bl_dark_dim_store);
+
+static ssize_t adp5520_bl_office_dim_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	return adp5520_show(dev, buf, OFFICE_DIM);
+}
+
+static ssize_t adp5520_bl_office_dim_store(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t count)
+{
+	return adp5520_store(dev, buf, count, OFFICE_DIM);
+}
+static DEVICE_ATTR(office_dim, 0664, adp5520_bl_office_dim_show,
+			adp5520_bl_office_dim_store);
+
+static ssize_t adp5520_bl_daylight_dim_show(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	return adp5520_show(dev, buf, DAYLIGHT_DIM);
+}
+
+static ssize_t adp5520_bl_daylight_dim_store(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t count)
+{
+	return adp5520_store(dev, buf, count, DAYLIGHT_DIM);
+}
+static DEVICE_ATTR(daylight_dim, 0664, adp5520_bl_daylight_dim_show,
+			adp5520_bl_daylight_dim_store);
+
+static struct attribute *adp5520_bl_attributes[] = {
+	&dev_attr_dark_max.attr,
+	&dev_attr_dark_dim.attr,
+	&dev_attr_office_max.attr,
+	&dev_attr_office_dim.attr,
+	&dev_attr_daylight_max.attr,
+	&dev_attr_daylight_dim.attr,
+	NULL
+};
+
+static const struct attribute_group adp5520_bl_attr_group = {
+	.attrs = adp5520_bl_attributes,
+};
+
+static int __devinit adp5520_bl_probe(struct platform_device *pdev)
+{
+	struct backlight_device *bl;
+	struct adp5520_bl *data;
+	int ret = 0;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (data == NULL)
+		return -ENOMEM;
+
+	data->master = pdev->dev.parent;
+	data->pdata = pdev->dev.platform_data;
+
+	if (data->pdata  == NULL) {
+		dev_err(&pdev->dev, "missing platform data\n");
+		kfree(data);
+		return -ENODEV;
+	}
+
+	data->id = pdev->id;
+	data->current_brightness = 0;
+
+	mutex_init(&data->lock);
+
+	bl = backlight_device_register(pdev->name, data->master,
+			data, &adp5520_bl_ops);
+	if (IS_ERR(bl)) {
+		dev_err(&pdev->dev, "failed to register backlight\n");
+		kfree(data);
+		return PTR_ERR(bl);
+	}
+
+	bl->props.max_brightness =
+		bl->props.brightness = ADP5020_MAX_BRIGHTNESS;
+
+	if (data->pdata->en_ambl_sens)
+		ret = sysfs_create_group(&bl->dev.kobj,
+			&adp5520_bl_attr_group);
+
+	if (ret) {
+		dev_err(&pdev->dev, "failed to register sysfs\n");
+		backlight_device_unregister(bl);
+		kfree(data);
+		return ret;
+	}
+
+	platform_set_drvdata(pdev, bl);
+	ret |= adp5520_bl_setup(bl);
+	backlight_update_status(bl);
+
+	return ret;
+}
+
+static int __devexit adp5520_bl_remove(struct platform_device *pdev)
+{
+	struct backlight_device *bl = platform_get_drvdata(pdev);
+	struct adp5520_bl *data = bl_get_data(bl);
+
+	adp5520_clr_bits(data->master, MODE_STATUS, BL_EN);
+
+	if (data->pdata->en_ambl_sens)
+		sysfs_remove_group(&bl->dev.kobj,
+				&adp5520_bl_attr_group);
+
+	backlight_device_unregister(bl);
+	kfree(data);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int adp5520_bl_suspend(struct platform_device *pdev,
+				 pm_message_t state)
+{
+	struct backlight_device *bl = platform_get_drvdata(pdev);
+	return adp5520_bl_set(bl, 0);
+}
+
+static int adp5520_bl_resume(struct platform_device *pdev)
+{
+	struct backlight_device *bl = platform_get_drvdata(pdev);
+
+	backlight_update_status(bl);
+	return 0;
+}
+#else
+#define adp5520_bl_suspend	NULL
+#define adp5520_bl_resume	NULL
+#endif
+
+static struct platform_driver adp5520_bl_driver = {
+	.driver		= {
+		.name	= "adp5520-backlight",
+		.owner	= THIS_MODULE,
+	},
+	.probe		= adp5520_bl_probe,
+	.remove		= __devexit_p(adp5520_bl_remove),
+	.suspend	= adp5520_bl_suspend,
+	.resume		= adp5520_bl_resume,
+};
+
+static int __init adp5520_bl_init(void)
+{
+	return platform_driver_register(&adp5520_bl_driver);
+}
+module_init(adp5520_bl_init);
+
+static void __exit adp5520_bl_exit(void)
+{
+	platform_driver_unregister(&adp5520_bl_driver);
+}
+module_exit(adp5520_bl_exit);
+
+MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_DESCRIPTION("ADP5520(01) Backlight Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:adp5520-backlight");
diff --git a/drivers/video/backlight/adx_bl.c b/drivers/video/backlight/adx_bl.c
new file mode 100644
index 0000000..2c3bdfc
--- /dev/null
+++ b/drivers/video/backlight/adx_bl.c
@@ -0,0 +1,178 @@
+/*
+ * linux/drivers/video/backlight/adx.c
+ *
+ * Copyright (C) 2009 Avionic Design GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Written by Thierry Reding <thierry.reding@avionic-design.de>
+ */
+
+#include <linux/backlight.h>
+#include <linux/fb.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+/* register definitions */
+#define ADX_BACKLIGHT_CONTROL		0x00
+#define ADX_BACKLIGHT_CONTROL_ENABLE	(1 << 0)
+#define ADX_BACKLIGHT_BRIGHTNESS	0x08
+#define ADX_BACKLIGHT_STATUS		0x10
+#define ADX_BACKLIGHT_ERROR		0x18
+
+struct adxbl {
+	void __iomem *base;
+};
+
+static int adx_backlight_update_status(struct backlight_device *bldev)
+{
+	struct adxbl *bl = bl_get_data(bldev);
+	u32 value;
+
+	value = bldev->props.brightness;
+	writel(value, bl->base + ADX_BACKLIGHT_BRIGHTNESS);
+
+	value = readl(bl->base + ADX_BACKLIGHT_CONTROL);
+
+	if (bldev->props.state & BL_CORE_FBBLANK)
+		value &= ~ADX_BACKLIGHT_CONTROL_ENABLE;
+	else
+		value |= ADX_BACKLIGHT_CONTROL_ENABLE;
+
+	writel(value, bl->base + ADX_BACKLIGHT_CONTROL);
+
+	return 0;
+}
+
+static int adx_backlight_get_brightness(struct backlight_device *bldev)
+{
+	struct adxbl *bl = bl_get_data(bldev);
+	u32 brightness;
+
+	brightness = readl(bl->base + ADX_BACKLIGHT_BRIGHTNESS);
+	return brightness & 0xff;
+}
+
+static int adx_backlight_check_fb(struct fb_info *fb)
+{
+	return 1;
+}
+
+static struct backlight_ops adx_backlight_ops = {
+	.options = 0,
+	.update_status = adx_backlight_update_status,
+	.get_brightness = adx_backlight_get_brightness,
+	.check_fb = adx_backlight_check_fb,
+};
+
+static int __devinit adx_backlight_probe(struct platform_device *pdev)
+{
+	struct backlight_device *bldev;
+	struct resource *res;
+	struct adxbl *bl;
+	int ret = 0;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		ret = -ENXIO;
+		goto out;
+	}
+
+	res = devm_request_mem_region(&pdev->dev, res->start,
+			resource_size(res), res->name);
+	if (!res) {
+		ret = -ENXIO;
+		goto out;
+	}
+
+	bl = devm_kzalloc(&pdev->dev, sizeof(*bl), GFP_KERNEL);
+	if (!bl) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	bl->base = devm_ioremap_nocache(&pdev->dev, res->start,
+			resource_size(res));
+	if (!bl->base) {
+		ret = -ENXIO;
+		goto out;
+	}
+
+	bldev = backlight_device_register(dev_name(&pdev->dev), &pdev->dev, bl,
+			&adx_backlight_ops);
+	if (IS_ERR(bldev)) {
+		ret = PTR_ERR(bldev);
+		goto out;
+	}
+
+	bldev->props.max_brightness = 0xff;
+	bldev->props.brightness = 0xff;
+	bldev->props.power = FB_BLANK_UNBLANK;
+
+	platform_set_drvdata(pdev, bldev);
+
+out:
+	return ret;
+}
+
+static int __devexit adx_backlight_remove(struct platform_device *pdev)
+{
+	struct backlight_device *bldev;
+	int ret = 0;
+
+	bldev = platform_get_drvdata(pdev);
+	bldev->props.power = FB_BLANK_UNBLANK;
+	bldev->props.brightness = 0xff;
+	backlight_update_status(bldev);
+	backlight_device_unregister(bldev);
+	platform_set_drvdata(pdev, NULL);
+
+	return ret;
+}
+
+#ifdef CONFIG_PM
+static int adx_backlight_suspend(struct platform_device *pdev,
+		pm_message_t state)
+{
+	return 0;
+}
+
+static int adx_backlight_resume(struct platform_device *pdev)
+{
+	return 0;
+}
+#else
+#define adx_backlight_suspend NULL
+#define adx_backlight_resume NULL
+#endif
+
+static struct platform_driver adx_backlight_driver = {
+	.probe = adx_backlight_probe,
+	.remove = __devexit_p(adx_backlight_remove),
+	.suspend = adx_backlight_suspend,
+	.resume = adx_backlight_resume,
+	.driver = {
+		.name = "adx-backlight",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init adx_backlight_init(void)
+{
+	return platform_driver_register(&adx_backlight_driver);
+}
+
+static void __exit adx_backlight_exit(void)
+{
+	platform_driver_unregister(&adx_backlight_driver);
+}
+
+module_init(adx_backlight_init);
+module_exit(adx_backlight_exit);
+
+MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
+MODULE_DESCRIPTION("Avionic Design Xanthos Backlight Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index 157057c..6615ac7 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -73,6 +73,27 @@
 }
 #endif /* CONFIG_FB */
 
+static void backlight_generate_event(struct backlight_device *bd,
+				     enum backlight_update_reason reason)
+{
+	char *envp[2];
+
+	switch (reason) {
+	case BACKLIGHT_UPDATE_SYSFS:
+		envp[0] = "SOURCE=sysfs";
+		break;
+	case BACKLIGHT_UPDATE_HOTKEY:
+		envp[0] = "SOURCE=hotkey";
+		break;
+	default:
+		envp[0] = "SOURCE=unknown";
+		break;
+	}
+	envp[1] = NULL;
+	kobject_uevent_env(&bd->dev.kobj, KOBJ_CHANGE, envp);
+	sysfs_notify(&bd->dev.kobj, NULL, "actual_brightness");
+}
+
 static ssize_t backlight_show_power(struct device *dev,
 		struct device_attribute *attr,char *buf)
 {
@@ -142,6 +163,8 @@
 	}
 	mutex_unlock(&bd->ops_lock);
 
+	backlight_generate_event(bd, BACKLIGHT_UPDATE_SYSFS);
+
 	return rc;
 }
 
@@ -214,6 +237,25 @@
 };
 
 /**
+ * backlight_force_update - tell the backlight subsystem that hardware state
+ *   has changed
+ * @bd: the backlight device to update
+ *
+ * Updates the internal state of the backlight in response to a hardware event,
+ * and generates a uevent to notify userspace.
+ */
+void backlight_force_update(struct backlight_device *bd,
+			    enum backlight_update_reason reason)
+{
+	mutex_lock(&bd->ops_lock);
+	if (bd->ops && bd->ops->get_brightness)
+		bd->props.brightness = bd->ops->get_brightness(bd);
+	mutex_unlock(&bd->ops_lock);
+	backlight_generate_event(bd, reason);
+}
+EXPORT_SYMBOL(backlight_force_update);
+
+/**
  * backlight_device_register - create and register a new object of
  *   backlight_device class.
  * @name: the name of the new object(must be the same as the name of the
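A minimal usage sketch for the notification hook added above: a platform hotkey handler that tells the backlight core to re-read the hardware state. The handler name and the way the backlight_device pointer is stored are assumptions, and it presumes the matching declarations are exported via <linux/backlight.h>; only backlight_force_update() and BACKLIGHT_UPDATE_HOTKEY come from this patch.

#include <linux/backlight.h>

/* hypothetical driver-private pointer to the device registered earlier */
static struct backlight_device *my_backlight;

static void my_hotkey_notify(void)
{
	/*
	 * Firmware changed the brightness behind the kernel's back:
	 * re-read it via ops->get_brightness() and emit a KOBJ_CHANGE
	 * uevent with SOURCE=hotkey, plus a sysfs_notify() on
	 * "actual_brightness".
	 */
	backlight_force_update(my_backlight, BACKLIGHT_UPDATE_HOTKEY);
}
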
diff --git a/drivers/video/backlight/hp680_bl.c b/drivers/video/backlight/hp680_bl.c
index 5be55a2..7fb4eef 100644
--- a/drivers/video/backlight/hp680_bl.c
+++ b/drivers/video/backlight/hp680_bl.c
@@ -103,7 +103,7 @@
 	.update_status  = hp680bl_set_intensity,
 };
 
-static int __init hp680bl_probe(struct platform_device *pdev)
+static int __devinit hp680bl_probe(struct platform_device *pdev)
 {
 	struct backlight_device *bd;
 
diff --git a/drivers/video/backlight/lms283gf05.c b/drivers/video/backlight/lms283gf05.c
new file mode 100644
index 0000000..447b542
--- /dev/null
+++ b/drivers/video/backlight/lms283gf05.c
@@ -0,0 +1,242 @@
+/*
+ * lms283gf05.c -- support for Samsung LMS283GF05 LCD
+ *
+ * Copyright (c) 2009 Marek Vasut <marek.vasut@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/lcd.h>
+
+#include <linux/spi/spi.h>
+#include <linux/spi/lms283gf05.h>
+
+struct lms283gf05_state {
+	struct spi_device	*spi;
+	struct lcd_device	*ld;
+};
+
+struct lms283gf05_seq {
+	unsigned char		reg;
+	unsigned short		value;
+	unsigned char		delay;
+};
+
+/* Magic sequences supplied by the manufacturer; for details, refer to the datasheet */
+static struct lms283gf05_seq disp_initseq[] = {
+	/* REG, VALUE, DELAY */
+	{ 0x07, 0x0000, 0 },
+	{ 0x13, 0x0000, 10 },
+
+	{ 0x11, 0x3004, 0 },
+	{ 0x14, 0x200F, 0 },
+	{ 0x10, 0x1a20, 0 },
+	{ 0x13, 0x0040, 50 },
+
+	{ 0x13, 0x0060, 0 },
+	{ 0x13, 0x0070, 200 },
+
+	{ 0x01, 0x0127, 0 },
+	{ 0x02,	0x0700, 0 },
+	{ 0x03, 0x1030, 0 },
+	{ 0x08, 0x0208, 0 },
+	{ 0x0B, 0x0620, 0 },
+	{ 0x0C, 0x0110, 0 },
+	{ 0x30, 0x0120, 0 },
+	{ 0x31, 0x0127, 0 },
+	{ 0x32, 0x0000, 0 },
+	{ 0x33, 0x0503, 0 },
+	{ 0x34, 0x0727, 0 },
+	{ 0x35, 0x0124, 0 },
+	{ 0x36, 0x0706, 0 },
+	{ 0x37, 0x0701, 0 },
+	{ 0x38, 0x0F00, 0 },
+	{ 0x39, 0x0F00, 0 },
+	{ 0x40, 0x0000, 0 },
+	{ 0x41, 0x0000, 0 },
+	{ 0x42, 0x013f, 0 },
+	{ 0x43, 0x0000, 0 },
+	{ 0x44, 0x013f, 0 },
+	{ 0x45, 0x0000, 0 },
+	{ 0x46, 0xef00, 0 },
+	{ 0x47, 0x013f, 0 },
+	{ 0x48, 0x0000, 0 },
+	{ 0x07, 0x0015, 30 },
+
+	{ 0x07, 0x0017, 0 },
+
+	{ 0x20, 0x0000, 0 },
+	{ 0x21, 0x0000, 0 },
+	{ 0x22, 0x0000, 0 }
+};
+
+static struct lms283gf05_seq disp_pdwnseq[] = {
+	{ 0x07, 0x0016, 30 },
+
+	{ 0x07, 0x0004, 0 },
+	{ 0x10, 0x0220, 20 },
+
+	{ 0x13, 0x0060, 50 },
+
+	{ 0x13, 0x0040, 50 },
+
+	{ 0x13, 0x0000, 0 },
+	{ 0x10, 0x0000, 0 }
+};
+
+
+static void lms283gf05_reset(unsigned long gpio, bool inverted)
+{
+	gpio_set_value(gpio, !inverted);
+	mdelay(100);
+	gpio_set_value(gpio, inverted);
+	mdelay(20);
+	gpio_set_value(gpio, !inverted);
+	mdelay(20);
+}
+
+static void lms283gf05_toggle(struct spi_device *spi,
+			struct lms283gf05_seq *seq, int sz)
+{
+	char buf[3];
+	int i;
+
+	for (i = 0; i < sz; i++) {
+		buf[0] = 0x74;
+		buf[1] = 0x00;
+		buf[2] = seq[i].reg;
+		spi_write(spi, buf, 3);
+
+		buf[0] = 0x76;
+		buf[1] = seq[i].value >> 8;
+		buf[2] = seq[i].value & 0xff;
+		spi_write(spi, buf, 3);
+
+		mdelay(seq[i].delay);
+	}
+}
+
+static int lms283gf05_power_set(struct lcd_device *ld, int power)
+{
+	struct lms283gf05_state *st = lcd_get_data(ld);
+	struct spi_device *spi = st->spi;
+	struct lms283gf05_pdata *pdata = spi->dev.platform_data;
+
+	if (power) {
+		if (pdata)
+			lms283gf05_reset(pdata->reset_gpio,
+					pdata->reset_inverted);
+		lms283gf05_toggle(spi, disp_initseq, ARRAY_SIZE(disp_initseq));
+	} else {
+		lms283gf05_toggle(spi, disp_pdwnseq, ARRAY_SIZE(disp_pdwnseq));
+		if (pdata)
+			gpio_set_value(pdata->reset_gpio,
+					pdata->reset_inverted);
+	}
+
+	return 0;
+}
+
+static struct lcd_ops lms_ops = {
+	.set_power	= lms283gf05_power_set,
+	.get_power	= NULL,
+};
+
+static int __devinit lms283gf05_probe(struct spi_device *spi)
+{
+	struct lms283gf05_state *st;
+	struct lms283gf05_pdata *pdata = spi->dev.platform_data;
+	struct lcd_device *ld;
+	int ret = 0;
+
+	if (pdata != NULL) {
+		ret = gpio_request(pdata->reset_gpio, "LMS283GF05 RESET");
+		if (ret)
+			return ret;
+
+		ret = gpio_direction_output(pdata->reset_gpio,
+						!pdata->reset_inverted);
+		if (ret)
+			goto err;
+	}
+
+	st = kzalloc(sizeof(struct lms283gf05_state), GFP_KERNEL);
+	if (st == NULL) {
+		dev_err(&spi->dev, "No memory for device state\n");
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	ld = lcd_device_register("lms283gf05", &spi->dev, st, &lms_ops);
+	if (IS_ERR(ld)) {
+		ret = PTR_ERR(ld);
+		goto err2;
+	}
+
+	st->spi = spi;
+	st->ld = ld;
+
+	dev_set_drvdata(&spi->dev, st);
+
+	/* kick in the LCD */
+	if (pdata)
+		lms283gf05_reset(pdata->reset_gpio, pdata->reset_inverted);
+	lms283gf05_toggle(spi, disp_initseq, ARRAY_SIZE(disp_initseq));
+
+	return 0;
+
+err2:
+	kfree(st);
+err:
+	if (pdata != NULL)
+		gpio_free(pdata->reset_gpio);
+
+	return ret;
+}
+
+static int __devexit lms283gf05_remove(struct spi_device *spi)
+{
+	struct lms283gf05_state *st = dev_get_drvdata(&spi->dev);
+	struct lms283gf05_pdata *pdata = st->spi->dev.platform_data;
+
+	lcd_device_unregister(st->ld);
+
+	if (pdata != NULL)
+		gpio_free(pdata->reset_gpio);
+
+	kfree(st);
+
+	return 0;
+}
+
+static struct spi_driver lms283gf05_driver = {
+	.driver = {
+		.name	= "lms283gf05",
+		.owner	= THIS_MODULE,
+	},
+	.probe		= lms283gf05_probe,
+	.remove		= __devexit_p(lms283gf05_remove),
+};
+
+static int __init lms283gf05_init(void)
+{
+	return spi_register_driver(&lms283gf05_driver);
+}
+
+static void __exit lms283gf05_exit(void)
+{
+	spi_unregister_driver(&lms283gf05_driver);
+}
+
+module_init(lms283gf05_init);
+module_exit(lms283gf05_exit);
+
+MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
+MODULE_DESCRIPTION("LCD283GF05 LCD");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/backlight/mbp_nvidia_bl.c b/drivers/video/backlight/mbp_nvidia_bl.c
index 3bb4c0a..9edb8d7 100644
--- a/drivers/video/backlight/mbp_nvidia_bl.c
+++ b/drivers/video/backlight/mbp_nvidia_bl.c
@@ -166,6 +166,15 @@
 	},
 	{
 		.callback	= mbp_dmi_match,
+		.ident		= "MacBookAir 1,1",
+		.matches	= {
+			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir1,1"),
+		},
+		.driver_data	= (void *)&intel_chipset_data,
+	},
+	{
+		.callback	= mbp_dmi_match,
 		.ident		= "MacBook 5,1",
 		.matches	= {
 			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
@@ -175,6 +184,15 @@
 	},
 	{
 		.callback	= mbp_dmi_match,
+		.ident		= "MacBook 5,2",
+		.matches	= {
+			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5,2"),
+		},
+		.driver_data	= (void *)&nvidia_chipset_data,
+	},
+	{
+		.callback	= mbp_dmi_match,
 		.ident		= "MacBookAir 2,1",
 		.matches	= {
 			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
@@ -191,6 +209,24 @@
 		},
 		.driver_data	= (void *)&nvidia_chipset_data,
 	},
+	{
+		.callback	= mbp_dmi_match,
+		.ident		= "MacBookPro 5,2",
+		.matches	= {
+			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,2"),
+		},
+		.driver_data	= (void *)&nvidia_chipset_data,
+	},
+	{
+		.callback	= mbp_dmi_match,
+		.ident		= "MacBookPro 5,5",
+		.matches	= {
+			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,5"),
+		},
+		.driver_data	= (void *)&nvidia_chipset_data,
+	},
 	{ }
 };
 
diff --git a/drivers/video/backlight/wm831x_bl.c b/drivers/video/backlight/wm831x_bl.c
new file mode 100644
index 0000000..467bdb7
--- /dev/null
+++ b/drivers/video/backlight/wm831x_bl.c
@@ -0,0 +1,250 @@
+/*
+ * Backlight driver for Wolfson Microelectronics WM831x PMICs
+ *
+ * Copyright 2009 Wolfson Microelectronics plc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/fb.h>
+#include <linux/backlight.h>
+
+#include <linux/mfd/wm831x/core.h>
+#include <linux/mfd/wm831x/pdata.h>
+#include <linux/mfd/wm831x/regulator.h>
+
+struct wm831x_backlight_data {
+	struct wm831x *wm831x;
+	int isink_reg;
+	int current_brightness;
+};
+
+static int wm831x_backlight_set(struct backlight_device *bl, int brightness)
+{
+	struct wm831x_backlight_data *data = bl_get_data(bl);
+	struct wm831x *wm831x = data->wm831x;
+	int power_up = !data->current_brightness && brightness;
+	int power_down = data->current_brightness && !brightness;
+	int ret;
+
+	if (power_up) {
+		/* Enable the ISINK */
+		ret = wm831x_set_bits(wm831x, data->isink_reg,
+				      WM831X_CS1_ENA, WM831X_CS1_ENA);
+		if (ret < 0)
+			goto err;
+
+		/* Enable the DC-DC */
+		ret = wm831x_set_bits(wm831x, WM831X_DCDC_ENABLE,
+				      WM831X_DC4_ENA, WM831X_DC4_ENA);
+		if (ret < 0)
+			goto err;
+	}
+
+	if (power_down) {
+		/* DCDC first */
+		ret = wm831x_set_bits(wm831x, WM831X_DCDC_ENABLE,
+				      WM831X_DC4_ENA, 0);
+		if (ret < 0)
+			goto err;
+
+		/* ISINK */
+		ret = wm831x_set_bits(wm831x, data->isink_reg,
+				      WM831X_CS1_DRIVE | WM831X_CS1_ENA, 0);
+		if (ret < 0)
+			goto err;
+	}
+
+	/* Set the new brightness */
+	ret = wm831x_set_bits(wm831x, data->isink_reg,
+			      WM831X_CS1_ISEL_MASK, brightness);
+	if (ret < 0)
+		goto err;
+
+	if (power_up) {
+		/* Drive current through the ISINK */
+		ret = wm831x_set_bits(wm831x, data->isink_reg,
+				      WM831X_CS1_DRIVE, WM831X_CS1_DRIVE);
+		if (ret < 0)
+			return ret;
+	}
+
+	data->current_brightness = brightness;
+
+	return 0;
+
+err:
+	/* If we were in the middle of a power transition, always shut down
+	 * for safety.
+	 */
+	if (power_up || power_down) {
+		wm831x_set_bits(wm831x, WM831X_DCDC_ENABLE, WM831X_DC4_ENA, 0);
+		wm831x_set_bits(wm831x, data->isink_reg, WM831X_CS1_ENA, 0);
+	}
+
+	return ret;
+}
+
+static int wm831x_backlight_update_status(struct backlight_device *bl)
+{
+	int brightness = bl->props.brightness;
+
+	if (bl->props.power != FB_BLANK_UNBLANK)
+		brightness = 0;
+
+	if (bl->props.fb_blank != FB_BLANK_UNBLANK)
+		brightness = 0;
+
+	if (bl->props.state & BL_CORE_SUSPENDED)
+		brightness = 0;
+
+	return wm831x_backlight_set(bl, brightness);
+}
+
+static int wm831x_backlight_get_brightness(struct backlight_device *bl)
+{
+	struct wm831x_backlight_data *data = bl_get_data(bl);
+	return data->current_brightness;
+}
+
+static struct backlight_ops wm831x_backlight_ops = {
+	.options = BL_CORE_SUSPENDRESUME,
+	.update_status	= wm831x_backlight_update_status,
+	.get_brightness	= wm831x_backlight_get_brightness,
+};
+
+static int wm831x_backlight_probe(struct platform_device *pdev)
+{
+	struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
+	struct wm831x_pdata *wm831x_pdata;
+	struct wm831x_backlight_pdata *pdata;
+	struct wm831x_backlight_data *data;
+	struct backlight_device *bl;
+	int ret, i, max_isel, isink_reg, dcdc_cfg;
+
+	/* We need platform data */
+	if (pdev->dev.parent->platform_data) {
+		wm831x_pdata = pdev->dev.parent->platform_data;
+		pdata = wm831x_pdata->backlight;
+	} else {
+		pdata = NULL;
+	}
+
+	if (!pdata) {
+		dev_err(&pdev->dev, "No platform data supplied\n");
+		return -EINVAL;
+	}
+
+	/* Figure out the maximum current we can use */
+	for (i = 0; i < WM831X_ISINK_MAX_ISEL; i++) {
+		if (wm831x_isinkv_values[i] > pdata->max_uA)
+			break;
+	}
+
+	if (i == 0) {
+		dev_err(&pdev->dev, "Invalid max_uA: %duA\n", pdata->max_uA);
+		return -EINVAL;
+	}
+	max_isel = i - 1;
+
+	if (pdata->max_uA != wm831x_isinkv_values[max_isel])
+		dev_warn(&pdev->dev,
+			 "Maximum current is %duA not %duA as requested\n",
+			 wm831x_isinkv_values[max_isel], pdata->max_uA);
+
+	switch (pdata->isink) {
+	case 1:
+		isink_reg = WM831X_CURRENT_SINK_1;
+		dcdc_cfg = 0;
+		break;
+	case 2:
+		isink_reg = WM831X_CURRENT_SINK_2;
+		dcdc_cfg = WM831X_DC4_FBSRC;
+		break;
+	default:
+		dev_err(&pdev->dev, "Invalid ISINK %d\n", pdata->isink);
+		return -EINVAL;
+	}
+
+	/* Configure the ISINK to use for feedback */
+	ret = wm831x_reg_unlock(wm831x);
+	if (ret < 0)
+		return ret;
+
+	ret = wm831x_set_bits(wm831x, WM831X_DC4_CONTROL, WM831X_DC4_FBSRC,
+			      dcdc_cfg);
+
+	wm831x_reg_lock(wm831x);
+	if (ret < 0)
+		return ret;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (data == NULL)
+		return -ENOMEM;
+
+	data->wm831x = wm831x;
+	data->current_brightness = 0;
+	data->isink_reg = isink_reg;
+
+	bl = backlight_device_register("wm831x", &pdev->dev,
+			data, &wm831x_backlight_ops);
+	if (IS_ERR(bl)) {
+		dev_err(&pdev->dev, "failed to register backlight\n");
+		kfree(data);
+		return PTR_ERR(bl);
+	}
+
+	bl->props.max_brightness = max_isel;
+	bl->props.brightness = max_isel;
+
+	platform_set_drvdata(pdev, bl);
+
+	/* Disable the DCDC if it was started so we can bootstrap */
+	wm831x_set_bits(wm831x, WM831X_DCDC_ENABLE, WM831X_DC4_ENA, 0);
+
+	backlight_update_status(bl);
+
+	return 0;
+}
+
+static int wm831x_backlight_remove(struct platform_device *pdev)
+{
+	struct backlight_device *bl = platform_get_drvdata(pdev);
+	struct wm831x_backlight_data *data = bl_get_data(bl);
+
+	backlight_device_unregister(bl);
+	kfree(data);
+	return 0;
+}
+
+static struct platform_driver wm831x_backlight_driver = {
+	.driver		= {
+		.name	= "wm831x-backlight",
+		.owner	= THIS_MODULE,
+	},
+	.probe		= wm831x_backlight_probe,
+	.remove		= wm831x_backlight_remove,
+};
+
+static int __init wm831x_backlight_init(void)
+{
+	return platform_driver_register(&wm831x_backlight_driver);
+}
+module_init(wm831x_backlight_init);
+
+static void __exit wm831x_backlight_exit(void)
+{
+	platform_driver_unregister(&wm831x_backlight_driver);
+}
+module_exit(wm831x_backlight_exit);
+
+MODULE_DESCRIPTION("Backlight Driver for WM831x PMICs");
+MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:wm831x-backlight");
diff --git a/fs/buffer.c b/fs/buffer.c
index 24afd74..6fa5302 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -280,7 +280,7 @@
 EXPORT_SYMBOL(invalidate_bdev);
 
 /*
- * Kick pdflush then try to free up some ZONE_NORMAL memory.
+ * Kick the writeback threads then try to free up some ZONE_NORMAL memory.
  */
 static void free_more_memory(void)
 {
@@ -1709,9 +1709,9 @@
 		/*
 		 * If it's a fully non-blocking write attempt and we cannot
 		 * lock the buffer then redirty the page.  Note that this can
-		 * potentially cause a busy-wait loop from pdflush and kswapd
-		 * activity, but those code paths have their own higher-level
-		 * throttling.
+		 * potentially cause a busy-wait loop from writeback threads
+		 * and kswapd activity, but those code paths have their own
+		 * higher-level throttling.
 		 */
 		if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
 			lock_buffer(bh);
@@ -3208,7 +3208,7 @@
  * still running obsolete flush daemons, so we terminate them here.
  *
  * Use of bdflush() is deprecated and will be removed in a future kernel.
- * The `pdflush' kernel threads fully replace bdflush daemons and this call.
+ * The `flush-X' kernel threads fully replace bdflush daemons and this call.
  */
 SYSCALL_DEFINE2(bdflush, int, func, long, data)
 {
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
index 6994a0f..80f3525 100644
--- a/fs/cifs/Kconfig
+++ b/fs/cifs/Kconfig
@@ -2,6 +2,7 @@
 	tristate "CIFS support (advanced network filesystem, SMBFS successor)"
 	depends on INET
 	select NLS
+	select SLOW_WORK
 	help
 	  This is the client VFS module for the Common Internet File System
 	  (CIFS) protocol which is the successor to the Server Message Block
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 90c5b39..9a5e4f5 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -64,9 +64,6 @@
 unsigned int extended_security = CIFSSEC_DEF;
 /* unsigned int ntlmv2_support = 0; */
 unsigned int sign_CIFS_PDUs = 1;
-extern struct task_struct *oplockThread; /* remove sparse warning */
-struct task_struct *oplockThread = NULL;
-/* extern struct task_struct * dnotifyThread; remove sparse warning */
 static const struct super_operations cifs_super_ops;
 unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
 module_param(CIFSMaxBufSize, int, 0);
@@ -972,89 +969,12 @@
 	kmem_cache_destroy(cifs_oplock_cachep);
 }
 
-static int cifs_oplock_thread(void *dummyarg)
-{
-	struct oplock_q_entry *oplock_item;
-	struct cifsTconInfo *pTcon;
-	struct inode *inode;
-	__u16  netfid;
-	int rc, waitrc = 0;
-
-	set_freezable();
-	do {
-		if (try_to_freeze())
-			continue;
-
-		spin_lock(&cifs_oplock_lock);
-		if (list_empty(&cifs_oplock_list)) {
-			spin_unlock(&cifs_oplock_lock);
-			set_current_state(TASK_INTERRUPTIBLE);
-			schedule_timeout(39*HZ);
-		} else {
-			oplock_item = list_entry(cifs_oplock_list.next,
-						struct oplock_q_entry, qhead);
-			cFYI(1, ("found oplock item to write out"));
-			pTcon = oplock_item->tcon;
-			inode = oplock_item->pinode;
-			netfid = oplock_item->netfid;
-			spin_unlock(&cifs_oplock_lock);
-			DeleteOplockQEntry(oplock_item);
-			/* can not grab inode sem here since it would
-				deadlock when oplock received on delete
-				since vfs_unlink holds the i_mutex across
-				the call */
-			/* mutex_lock(&inode->i_mutex);*/
-			if (S_ISREG(inode->i_mode)) {
-#ifdef CONFIG_CIFS_EXPERIMENTAL
-				if (CIFS_I(inode)->clientCanCacheAll == 0)
-					break_lease(inode, FMODE_READ);
-				else if (CIFS_I(inode)->clientCanCacheRead == 0)
-					break_lease(inode, FMODE_WRITE);
-#endif
-				rc = filemap_fdatawrite(inode->i_mapping);
-				if (CIFS_I(inode)->clientCanCacheRead == 0) {
-					waitrc = filemap_fdatawait(
-							      inode->i_mapping);
-					invalidate_remote_inode(inode);
-				}
-				if (rc == 0)
-					rc = waitrc;
-			} else
-				rc = 0;
-			/* mutex_unlock(&inode->i_mutex);*/
-			if (rc)
-				CIFS_I(inode)->write_behind_rc = rc;
-			cFYI(1, ("Oplock flush inode %p rc %d",
-				inode, rc));
-
-				/* releasing stale oplock after recent reconnect
-				of smb session using a now incorrect file
-				handle is not a data integrity issue but do
-				not bother sending an oplock release if session
-				to server still is disconnected since oplock
-				already released by the server in that case */
-			if (!pTcon->need_reconnect) {
-				rc = CIFSSMBLock(0, pTcon, netfid,
-						0 /* len */ , 0 /* offset */, 0,
-						0, LOCKING_ANDX_OPLOCK_RELEASE,
-						false /* wait flag */);
-				cFYI(1, ("Oplock release rc = %d", rc));
-			}
-			set_current_state(TASK_INTERRUPTIBLE);
-			schedule_timeout(1);  /* yield in case q were corrupt */
-		}
-	} while (!kthread_should_stop());
-
-	return 0;
-}
-
 static int __init
 init_cifs(void)
 {
 	int rc = 0;
 	cifs_proc_init();
 	INIT_LIST_HEAD(&cifs_tcp_ses_list);
-	INIT_LIST_HEAD(&cifs_oplock_list);
 #ifdef CONFIG_CIFS_EXPERIMENTAL
 	INIT_LIST_HEAD(&GlobalDnotifyReqList);
 	INIT_LIST_HEAD(&GlobalDnotifyRsp_Q);
@@ -1083,7 +1003,6 @@
 	rwlock_init(&GlobalSMBSeslock);
 	rwlock_init(&cifs_tcp_ses_lock);
 	spin_lock_init(&GlobalMid_Lock);
-	spin_lock_init(&cifs_oplock_lock);
 
 	if (cifs_max_pending < 2) {
 		cifs_max_pending = 2;
@@ -1118,16 +1037,13 @@
 	if (rc)
 		goto out_unregister_key_type;
 #endif
-	oplockThread = kthread_run(cifs_oplock_thread, NULL, "cifsoplockd");
-	if (IS_ERR(oplockThread)) {
-		rc = PTR_ERR(oplockThread);
-		cERROR(1, ("error %d create oplock thread", rc));
-		goto out_unregister_dfs_key_type;
-	}
+	rc = slow_work_register_user();
+	if (rc)
+		goto out_unregister_resolver_key;
 
 	return 0;
 
- out_unregister_dfs_key_type:
+ out_unregister_resolver_key:
 #ifdef CONFIG_CIFS_DFS_UPCALL
 	unregister_key_type(&key_type_dns_resolver);
  out_unregister_key_type:
@@ -1164,7 +1080,6 @@
 	cifs_destroy_inodecache();
 	cifs_destroy_mids();
 	cifs_destroy_request_bufs();
-	kthread_stop(oplockThread);
 }
 
 MODULE_AUTHOR("Steve French <sfrench@us.ibm.com>");
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 6cfc81a..5d0fde1 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -18,6 +18,7 @@
  */
 #include <linux/in.h>
 #include <linux/in6.h>
+#include <linux/slow-work.h>
 #include "cifs_fs_sb.h"
 #include "cifsacl.h"
 /*
@@ -346,14 +347,16 @@
 	/* lock scope id (0 if none) */
 	struct file *pfile; /* needed for writepage */
 	struct inode *pInode; /* needed for oplock break */
+	struct vfsmount *mnt;
 	struct mutex lock_mutex;
 	struct list_head llist; /* list of byte range locks we have. */
 	bool closePend:1;	/* file is marked to close */
 	bool invalidHandle:1;	/* file closed via session abend */
-	bool messageMode:1;	/* for pipes: message vs byte mode */
+	bool oplock_break_cancelled:1;
 	atomic_t count;		/* reference count */
 	struct mutex fh_mutex; /* prevents reopen race after dead ses*/
 	struct cifs_search_info srch_inf;
+	struct slow_work oplock_break; /* slow_work job for oplock breaks */
 };
 
 /* Take a reference on the file private data */
@@ -365,8 +368,10 @@
 /* Release a reference on the file private data */
 static inline void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
 {
-	if (atomic_dec_and_test(&cifs_file->count))
+	if (atomic_dec_and_test(&cifs_file->count)) {
+		iput(cifs_file->pInode);
 		kfree(cifs_file);
+	}
 }
 
 /*
@@ -382,7 +387,6 @@
 	unsigned long time;	/* jiffies of last update/check of inode */
 	bool clientCanCacheRead:1;	/* read oplock */
 	bool clientCanCacheAll:1;	/* read and writebehind oplock */
-	bool oplockPending:1;
 	bool delete_pending:1;		/* DELETE_ON_CLOSE is set */
 	u64  server_eof;		/* current file size on server */
 	u64  uniqueid;			/* server inode number */
@@ -585,9 +589,9 @@
 #define   CIFSSEC_MUST_LANMAN	0x10010
 #define   CIFSSEC_MUST_PLNTXT	0x20020
 #ifdef CONFIG_CIFS_UPCALL
-#define   CIFSSEC_MASK          0xAF0AF /* allows weak security but also krb5 */
+#define   CIFSSEC_MASK          0xBF0BF /* allows weak security but also krb5 */
 #else
-#define   CIFSSEC_MASK          0xA70A7 /* current flags supported if weak */
+#define   CIFSSEC_MASK          0xB70B7 /* current flags supported if weak */
 #endif /* UPCALL */
 #else /* do not allow weak pw hash */
 #ifdef CONFIG_CIFS_UPCALL
@@ -669,12 +673,6 @@
  */
 GLOBAL_EXTERN rwlock_t GlobalSMBSeslock;
 
-/* Global list of oplocks */
-GLOBAL_EXTERN struct list_head cifs_oplock_list;
-
-/* Protects the cifs_oplock_list */
-GLOBAL_EXTERN spinlock_t cifs_oplock_lock;
-
 /* Outstanding dir notify requests */
 GLOBAL_EXTERN struct list_head GlobalDnotifyReqList;
 /* DirNotify response queue */
@@ -725,3 +723,4 @@
 GLOBAL_EXTERN unsigned int cifs_min_small;  /* min size of small buf pool */
 GLOBAL_EXTERN unsigned int cifs_max_pending; /* MAX requests at once to server*/
 
+extern const struct slow_work_ops cifs_oplock_break_ops;
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index da8fbf5..6928c24 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -86,18 +86,17 @@
 			     const int stage,
 			     const struct nls_table *nls_cp);
 extern __u16 GetNextMid(struct TCP_Server_Info *server);
-extern struct oplock_q_entry *AllocOplockQEntry(struct inode *, u16,
-						 struct cifsTconInfo *);
-extern void DeleteOplockQEntry(struct oplock_q_entry *);
-extern void DeleteTconOplockQEntries(struct cifsTconInfo *);
 extern struct timespec cifs_NTtimeToUnix(__le64 utc_nanoseconds_since_1601);
 extern u64 cifs_UnixTimeToNT(struct timespec);
 extern struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time,
 				      int offset);
 
+extern struct cifsFileInfo *cifs_new_fileinfo(struct inode *newinode,
+				__u16 fileHandle, struct file *file,
+				struct vfsmount *mnt, unsigned int oflags);
 extern int cifs_posix_open(char *full_path, struct inode **pinode,
-			   struct super_block *sb, int mode, int oflags,
-			   int *poplock, __u16 *pnetfid, int xid);
+			   struct vfsmount *mnt, int mode, int oflags,
+			   __u32 *poplock, __u16 *pnetfid, int xid);
 extern void cifs_unix_basic_to_fattr(struct cifs_fattr *fattr,
 				     FILE_UNIX_BASIC_INFO *info,
 				     struct cifs_sb_info *cifs_sb);
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 301e307..941441d 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -94,6 +94,7 @@
 	list_for_each_safe(tmp, tmp1, &pTcon->openFileList) {
 		open_file = list_entry(tmp, struct cifsFileInfo, tlist);
 		open_file->invalidHandle = true;
+		open_file->oplock_break_cancelled = true;
 	}
 	write_unlock(&GlobalSMBSeslock);
 	/* BB Add call to invalidate_inodes(sb) for all superblocks mounted
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index d496824..43003e0 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1670,7 +1670,6 @@
 	CIFSSMBTDis(xid, tcon);
 	_FreeXid(xid);
 
-	DeleteTconOplockQEntries(tcon);
 	tconInfoFree(tcon);
 	cifs_put_smb_ses(ses);
 }
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index a6424cf..627a60a 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -24,6 +24,7 @@
 #include <linux/stat.h>
 #include <linux/slab.h>
 #include <linux/namei.h>
+#include <linux/mount.h>
 #include "cifsfs.h"
 #include "cifspdu.h"
 #include "cifsglob.h"
@@ -129,44 +130,45 @@
 	return full_path;
 }
 
-static void
-cifs_fill_fileinfo(struct inode *newinode, __u16 fileHandle,
-			struct cifsTconInfo *tcon, bool write_only)
+struct cifsFileInfo *
+cifs_new_fileinfo(struct inode *newinode, __u16 fileHandle,
+		  struct file *file, struct vfsmount *mnt, unsigned int oflags)
 {
 	int oplock = 0;
 	struct cifsFileInfo *pCifsFile;
 	struct cifsInodeInfo *pCifsInode;
+	struct cifs_sb_info *cifs_sb = CIFS_SB(mnt->mnt_sb);
 
 	pCifsFile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
-
 	if (pCifsFile == NULL)
-		return;
+		return pCifsFile;
 
 	if (oplockEnabled)
 		oplock = REQ_OPLOCK;
 
 	pCifsFile->netfid = fileHandle;
 	pCifsFile->pid = current->tgid;
-	pCifsFile->pInode = newinode;
+	pCifsFile->pInode = igrab(newinode);
+	pCifsFile->mnt = mnt;
+	pCifsFile->pfile = file;
 	pCifsFile->invalidHandle = false;
 	pCifsFile->closePend = false;
 	mutex_init(&pCifsFile->fh_mutex);
 	mutex_init(&pCifsFile->lock_mutex);
 	INIT_LIST_HEAD(&pCifsFile->llist);
 	atomic_set(&pCifsFile->count, 1);
+	slow_work_init(&pCifsFile->oplock_break, &cifs_oplock_break_ops);
 
-	/* set the following in open now
-			pCifsFile->pfile = file; */
 	write_lock(&GlobalSMBSeslock);
-	list_add(&pCifsFile->tlist, &tcon->openFileList);
+	list_add(&pCifsFile->tlist, &cifs_sb->tcon->openFileList);
 	pCifsInode = CIFS_I(newinode);
 	if (pCifsInode) {
 		/* if readable file instance put first in list*/
-		if (write_only)
+		if (oflags & FMODE_READ)
+			list_add(&pCifsFile->flist, &pCifsInode->openFileList);
+		else
 			list_add_tail(&pCifsFile->flist,
 				      &pCifsInode->openFileList);
-		else
-			list_add(&pCifsFile->flist, &pCifsInode->openFileList);
 
 		if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
 			pCifsInode->clientCanCacheAll = true;
@@ -176,18 +178,18 @@
 				pCifsInode->clientCanCacheRead = true;
 	}
 	write_unlock(&GlobalSMBSeslock);
+
+	return pCifsFile;
 }
 
 int cifs_posix_open(char *full_path, struct inode **pinode,
-		    struct super_block *sb, int mode, int oflags,
-		    int *poplock, __u16 *pnetfid, int xid)
+		    struct vfsmount *mnt, int mode, int oflags,
+		    __u32 *poplock, __u16 *pnetfid, int xid)
 {
 	int rc;
-	__u32 oplock;
-	bool write_only = false;
 	FILE_UNIX_BASIC_INFO *presp_data;
 	__u32 posix_flags = 0;
-	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+	struct cifs_sb_info *cifs_sb = CIFS_SB(mnt->mnt_sb);
 	struct cifs_fattr fattr;
 
 	cFYI(1, ("posix open %s", full_path));
@@ -223,12 +225,9 @@
 	if (oflags & O_DIRECT)
 		posix_flags |= SMB_O_DIRECT;
 
-	if (!(oflags & FMODE_READ))
-		write_only = true;
-
 	mode &= ~current_umask();
 	rc = CIFSPOSIXCreate(xid, cifs_sb->tcon, posix_flags, mode,
-			pnetfid, presp_data, &oplock, full_path,
+			pnetfid, presp_data, poplock, full_path,
 			cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
 					CIFS_MOUNT_MAP_SPECIAL_CHR);
 	if (rc)
@@ -244,7 +243,7 @@
 
 	/* get new inode and set it up */
 	if (*pinode == NULL) {
-		*pinode = cifs_iget(sb, &fattr);
+		*pinode = cifs_iget(mnt->mnt_sb, &fattr);
 		if (!*pinode) {
 			rc = -ENOMEM;
 			goto posix_open_ret;
@@ -253,7 +252,7 @@
 		cifs_fattr_to_inode(*pinode, &fattr);
 	}
 
-	cifs_fill_fileinfo(*pinode, *pnetfid, cifs_sb->tcon, write_only);
+	cifs_new_fileinfo(*pinode, *pnetfid, NULL, mnt, oflags);
 
 posix_open_ret:
 	kfree(presp_data);
@@ -280,7 +279,7 @@
 	int rc = -ENOENT;
 	int xid;
 	int create_options = CREATE_NOT_DIR;
-	int oplock = 0;
+	__u32 oplock = 0;
 	int oflags;
 	bool posix_create = false;
 	/*
@@ -298,7 +297,6 @@
 	FILE_ALL_INFO *buf = NULL;
 	struct inode *newinode = NULL;
 	int disposition = FILE_OVERWRITE_IF;
-	bool write_only = false;
 
 	xid = GetXid();
 
@@ -323,7 +321,7 @@
 	if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
 	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
 			le64_to_cpu(tcon->fsUnixInfo.Capability))) {
-		rc = cifs_posix_open(full_path, &newinode, inode->i_sb,
+		rc = cifs_posix_open(full_path, &newinode, nd->path.mnt,
 				     mode, oflags, &oplock, &fileHandle, xid);
 		/* EIO could indicate that (posix open) operation is not
 		   supported, despite what server claimed in capability
@@ -351,11 +349,8 @@
 		desiredAccess = 0;
 		if (oflags & FMODE_READ)
 			desiredAccess |= GENERIC_READ; /* is this too little? */
-		if (oflags & FMODE_WRITE) {
+		if (oflags & FMODE_WRITE)
 			desiredAccess |= GENERIC_WRITE;
-			if (!(oflags & FMODE_READ))
-				write_only = true;
-		}
 
 		if ((oflags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
 			disposition = FILE_CREATE;
@@ -470,8 +465,8 @@
 		/* mknod case - do not leave file open */
 		CIFSSMBClose(xid, tcon, fileHandle);
 	} else if (!(posix_create) && (newinode)) {
-			cifs_fill_fileinfo(newinode, fileHandle,
-					cifs_sb->tcon, write_only);
+			cifs_new_fileinfo(newinode, fileHandle, NULL,
+						nd->path.mnt, oflags);
 	}
 cifs_create_out:
 	kfree(buf);
@@ -611,7 +606,7 @@
 {
 	int xid;
 	int rc = 0; /* to get around spurious gcc warning, set to zero here */
-	int oplock = 0;
+	__u32 oplock = 0;
 	__u16 fileHandle = 0;
 	bool posix_open = false;
 	struct cifs_sb_info *cifs_sb;
@@ -683,8 +678,7 @@
 		if (!(nd->flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY)) &&
 		     (nd->flags & LOOKUP_OPEN) && !pTcon->broken_posix_open &&
 		     (nd->intent.open.flags & O_CREAT)) {
-			rc = cifs_posix_open(full_path, &newInode,
-					parent_dir_inode->i_sb,
+			rc = cifs_posix_open(full_path, &newInode, nd->path.mnt,
 					nd->intent.open.create_mode,
 					nd->intent.open.flags, &oplock,
 					&fileHandle, xid);
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index fa7beac..429337e 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -30,6 +30,7 @@
 #include <linux/writeback.h>
 #include <linux/task_io_accounting_ops.h>
 #include <linux/delay.h>
+#include <linux/mount.h>
 #include <asm/div64.h>
 #include "cifsfs.h"
 #include "cifspdu.h"
@@ -39,27 +40,6 @@
 #include "cifs_debug.h"
 #include "cifs_fs_sb.h"
 
-static inline struct cifsFileInfo *cifs_init_private(
-	struct cifsFileInfo *private_data, struct inode *inode,
-	struct file *file, __u16 netfid)
-{
-	memset(private_data, 0, sizeof(struct cifsFileInfo));
-	private_data->netfid = netfid;
-	private_data->pid = current->tgid;
-	mutex_init(&private_data->fh_mutex);
-	mutex_init(&private_data->lock_mutex);
-	INIT_LIST_HEAD(&private_data->llist);
-	private_data->pfile = file; /* needed for writepage */
-	private_data->pInode = inode;
-	private_data->invalidHandle = false;
-	private_data->closePend = false;
-	/* Initialize reference count to one.  The private data is
-	freed on the release of the last reference */
-	atomic_set(&private_data->count, 1);
-
-	return private_data;
-}
-
 static inline int cifs_convert_flags(unsigned int flags)
 {
 	if ((flags & O_ACCMODE) == O_RDONLY)
@@ -123,9 +103,11 @@
 }
 
 /* all arguments to this function must be checked for validity in caller */
-static inline int cifs_posix_open_inode_helper(struct inode *inode,
-			struct file *file, struct cifsInodeInfo *pCifsInode,
-			struct cifsFileInfo *pCifsFile, int oplock, u16 netfid)
+static inline int
+cifs_posix_open_inode_helper(struct inode *inode, struct file *file,
+			     struct cifsInodeInfo *pCifsInode,
+			     struct cifsFileInfo *pCifsFile, __u32 oplock,
+			     u16 netfid)
 {
 
 	write_lock(&GlobalSMBSeslock);
@@ -219,17 +201,6 @@
 	struct timespec temp;
 	int rc;
 
-	/* want handles we can use to read with first
-	   in the list so we do not have to walk the
-	   list to search for one in write_begin */
-	if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
-		list_add_tail(&pCifsFile->flist,
-			      &pCifsInode->openFileList);
-	} else {
-		list_add(&pCifsFile->flist,
-			 &pCifsInode->openFileList);
-	}
-	write_unlock(&GlobalSMBSeslock);
 	if (pCifsInode->clientCanCacheRead) {
 		/* we have the inode open somewhere else
 		   no need to discard cache data */
@@ -279,7 +250,8 @@
 int cifs_open(struct inode *inode, struct file *file)
 {
 	int rc = -EACCES;
-	int xid, oplock;
+	int xid;
+	__u32 oplock;
 	struct cifs_sb_info *cifs_sb;
 	struct cifsTconInfo *tcon;
 	struct cifsFileInfo *pCifsFile;
@@ -324,7 +296,7 @@
 			le64_to_cpu(tcon->fsUnixInfo.Capability))) {
 		int oflags = (int) cifs_posix_convert_flags(file->f_flags);
 		/* can not refresh inode info since size could be stale */
-		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
+		rc = cifs_posix_open(full_path, &inode, file->f_path.mnt,
 				     cifs_sb->mnt_file_mode /* ignored */,
 				     oflags, &oplock, &netfid, xid);
 		if (rc == 0) {
@@ -414,24 +386,17 @@
 		cFYI(1, ("cifs_open returned 0x%x", rc));
 		goto out;
 	}
-	file->private_data =
-		kmalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
+
+	pCifsFile = cifs_new_fileinfo(inode, netfid, file, file->f_path.mnt,
+					file->f_flags);
+	file->private_data = pCifsFile;
 	if (file->private_data == NULL) {
 		rc = -ENOMEM;
 		goto out;
 	}
-	pCifsFile = cifs_init_private(file->private_data, inode, file, netfid);
-	write_lock(&GlobalSMBSeslock);
-	list_add(&pCifsFile->tlist, &tcon->openFileList);
 
-	pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
-	if (pCifsInode) {
-		rc = cifs_open_inode_helper(inode, file, pCifsInode,
-					    pCifsFile, tcon,
-					    &oplock, buf, full_path, xid);
-	} else {
-		write_unlock(&GlobalSMBSeslock);
-	}
+	rc = cifs_open_inode_helper(inode, file, pCifsInode, pCifsFile, tcon,
+				    &oplock, buf, full_path, xid);
 
 	if (oplock & CIFS_CREATE_ACTION) {
 		/* time to set mode which we can not set earlier due to
@@ -474,7 +439,8 @@
 static int cifs_reopen_file(struct file *file, bool can_flush)
 {
 	int rc = -EACCES;
-	int xid, oplock;
+	int xid;
+	__u32 oplock;
 	struct cifs_sb_info *cifs_sb;
 	struct cifsTconInfo *tcon;
 	struct cifsFileInfo *pCifsFile;
@@ -543,7 +509,7 @@
 			le64_to_cpu(tcon->fsUnixInfo.Capability))) {
 		int oflags = (int) cifs_posix_convert_flags(file->f_flags);
 		/* can not refresh inode info since size could be stale */
-		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
+		rc = cifs_posix_open(full_path, NULL, file->f_path.mnt,
 				     cifs_sb->mnt_file_mode /* ignored */,
 				     oflags, &oplock, &netfid, xid);
 		if (rc == 0) {
@@ -2308,6 +2274,73 @@
 	return rc;
 }
 
+static void
+cifs_oplock_break(struct slow_work *work)
+{
+	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
+						  oplock_break);
+	struct inode *inode = cfile->pInode;
+	struct cifsInodeInfo *cinode = CIFS_I(inode);
+	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->mnt->mnt_sb);
+	int rc, waitrc = 0;
+
+	if (inode && S_ISREG(inode->i_mode)) {
+#ifdef CONFIG_CIFS_EXPERIMENTAL
+		if (cinode->clientCanCacheAll == 0)
+			break_lease(inode, FMODE_READ);
+		else if (cinode->clientCanCacheRead == 0)
+			break_lease(inode, FMODE_WRITE);
+#endif
+		rc = filemap_fdatawrite(inode->i_mapping);
+		if (cinode->clientCanCacheRead == 0) {
+			waitrc = filemap_fdatawait(inode->i_mapping);
+			invalidate_remote_inode(inode);
+		}
+		if (!rc)
+			rc = waitrc;
+		if (rc)
+			cinode->write_behind_rc = rc;
+		cFYI(1, ("Oplock flush inode %p rc %d", inode, rc));
+	}
+
+	/*
+	 * Releasing a stale oplock after a recent reconnect of the SMB
+	 * session, using a now-incorrect file handle, is not a data
+	 * integrity issue.  But do not bother sending an oplock release
+	 * if the session to the server is still disconnected, since the
+	 * oplock has already been released by the server in that case.
+	 */
+	if (!cfile->closePend && !cfile->oplock_break_cancelled) {
+		rc = CIFSSMBLock(0, cifs_sb->tcon, cfile->netfid, 0, 0, 0, 0,
+				 LOCKING_ANDX_OPLOCK_RELEASE, false);
+		cFYI(1, ("Oplock release rc = %d", rc));
+	}
+}
+
+static int
+cifs_oplock_break_get(struct slow_work *work)
+{
+	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
+						  oplock_break);
+	mntget(cfile->mnt);
+	cifsFileInfo_get(cfile);
+	return 0;
+}
+
+static void
+cifs_oplock_break_put(struct slow_work *work)
+{
+	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
+						  oplock_break);
+	mntput(cfile->mnt);
+	cifsFileInfo_put(cfile);
+}
+
+const struct slow_work_ops cifs_oplock_break_ops = {
+	.get_ref	= cifs_oplock_break_get,
+	.put_ref	= cifs_oplock_break_put,
+	.execute	= cifs_oplock_break,
+};
+
 const struct address_space_operations cifs_addr_ops = {
 	.readpage = cifs_readpage,
 	.readpages = cifs_readpages,
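A condensed sketch of the slow_work pattern the CIFS changes above adopt in place of the dedicated oplockd thread. The my_* names are hypothetical; slow_work_init(), slow_work_enqueue(), slow_work_register_user() and the get_ref/put_ref/execute ops layout are exactly what this patch uses for cifs_oplock_break_ops.

#include <linux/slow-work.h>
#include <linux/kernel.h>

struct my_item {
	struct slow_work work;	/* embedded, like cifsFileInfo.oplock_break */
};

static int my_get_ref(struct slow_work *work)
{
	/* pin whatever keeps the containing object alive
	 * (cf. mntget()/cifsFileInfo_get() above) */
	return 0;
}

static void my_put_ref(struct slow_work *work)
{
	/* drop the references taken in my_get_ref() */
}

static void my_execute(struct slow_work *work)
{
	struct my_item *item = container_of(work, struct my_item, work);

	/* the long-running part, run by the shared slow-work threads */
	(void)item;
}

static const struct slow_work_ops my_ops = {
	.get_ref	= my_get_ref,
	.put_ref	= my_put_ref,
	.execute	= my_execute,
};

/* once per user (cf. init_cifs above): rc = slow_work_register_user();
 * per object:  slow_work_init(&item->work, &my_ops);
 * on event:    rc = slow_work_enqueue(&item->work); */
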
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index e079a91..0241b25 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -32,7 +32,6 @@
 
 extern mempool_t *cifs_sm_req_poolp;
 extern mempool_t *cifs_req_poolp;
-extern struct task_struct *oplockThread;
 
 /* The xid serves as a useful identifier for each incoming vfs request,
    in a similar way to the mid which is useful to track each sent smb,
@@ -500,6 +499,7 @@
 	struct cifsTconInfo *tcon;
 	struct cifsInodeInfo *pCifsInode;
 	struct cifsFileInfo *netfile;
+	int rc;
 
 	cFYI(1, ("Checking for oplock break or dnotify response"));
 	if ((pSMB->hdr.Command == SMB_COM_NT_TRANSACT) &&
@@ -562,30 +562,40 @@
 				continue;
 
 			cifs_stats_inc(&tcon->num_oplock_brks);
-			write_lock(&GlobalSMBSeslock);
+			read_lock(&GlobalSMBSeslock);
 			list_for_each(tmp2, &tcon->openFileList) {
 				netfile = list_entry(tmp2, struct cifsFileInfo,
 						     tlist);
 				if (pSMB->Fid != netfile->netfid)
 					continue;
 
-				write_unlock(&GlobalSMBSeslock);
-				read_unlock(&cifs_tcp_ses_lock);
+				/*
+				 * Don't do anything if the file is about to
+				 * be closed anyway.
+				 */
+				if (netfile->closePend) {
+					read_unlock(&GlobalSMBSeslock);
+					read_unlock(&cifs_tcp_ses_lock);
+					return true;
+				}
+
 				cFYI(1, ("file id match, oplock break"));
 				pCifsInode = CIFS_I(netfile->pInode);
 				pCifsInode->clientCanCacheAll = false;
 				if (pSMB->OplockLevel == 0)
 					pCifsInode->clientCanCacheRead = false;
-				pCifsInode->oplockPending = true;
-				AllocOplockQEntry(netfile->pInode,
-						  netfile->netfid, tcon);
-				cFYI(1, ("about to wake up oplock thread"));
-				if (oplockThread)
-					wake_up_process(oplockThread);
-
+				rc = slow_work_enqueue(&netfile->oplock_break);
+				if (rc) {
+					cERROR(1, ("failed to enqueue oplock "
+						   "break: %d\n", rc));
+				} else {
+					netfile->oplock_break_cancelled = false;
+				}
+				read_unlock(&GlobalSMBSeslock);
+				read_unlock(&cifs_tcp_ses_lock);
 				return true;
 			}
-			write_unlock(&GlobalSMBSeslock);
+			read_unlock(&GlobalSMBSeslock);
 			read_unlock(&cifs_tcp_ses_lock);
 			cFYI(1, ("No matching file for oplock break"));
 			return true;
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index f823a4a..1f098ca 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -146,7 +146,7 @@
 	}
 }
 
-void
+static void
 cifs_dir_info_to_fattr(struct cifs_fattr *fattr, FILE_DIRECTORY_INFO *info,
 		       struct cifs_sb_info *cifs_sb)
 {
@@ -161,7 +161,7 @@
 	cifs_fill_common_info(fattr, cifs_sb);
 }
 
-void
+static void
 cifs_std_info_to_fattr(struct cifs_fattr *fattr, FIND_FILE_STANDARD_INFO *info,
 		       struct cifs_sb_info *cifs_sb)
 {
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 1da4ab2..07b8e71 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -103,56 +103,6 @@
 	mempool_free(midEntry, cifs_mid_poolp);
 }
 
-struct oplock_q_entry *
-AllocOplockQEntry(struct inode *pinode, __u16 fid, struct cifsTconInfo *tcon)
-{
-	struct oplock_q_entry *temp;
-	if ((pinode == NULL) || (tcon == NULL)) {
-		cERROR(1, ("Null parms passed to AllocOplockQEntry"));
-		return NULL;
-	}
-	temp = (struct oplock_q_entry *) kmem_cache_alloc(cifs_oplock_cachep,
-						       GFP_KERNEL);
-	if (temp == NULL)
-		return temp;
-	else {
-		temp->pinode = pinode;
-		temp->tcon = tcon;
-		temp->netfid = fid;
-		spin_lock(&cifs_oplock_lock);
-		list_add_tail(&temp->qhead, &cifs_oplock_list);
-		spin_unlock(&cifs_oplock_lock);
-	}
-	return temp;
-}
-
-void DeleteOplockQEntry(struct oplock_q_entry *oplockEntry)
-{
-	spin_lock(&cifs_oplock_lock);
-    /* should we check if list empty first? */
-	list_del(&oplockEntry->qhead);
-	spin_unlock(&cifs_oplock_lock);
-	kmem_cache_free(cifs_oplock_cachep, oplockEntry);
-}
-
-
-void DeleteTconOplockQEntries(struct cifsTconInfo *tcon)
-{
-	struct oplock_q_entry *temp;
-
-	if (tcon == NULL)
-		return;
-
-	spin_lock(&cifs_oplock_lock);
-	list_for_each_entry(temp, &cifs_oplock_list, qhead) {
-		if ((temp->tcon) && (temp->tcon == tcon)) {
-			list_del(&temp->qhead);
-			kmem_cache_free(cifs_oplock_cachep, temp);
-		}
-	}
-	spin_unlock(&cifs_oplock_lock);
-}
-
 static int
 smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
 {
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 8e1e5e1..9d5360c 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -41,8 +41,9 @@
 	long nr_pages;
 	struct super_block *sb;
 	enum writeback_sync_modes sync_mode;
-	int for_kupdate;
-	int range_cyclic;
+	int for_kupdate:1;
+	int range_cyclic:1;
+	int for_background:1;
 };
 
 /*
@@ -249,14 +250,25 @@
  *   completion. Caller need not hold sb s_umount semaphore.
  *
  */
-void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages)
+void bdi_start_writeback(struct backing_dev_info *bdi, struct super_block *sb,
+			 long nr_pages)
 {
 	struct wb_writeback_args args = {
+		.sb		= sb,
 		.sync_mode	= WB_SYNC_NONE,
 		.nr_pages	= nr_pages,
 		.range_cyclic	= 1,
 	};
 
+	/*
+	 * We treat @nr_pages=0 as the special case to do background writeback,
+	 * i.e. to sync pages until the background dirty threshold is reached.
+	 */
+	if (!nr_pages) {
+		args.nr_pages = LONG_MAX;
+		args.for_background = 1;
+	}
+
 	bdi_alloc_queue_work(bdi, &args);
 }
 
@@ -310,7 +322,7 @@
 	 * For inodes being constantly redirtied, dirtied_when can get stuck.
 	 * It _appears_ to be in the future, but is actually in distant past.
 	 * This test is necessary to prevent such wrapped-around relative times
-	 * from permanently stopping the whole pdflush writeback.
+	 * from permanently stopping the whole bdi writeback.
 	 */
 	ret = ret && time_before_eq(inode->dirtied_when, jiffies);
 #endif
@@ -324,13 +336,38 @@
 			       struct list_head *dispatch_queue,
 				unsigned long *older_than_this)
 {
+	LIST_HEAD(tmp);
+	struct list_head *pos, *node;
+	struct super_block *sb = NULL;
+	struct inode *inode;
+	int do_sb_sort = 0;
+
 	while (!list_empty(delaying_queue)) {
-		struct inode *inode = list_entry(delaying_queue->prev,
-						struct inode, i_list);
+		inode = list_entry(delaying_queue->prev, struct inode, i_list);
 		if (older_than_this &&
 		    inode_dirtied_after(inode, *older_than_this))
 			break;
-		list_move(&inode->i_list, dispatch_queue);
+		if (sb && sb != inode->i_sb)
+			do_sb_sort = 1;
+		sb = inode->i_sb;
+		list_move(&inode->i_list, &tmp);
+	}
+
+	/* just one sb in list, splice to dispatch_queue and we're done */
+	if (!do_sb_sort) {
+		list_splice(&tmp, dispatch_queue);
+		return;
+	}
+
+	/* Move inodes from one superblock together */
+	while (!list_empty(&tmp)) {
+		inode = list_entry(tmp.prev, struct inode, i_list);
+		sb = inode->i_sb;
+		list_for_each_prev_safe(pos, node, &tmp) {
+			inode = list_entry(pos, struct inode, i_list);
+			if (inode->i_sb == sb)
+				list_move(&inode->i_list, dispatch_queue);
+		}
 	}
 }
 
@@ -439,8 +476,18 @@
 	spin_lock(&inode_lock);
 	inode->i_state &= ~I_SYNC;
 	if (!(inode->i_state & (I_FREEING | I_CLEAR))) {
-		if (!(inode->i_state & I_DIRTY) &&
-		    mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
+		if ((inode->i_state & I_DIRTY_PAGES) && wbc->for_kupdate) {
+			/*
+			 * More pages get dirtied by a fast dirtier.
+			 */
+			goto select_queue;
+		} else if (inode->i_state & I_DIRTY) {
+			/*
+			 * At least XFS will redirty the inode during the
+			 * writeback (delalloc) and on io completion (isize).
+			 */
+			redirty_tail(inode);
+		} else if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
 			/*
 			 * We didn't write back all the pages.  nfs_writepages()
 			 * sometimes bales out without doing anything. Redirty
@@ -462,6 +509,7 @@
 				 * soon as the queue becomes uncongested.
 				 */
 				inode->i_state |= I_DIRTY_PAGES;
+select_queue:
 				if (wbc->nr_to_write <= 0) {
 					/*
 					 * slice used up: queue for next turn
@@ -484,12 +532,6 @@
 				inode->i_state |= I_DIRTY_PAGES;
 				redirty_tail(inode);
 			}
-		} else if (inode->i_state & I_DIRTY) {
-			/*
-			 * Someone redirtied the inode while were writing back
-			 * the pages.
-			 */
-			redirty_tail(inode);
 		} else if (atomic_read(&inode->i_count)) {
 			/*
 			 * The inode is clean, inuse
@@ -506,6 +548,17 @@
 	return ret;
 }
 
+static void unpin_sb_for_writeback(struct super_block **psb)
+{
+	struct super_block *sb = *psb;
+
+	if (sb) {
+		up_read(&sb->s_umount);
+		put_super(sb);
+		*psb = NULL;
+	}
+}
+
 /*
  * For WB_SYNC_NONE writeback, the caller does not have the sb pinned
  * before calling writeback. So make sure that we do pin it, so it doesn't
@@ -515,11 +568,20 @@
  * 1 if we failed.
  */
 static int pin_sb_for_writeback(struct writeback_control *wbc,
-				   struct inode *inode)
+				struct inode *inode, struct super_block **psb)
 {
 	struct super_block *sb = inode->i_sb;
 
 	/*
+	 * If this sb is already pinned, nothing more to do. If not and
+	 * *psb is non-NULL, unpin the old one first
+	 */
+	if (sb == *psb)
+		return 0;
+	else if (*psb)
+		unpin_sb_for_writeback(psb);
+
+	/*
 	 * Caller must already hold the ref for this
 	 */
 	if (wbc->sync_mode == WB_SYNC_ALL) {
@@ -532,7 +594,7 @@
 	if (down_read_trylock(&sb->s_umount)) {
 		if (sb->s_root) {
 			spin_unlock(&sb_lock);
-			return 0;
+			goto pinned;
 		}
 		/*
 		 * umounted, drop rwsem again and fall through to failure
@@ -543,24 +605,15 @@
 	sb->s_count--;
 	spin_unlock(&sb_lock);
 	return 1;
-}
-
-static void unpin_sb_for_writeback(struct writeback_control *wbc,
-				   struct inode *inode)
-{
-	struct super_block *sb = inode->i_sb;
-
-	if (wbc->sync_mode == WB_SYNC_ALL)
-		return;
-
-	up_read(&sb->s_umount);
-	put_super(sb);
+pinned:
+	*psb = sb;
+	return 0;
 }
 
 static void writeback_inodes_wb(struct bdi_writeback *wb,
 				struct writeback_control *wbc)
 {
-	struct super_block *sb = wbc->sb;
+	struct super_block *sb = wbc->sb, *pin_sb = NULL;
 	const int is_blkdev_sb = sb_is_blkdev_sb(sb);
 	const unsigned long start = jiffies;	/* livelock avoidance */
 
@@ -619,7 +672,7 @@
 		if (inode_dirtied_after(inode, start))
 			break;
 
-		if (pin_sb_for_writeback(wbc, inode)) {
+		if (pin_sb_for_writeback(wbc, inode, &pin_sb)) {
 			requeue_io(inode);
 			continue;
 		}
@@ -628,7 +681,6 @@
 		__iget(inode);
 		pages_skipped = wbc->pages_skipped;
 		writeback_single_inode(inode, wbc);
-		unpin_sb_for_writeback(wbc, inode);
 		if (wbc->pages_skipped != pages_skipped) {
 			/*
 			 * writeback is not making progress due to locked
@@ -648,6 +700,8 @@
 			wbc->more_io = 1;
 	}
 
+	unpin_sb_for_writeback(&pin_sb);
+
 	spin_unlock(&inode_lock);
 	/* Leave any unwritten inodes on b_io */
 }
@@ -706,6 +760,7 @@
 	};
 	unsigned long oldest_jif;
 	long wrote = 0;
+	struct inode *inode;
 
 	if (wbc.for_kupdate) {
 		wbc.older_than_this = &oldest_jif;
@@ -719,20 +774,16 @@
 
 	for (;;) {
 		/*
-		 * Don't flush anything for non-integrity writeback where
-		 * no nr_pages was given
+		 * Stop writeback when nr_pages has been consumed
 		 */
-		if (!args->for_kupdate && args->nr_pages <= 0 &&
-		     args->sync_mode == WB_SYNC_NONE)
+		if (args->nr_pages <= 0)
 			break;
 
 		/*
-		 * If no specific pages were given and this is just a
-		 * periodic background writeout and we are below the
-		 * background dirty threshold, don't do anything
+		 * For background writeout, stop when we are below the
+		 * background dirty threshold
 		 */
-		if (args->for_kupdate && args->nr_pages <= 0 &&
-		    !over_bground_thresh())
+		if (args->for_background && !over_bground_thresh())
 			break;
 
 		wbc.more_io = 0;
@@ -744,13 +795,32 @@
 		wrote += MAX_WRITEBACK_PAGES - wbc.nr_to_write;
 
 		/*
-		 * If we ran out of stuff to write, bail unless more_io got set
+		 * If we consumed everything, see if we have more
 		 */
-		if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) {
-			if (wbc.more_io && !wbc.for_kupdate)
-				continue;
+		if (wbc.nr_to_write <= 0)
+			continue;
+		/*
+		 * Didn't write everything and we don't have more IO, bail
+		 */
+		if (!wbc.more_io)
 			break;
+		/*
+		 * Did we write something? Try for more
+		 */
+		if (wbc.nr_to_write < MAX_WRITEBACK_PAGES)
+			continue;
+		/*
+		 * Nothing written. Wait for some inode to
+		 * become available for writeback. Otherwise
+		 * we'll just busyloop.
+		 */
+		spin_lock(&inode_lock);
+		if (!list_empty(&wb->b_more_io))  {
+			inode = list_entry(wb->b_more_io.prev,
+						struct inode, i_list);
+			inode_wait_for_writeback(inode);
 		}
+		spin_unlock(&inode_lock);
 	}
 
 	return wrote;
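
The rewritten loop above replaces the old "bail unless more_io got set" test with a four-way decision: stop when the page budget is gone, go again if a full chunk was written, stop if nothing more is pending, retry if partial progress was made, and otherwise wait on an inode instead of busylooping. Below is a hedged, standalone sketch of just that decision ladder; write_round() is a fake stand-in for one writeback pass, and the inode wait is simplified to a break.

/* Hedged sketch of the decision ladder used in wb_writeback() above. */
#include <stdio.h>
#include <stdbool.h>

#define MAX_WRITEBACK_PAGES 1024

struct round { long written; bool more_io; };

static struct round write_round(int step)
{
	/* fake results: each pass writes less, more_io clears at step 3 */
	struct round r = { MAX_WRITEBACK_PAGES >> step, step < 3 };
	return r;
}

int main(void)
{
	long nr_pages = 4096, wrote = 0;
	int step = 0;

	for (;;) {
		if (nr_pages <= 0)
			break;			/* budget consumed */

		struct round r = write_round(step++);
		nr_pages -= r.written;
		wrote += r.written;

		if (r.written >= MAX_WRITEBACK_PAGES)
			continue;		/* consumed everything: see if there is more */
		if (!r.more_io)
			break;			/* didn't write everything, no more IO: done */
		if (r.written > 0)
			continue;		/* wrote something: try for more */
		break;				/* nothing written: real code waits on an inode */
	}

	printf("wrote %ld\n", wrote);
	return 0;
}
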
@@ -1060,9 +1130,6 @@
  * If older_than_this is non-NULL, then only write out inodes which
  * had their first dirtying at a time earlier than *older_than_this.
  *
- * If we're a pdlfush thread, then implement pdflush collision avoidance
- * against the entire list.
- *
  * If `bdi' is non-zero then we're being asked to writeback a specific queue.
  * This function assumes that the blockdev superblock's inodes are backed by
  * a variety of queues, so all inodes are searched.  For other superblocks,
@@ -1141,7 +1208,7 @@
 	nr_to_write = nr_dirty + nr_unstable +
 			(inodes_stat.nr_inodes - inodes_stat.nr_unused);
 
-	bdi_writeback_all(sb, nr_to_write);
+	bdi_start_writeback(sb->s_bdi, sb, nr_to_write);
 }
 EXPORT_SYMBOL(writeback_inodes_sb);
 
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 1cef139..3cd9ccd 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -70,7 +70,6 @@
 	ACPI_BUS_TYPE_POWER,
 	ACPI_BUS_TYPE_PROCESSOR,
 	ACPI_BUS_TYPE_THERMAL,
-	ACPI_BUS_TYPE_SYSTEM,
 	ACPI_BUS_TYPE_POWER_BUTTON,
 	ACPI_BUS_TYPE_SLEEP_BUTTON,
 	ACPI_BUS_DEVICE_TYPE_COUNT
@@ -142,10 +141,7 @@
 
 struct acpi_device_flags {
 	u32 dynamic_status:1;
-	u32 hardware_id:1;
-	u32 compatible_ids:1;
 	u32 bus_address:1;
-	u32 unique_id:1;
 	u32 removable:1;
 	u32 ejectable:1;
 	u32 lockable:1;
@@ -154,7 +150,7 @@
 	u32 performance_manageable:1;
 	u32 wake_capable:1;	/* Wakeup(_PRW) supported? */
 	u32 force_power_state:1;
-	u32 reserved:19;
+	u32 reserved:22;
 };
 
 /* File System */
@@ -172,20 +168,23 @@
 typedef char acpi_device_name[40];
 typedef char acpi_device_class[20];
 
+struct acpi_hardware_id {
+	struct list_head list;
+	char *id;
+};
+
 struct acpi_device_pnp {
 	acpi_bus_id bus_id;	/* Object name */
 	acpi_bus_address bus_address;	/* _ADR */
-	char *hardware_id;	/* _HID */
-	struct acpica_device_id_list *cid_list;	/* _CIDs */
 	char *unique_id;	/* _UID */
+	struct list_head ids;		/* _HID and _CIDs */
 	acpi_device_name device_name;	/* Driver-determined */
 	acpi_device_class device_class;	/*        "          */
 };
 
 #define acpi_device_bid(d)	((d)->pnp.bus_id)
 #define acpi_device_adr(d)	((d)->pnp.bus_address)
-#define acpi_device_hid(d)	((d)->pnp.hardware_id)
-#define acpi_device_uid(d)	((d)->pnp.unique_id)
+char *acpi_device_hid(struct acpi_device *device);
 #define acpi_device_name(d)	((d)->pnp.device_name)
 #define acpi_device_class(d)	((d)->pnp.device_class)
 
@@ -262,7 +261,8 @@
 /* Device */
 
 struct acpi_device {
-	acpi_handle handle;
+	int device_type;
+	acpi_handle handle;		/* no handle for fixed hardware */
 	struct acpi_device *parent;
 	struct list_head children;
 	struct list_head node;
@@ -322,6 +322,8 @@
 
 int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device);
 void acpi_bus_data_handler(acpi_handle handle, void *context);
+acpi_status acpi_bus_get_status_handle(acpi_handle handle,
+				       unsigned long long *sta);
 int acpi_bus_get_status(struct acpi_device *device);
 int acpi_bus_get_power(acpi_handle handle, int *state);
 int acpi_bus_set_power(acpi_handle handle, int state);
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 0ee33c2..b449e73 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -101,7 +101,8 @@
 		const char *fmt, ...);
 int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
 void bdi_unregister(struct backing_dev_info *bdi);
-void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages);
+void bdi_start_writeback(struct backing_dev_info *bdi, struct super_block *sb,
+				long nr_pages);
 int bdi_writeback_task(struct bdi_writeback *wb);
 int bdi_has_dirty_io(struct backing_dev_info *bdi);
 
diff --git a/include/linux/backlight.h b/include/linux/backlight.h
index 79ca2da..0f5f578 100644
--- a/include/linux/backlight.h
+++ b/include/linux/backlight.h
@@ -27,6 +27,11 @@
  * Any other use of the locks below is probably wrong.
  */
 
+enum backlight_update_reason {
+	BACKLIGHT_UPDATE_HOTKEY,
+	BACKLIGHT_UPDATE_SYSFS,
+};
+
 struct backlight_device;
 struct fb_info;
 
@@ -100,6 +105,8 @@
 extern struct backlight_device *backlight_device_register(const char *name,
 	struct device *dev, void *devdata, struct backlight_ops *ops);
 extern void backlight_device_unregister(struct backlight_device *bd);
+extern void backlight_force_update(struct backlight_device *bd,
+				   enum backlight_update_reason reason);
 
 #define to_backlight_device(obj) container_of(obj, struct backlight_device, dev)
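
The new backlight_force_update() hook declared above is meant for cases where firmware changes the brightness behind the kernel's back (for example a brightness hotkey handled in SMM) and the backlight core should re-read and re-announce the current level. A hedged sketch of a caller; the handler name is hypothetical, and 'bd' is assumed to have been saved from backlight_device_register() at probe time.

/* Hypothetical hotkey notifier body (assumes <linux/backlight.h>):
 * the firmware already changed the brightness, so ask the backlight
 * core to refresh its view, tagging the reason as a hotkey event. */
static void example_brightness_hotkey_notify(struct backlight_device *bd)
{
	backlight_force_update(bd, BACKLIGHT_UPDATE_HOTKEY);
}
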
 
diff --git a/include/linux/ipc.h b/include/linux/ipc.h
index b882610..3b1594d 100644
--- a/include/linux/ipc.h
+++ b/include/linux/ipc.h
@@ -78,8 +78,6 @@
 #define IPCCALL(version,op)	((version)<<16 | (op))
 
 #ifdef __KERNEL__
-
-#include <linux/kref.h>
 #include <linux/spinlock.h>
 
 #define IPCMNI 32768  /* <= MAX_INT limit for ipc arrays (including sysctl changes) */
diff --git a/include/linux/kref.h b/include/linux/kref.h
index 0cef6ba..b0cb0eb 100644
--- a/include/linux/kref.h
+++ b/include/linux/kref.h
@@ -16,7 +16,6 @@
 #define _KREF_H_
 
 #include <linux/types.h>
-#include <asm/atomic.h>
 
 struct kref {
 	atomic_t refcount;
diff --git a/include/linux/mfd/wm831x/status.h b/include/linux/mfd/wm831x/status.h
new file mode 100644
index 0000000..6bc090d
--- /dev/null
+++ b/include/linux/mfd/wm831x/status.h
@@ -0,0 +1,34 @@
+/*
+ * include/linux/mfd/wm831x/status.h -- Status LEDs for WM831x
+ *
+ * Copyright 2009 Wolfson Microelectronics PLC.
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ *
+ */
+
+#ifndef __MFD_WM831X_STATUS_H__
+#define __MFD_WM831X_STATUS_H__
+
+#define WM831X_LED_SRC_MASK                    0xC000  /* LED_SRC - [15:14] */
+#define WM831X_LED_SRC_SHIFT                       14  /* LED_SRC - [15:14] */
+#define WM831X_LED_SRC_WIDTH                        2  /* LED_SRC - [15:14] */
+#define WM831X_LED_MODE_MASK                   0x0300  /* LED_MODE - [9:8] */
+#define WM831X_LED_MODE_SHIFT                       8  /* LED_MODE - [9:8] */
+#define WM831X_LED_MODE_WIDTH                       2  /* LED_MODE - [9:8] */
+#define WM831X_LED_SEQ_LEN_MASK                0x0030  /* LED_SEQ_LEN - [5:4] */
+#define WM831X_LED_SEQ_LEN_SHIFT                    4  /* LED_SEQ_LEN - [5:4] */
+#define WM831X_LED_SEQ_LEN_WIDTH                    2  /* LED_SEQ_LEN - [5:4] */
+#define WM831X_LED_DUR_MASK                    0x000C  /* LED_DUR - [3:2] */
+#define WM831X_LED_DUR_SHIFT                        2  /* LED_DUR - [3:2] */
+#define WM831X_LED_DUR_WIDTH                        2  /* LED_DUR - [3:2] */
+#define WM831X_LED_DUTY_CYC_MASK               0x0003  /* LED_DUTY_CYC - [1:0] */
+#define WM831X_LED_DUTY_CYC_SHIFT                   0  /* LED_DUTY_CYC - [1:0] */
+#define WM831X_LED_DUTY_CYC_WIDTH                   2  /* LED_DUTY_CYC - [1:0] */
+
+#endif
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index f6b9024..d09db1b 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -40,7 +40,6 @@
 #ifdef __KERNEL__
 
 #include <linux/in.h>
-#include <linux/kref.h>
 #include <linux/mm.h>
 #include <linux/pagemap.h>
 #include <linux/rbtree.h>
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 368bd70..7b7fbf4 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -361,7 +361,7 @@
 	 *	struct perf_event_header	header;
 	 *	u32				pid, ppid;
 	 *	u32				tid, ptid;
-	 *	{ u64				time;     } && PERF_SAMPLE_TIME
+	 *	u64				time;
 	 * };
 	 */
 	PERF_EVENT_FORK			= 7,
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index acefaf7..3a9d36d 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -357,7 +357,7 @@
 	 *	struct perf_event_header	header;
 	 *	u32				pid, ppid;
 	 *	u32				tid, ptid;
-	 *	{ u64				time;     } && PERF_SAMPLE_TIME
+	 *	u64				time;
 	 * };
 	 */
 	PERF_RECORD_FORK			= 7,
diff --git a/include/linux/spi/lms283gf05.h b/include/linux/spi/lms283gf05.h
new file mode 100644
index 0000000..555d254
--- /dev/null
+++ b/include/linux/spi/lms283gf05.h
@@ -0,0 +1,28 @@
+/*
+ * lms283gf05.h - Platform glue for Samsung LMS283GF05 LCD
+ *
+ * Copyright (C) 2009 Marek Vasut <marek.vasut@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+*/
+
+#ifndef _INCLUDE_LINUX_SPI_LMS283GF05_H_
+#define _INCLUDE_LINUX_SPI_LMS283GF05_H_
+
+struct lms283gf05_pdata {
+	unsigned long	reset_gpio;
+	bool		reset_inverted;
+};
+
+#endif /* _INCLUDE_LINUX_SPI_LMS283GF05_H_ */
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 660a9de..2aac8a8 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -36,7 +36,7 @@
 #ifndef DECLARE_TRACE
 
 #define TP_PROTO(args...)	args
-#define TP_ARGS(args...)		args
+#define TP_ARGS(args...)	args
 
 #ifdef CONFIG_TRACEPOINTS
 
diff --git a/include/trace/events/workqueue.h b/include/trace/events/workqueue.h
index fcfd9a1..e4612db 100644
--- a/include/trace/events/workqueue.h
+++ b/include/trace/events/workqueue.h
@@ -26,7 +26,7 @@
 		__entry->func		= work->func;
 	),
 
-	TP_printk("thread=%s:%d func=%pF", __entry->thread_comm,
+	TP_printk("thread=%s:%d func=%pf", __entry->thread_comm,
 		__entry->thread_pid, __entry->func)
 );
 
@@ -48,7 +48,7 @@
 		__entry->func		= work->func;
 	),
 
-	TP_printk("thread=%s:%d func=%pF", __entry->thread_comm,
+	TP_printk("thread=%s:%d func=%pf", __entry->thread_comm,
 		__entry->thread_pid, __entry->func)
 );
 
diff --git a/kernel/futex.c b/kernel/futex.c
index 248dd11..b911adc 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -89,36 +89,36 @@
 	union futex_key key;
 };
 
-/*
- * We use this hashed waitqueue instead of a normal wait_queue_t, so
+/**
+ * struct futex_q - The hashed futex queue entry, one per waiting task
+ * @task:		the task waiting on the futex
+ * @lock_ptr:		the hash bucket lock
+ * @key:		the key the futex is hashed on
+ * @pi_state:		optional priority inheritance state
+ * @rt_waiter:		rt_waiter storage for use with requeue_pi
+ * @requeue_pi_key:	the requeue_pi target futex key
+ * @bitset:		bitset for the optional bitmasked wakeup
+ *
+ * We use this hashed waitqueue, instead of a normal wait_queue_t, so
  * we can wake only the relevant ones (hashed queues may be shared).
  *
  * A futex_q has a woken state, just like tasks have TASK_RUNNING.
  * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
 * The order of wakeup is always to make the first condition true, then
- * wake up q->waiter, then make the second condition true.
+ * the second.
+ *
+ * PI futexes are typically woken before they are removed from the hash list via
+ * the rt_mutex code. See unqueue_me_pi().
  */
 struct futex_q {
 	struct plist_node list;
-	/* Waiter reference */
+
 	struct task_struct *task;
-
-	/* Which hash list lock to use: */
 	spinlock_t *lock_ptr;
-
-	/* Key which the futex is hashed on: */
 	union futex_key key;
-
-	/* Optional priority inheritance state: */
 	struct futex_pi_state *pi_state;
-
-	/* rt_waiter storage for requeue_pi: */
 	struct rt_mutex_waiter *rt_waiter;
-
-	/* The expected requeue pi target futex key: */
 	union futex_key *requeue_pi_key;
-
-	/* Bitset for the optional bitmasked wakeup */
 	u32 bitset;
 };
 
@@ -198,11 +198,12 @@
 }
 
 /**
- * get_futex_key - Get parameters which are the keys for a futex.
- * @uaddr: virtual address of the futex
- * @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
- * @key: address where result is stored.
- * @rw: mapping needs to be read/write (values: VERIFY_READ, VERIFY_WRITE)
+ * get_futex_key() - Get parameters which are the keys for a futex
+ * @uaddr:	virtual address of the futex
+ * @fshared:	0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
+ * @key:	address where result is stored.
+ * @rw:		mapping needs to be read/write (values: VERIFY_READ,
+ * 		VERIFY_WRITE)
  *
  * Returns a negative error code or 0
  * The key words are stored in *key on success.
@@ -288,8 +289,8 @@
 	drop_futex_key_refs(key);
 }
 
-/*
- * fault_in_user_writeable - fault in user address and verify RW access
+/**
+ * fault_in_user_writeable() - Fault in user address and verify RW access
  * @uaddr:	pointer to faulting user space address
  *
  * Slow path to fixup the fault we just took in the atomic write
@@ -309,8 +310,8 @@
 
 /**
  * futex_top_waiter() - Return the highest priority waiter on a futex
- * @hb:     the hash bucket the futex_q's reside in
- * @key:    the futex key (to distinguish it from other futex futex_q's)
+ * @hb:		the hash bucket the futex_q's reside in
+ * @key:	the futex key (to distinguish it from other futex futex_q's)
  *
  * Must be called with the hb lock held.
  */
@@ -588,7 +589,7 @@
 }
 
 /**
- * futex_lock_pi_atomic() - atomic work required to acquire a pi aware futex
+ * futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex
  * @uaddr:		the pi futex user address
  * @hb:			the pi futex hash bucket
  * @key:		the futex key associated with uaddr and hb
@@ -1011,9 +1012,9 @@
 
 /**
  * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
- * q:	the futex_q
- * key:	the key of the requeue target futex
- * hb:  the hash_bucket of the requeue target futex
+ * @q:		the futex_q
+ * @key:	the key of the requeue target futex
+ * @hb:		the hash_bucket of the requeue target futex
  *
  * During futex_requeue, with requeue_pi=1, it is possible to acquire the
  * target futex if it is uncontended or via a lock steal.  Set the futex_q key
@@ -1350,6 +1351,25 @@
 	return hb;
 }
 
+static inline void
+queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb)
+{
+	spin_unlock(&hb->lock);
+	drop_futex_key_refs(&q->key);
+}
+
+/**
+ * queue_me() - Enqueue the futex_q on the futex_hash_bucket
+ * @q:	The futex_q to enqueue
+ * @hb:	The destination hash bucket
+ *
+ * The hb->lock must be held by the caller, and is released here. A call to
+ * queue_me() is typically paired with exactly one call to unqueue_me().  The
+ * exceptions involve the PI related operations, which may use unqueue_me_pi()
+ * or nothing if the unqueue is done as part of the wake process and the unqueue
+ * state is implicit in the state of woken task (see futex_wait_requeue_pi() for
+ * an example).
+ */
 static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
 {
 	int prio;
@@ -1373,19 +1393,17 @@
 	spin_unlock(&hb->lock);
 }
 
-static inline void
-queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb)
-{
-	spin_unlock(&hb->lock);
-	drop_futex_key_refs(&q->key);
-}
-
-/*
- * queue_me and unqueue_me must be called as a pair, each
- * exactly once.  They are called with the hashed spinlock held.
+/**
+ * unqueue_me() - Remove the futex_q from its futex_hash_bucket
+ * @q:	The futex_q to unqueue
+ *
+ * The q->lock_ptr must not be held by the caller. A call to unqueue_me() must
+ * be paired with exactly one earlier call to queue_me().
+ *
+ * Returns:
+ *   1 - if the futex_q was still queued (and we unqueued it)
+ *   0 - if the futex_q was already removed by the waking thread
  */
-
-/* Return 1 if we were still queued (ie. 0 means we were woken) */
 static int unqueue_me(struct futex_q *q)
 {
 	spinlock_t *lock_ptr;
@@ -1638,17 +1656,14 @@
 static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
 				struct hrtimer_sleeper *timeout)
 {
-	queue_me(q, hb);
-
 	/*
-	 * There might have been scheduling since the queue_me(), as we
-	 * cannot hold a spinlock across the get_user() in case it
-	 * faults, and we cannot just set TASK_INTERRUPTIBLE state when
-	 * queueing ourselves into the futex hash. This code thus has to
-	 * rely on the futex_wake() code removing us from hash when it
-	 * wakes us up.
+	 * The task state is guaranteed to be set before another task can
+	 * wake it. set_current_state() is implemented using set_mb() and
+	 * queue_me() calls spin_unlock() upon completion, both serializing
+	 * access to the hash list and forcing another memory barrier.
 	 */
 	set_current_state(TASK_INTERRUPTIBLE);
+	queue_me(q, hb);
 
 	/* Arm the timer */
 	if (timeout) {
@@ -1658,8 +1673,8 @@
 	}
 
 	/*
-	 * !plist_node_empty() is safe here without any lock.
-	 * q.lock_ptr != 0 is not safe, because of ordering against wakeup.
+	 * If we have been removed from the hash list, then another task
+	 * has tried to wake us, and we can skip the call to schedule().
 	 */
 	if (likely(!plist_node_empty(&q->list))) {
 		/*
@@ -2114,12 +2129,12 @@
 
 /**
  * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2
- * @uaddr:	the futex we initialyl wait on (non-pi)
+ * @uaddr:	the futex we initially wait on (non-pi)
  * @fshared:	whether the futexes are shared (1) or not (0).  They must be
  * 		the same type, no requeueing from private to shared, etc.
  * @val:	the expected value of uaddr
  * @abs_time:	absolute timeout
- * @bitset:	32 bit wakeup bitset set by userspace, defaults to all.
+ * @bitset:	32 bit wakeup bitset set by userspace, defaults to all
  * @clockrt:	whether to use CLOCK_REALTIME (1) or CLOCK_MONOTONIC (0)
  * @uaddr2:	the pi futex we will take prior to returning to user-space
  *
@@ -2246,7 +2261,7 @@
 		res = fixup_owner(uaddr2, fshared, &q, !ret);
 		/*
 		 * If fixup_owner() returned an error, propagate that.  If it
-		 * acquired the lock, clear our -ETIMEDOUT or -EINTR.
+		 * acquired the lock, clear -ETIMEDOUT or -EINTR.
 		 */
 		if (res)
 			ret = (res < 0) ? res : 0;
@@ -2302,9 +2317,9 @@
  */
 
 /**
- * sys_set_robust_list - set the robust-futex list head of a task
- * @head: pointer to the list-head
- * @len: length of the list-head, as userspace expects
+ * sys_set_robust_list() - Set the robust-futex list head of a task
+ * @head:	pointer to the list-head
+ * @len:	length of the list-head, as userspace expects
  */
 SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
 		size_t, len)
@@ -2323,10 +2338,10 @@
 }
 
 /**
- * sys_get_robust_list - get the robust-futex list head of a task
- * @pid: pid of the process [zero for current task]
- * @head_ptr: pointer to a list-head pointer, the kernel fills it in
- * @len_ptr: pointer to a length field, the kernel fills in the header size
+ * sys_get_robust_list() - Get the robust-futex list head of a task
+ * @pid:	pid of the process [zero for current task]
+ * @head_ptr:	pointer to a list-head pointer, the kernel fills it in
+ * @len_ptr:	pointer to a length field, the kernel fills in the header size
  */
 SYSCALL_DEFINE3(get_robust_list, int, pid,
 		struct robust_list_head __user * __user *, head_ptr,
diff --git a/kernel/module.c b/kernel/module.c
index 5a29397..fe748a8 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -3091,7 +3091,6 @@
 		   struct modversion_info *ver,
 		   struct kernel_param *kp,
 		   struct kernel_symbol *ks,
-		   struct marker *marker,
 		   struct tracepoint *tp)
 {
 }
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 0911334..5e18c6a 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -394,15 +394,11 @@
 {
 	struct clocksource *cs;
 
-	mutex_lock(&clocksource_mutex);
-
 	list_for_each_entry(cs, &clocksource_list, list)
 		if (cs->resume)
 			cs->resume();
 
 	clocksource_resume_watchdog();
-
-	mutex_unlock(&clocksource_mutex);
 }
 
 /**
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index a142579..46592fe 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1621,8 +1621,10 @@
 		if (!ret) {
 			struct seq_file *m = file->private_data;
 			m->private = iter;
-		} else
+		} else {
+			trace_parser_put(&iter->parser);
 			kfree(iter);
+		}
 	} else
 		file->private_data = iter;
 	mutex_unlock(&ftrace_regex_lock);
@@ -2202,7 +2204,7 @@
 	struct trace_parser *parser;
 	ssize_t ret, read;
 
-	if (!cnt || cnt < 0)
+	if (!cnt)
 		return 0;
 
 	mutex_lock(&ftrace_regex_lock);
@@ -2216,7 +2218,7 @@
 	parser = &iter->parser;
 	read = trace_get_user(parser, ubuf, cnt, ppos);
 
-	if (trace_parser_loaded(parser) &&
+	if (read >= 0 && trace_parser_loaded(parser) &&
 	    !trace_parser_cont(parser)) {
 		ret = ftrace_process_regex(parser->buffer,
 					   parser->idx, enable);
@@ -2552,8 +2554,7 @@
 		   size_t cnt, loff_t *ppos)
 {
 	struct trace_parser parser;
-	size_t read = 0;
-	ssize_t ret;
+	ssize_t read, ret;
 
 	if (!cnt || cnt < 0)
 		return 0;
@@ -2562,29 +2563,31 @@
 
 	if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
 		ret = -EBUSY;
-		goto out;
+		goto out_unlock;
 	}
 
 	if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
 		ret = -ENOMEM;
-		goto out;
+		goto out_unlock;
 	}
 
 	read = trace_get_user(&parser, ubuf, cnt, ppos);
 
-	if (trace_parser_loaded((&parser))) {
+	if (read >= 0 && trace_parser_loaded((&parser))) {
 		parser.buffer[parser.idx] = 0;
 
 		/* we allow only one expression at a time */
 		ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
 					parser.buffer);
 		if (ret)
-			goto out;
+			goto out_free;
 	}
 
 	ret = read;
- out:
+
+out_free:
 	trace_parser_put(&parser);
+out_unlock:
 	mutex_unlock(&graph_lock);
 
 	return ret;
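
The hunk above converts the single "out:" exit into a pair of labels so that the parser is only freed when it was actually allocated and the lock is always dropped last. A hedged, standalone sketch of that unwind ordering follows; the buffer and the lock flag are stand-ins for the real trace_parser and graph_lock, not part of the patch.

/* Hedged sketch: release resources in reverse acquisition order via labels. */
#include <stdio.h>
#include <stdlib.h>

static int lock_taken;			/* stand-in for mutex_lock(&graph_lock) */

static int example_write(size_t bufsize, int fail_work)
{
	char *buf;
	int ret;

	lock_taken = 1;

	buf = malloc(bufsize);
	if (!buf) {
		ret = -1;		/* -ENOMEM analogue */
		goto out_unlock;	/* nothing else to free yet */
	}

	ret = fail_work ? -2 : 0;	/* stand-in for the real parse/apply work */
	if (ret)
		goto out_free;

	snprintf(buf, bufsize, "ok");
	puts(buf);

out_free:
	free(buf);
out_unlock:
	lock_taken = 0;
	return ret;
}

int main(void)
{
	printf("%d %d\n", example_write(16, 0), example_write(16, 1));
	return 0;
}
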
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 411af37..4506826 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -415,7 +415,7 @@
 
 	/* read the non-space input */
 	while (cnt && !isspace(ch)) {
-		if (parser->idx < parser->size)
+		if (parser->idx < parser->size - 1)
 			parser->buffer[parser->idx++] = ch;
 		else {
 			ret = -EINVAL;
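
The one-character change above ('parser->size' to 'parser->size - 1') reserves the last byte of the parse buffer for the terminating '\0' that is appended later, instead of allowing the copy loop to fill the buffer completely. A small standalone sketch of the same guard, with hypothetical names:

/* Hedged sketch: stop one byte early so the trailing '\0' always fits. */
#include <stdio.h>

static int copy_token(char *buf, size_t size, const char *src)
{
	size_t idx = 0;

	while (*src && *src != ' ') {
		if (idx < size - 1)	/* reserve buf[size - 1] for '\0' */
			buf[idx++] = *src++;
		else
			return -1;	/* token too long for the buffer */
	}
	buf[idx] = '\0';
	return (int)idx;
}

int main(void)
{
	char buf[8];

	printf("%d %s\n", copy_token(buf, sizeof(buf), "do_fork extra"), buf);
	return 0;
}
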
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 6f03c8a..d128f65 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -232,10 +232,9 @@
 		   size_t cnt, loff_t *ppos)
 {
 	struct trace_parser parser;
-	size_t read = 0;
-	ssize_t ret;
+	ssize_t read, ret;
 
-	if (!cnt || cnt < 0)
+	if (!cnt)
 		return 0;
 
 	ret = tracing_update_buffers();
@@ -247,7 +246,7 @@
 
 	read = trace_get_user(&parser, ubuf, cnt, ppos);
 
-	if (trace_parser_loaded((&parser))) {
+	if (read >= 0 && trace_parser_loaded((&parser))) {
 		int set = 1;
 
 		if (*parser.buffer == '!')
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index d99664e..a3b1409 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -44,18 +44,21 @@
 /*
  * When balance_dirty_pages decides that the caller needs to perform some
  * non-background writeback, this is how many pages it will attempt to write.
- * It should be somewhat larger than RATELIMIT_PAGES to ensure that reasonably
+ * It should be somewhat larger than dirtied pages to ensure that reasonably
  * large amounts of I/O are submitted.
  */
-static inline long sync_writeback_pages(void)
+static inline long sync_writeback_pages(unsigned long dirtied)
 {
-	return ratelimit_pages + ratelimit_pages / 2;
+	if (dirtied < ratelimit_pages)
+		dirtied = ratelimit_pages;
+
+	return dirtied + dirtied / 2;
 }
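
The reworked helper above scales the write chunk from the number of pages the caller actually dirtied rather than from a fixed multiple of ratelimit_pages: the count is clamped up to the ratelimit and then given 50% headroom. A tiny standalone sketch with a made-up ratelimit value shows the arithmetic:

/* Hedged sketch of the clamp-then-add-50% arithmetic; the ratelimit
 * value here is an example only, not the kernel's. */
#include <stdio.h>

static unsigned long ratelimit_pages = 32;	/* assumption: example value */

static unsigned long sync_writeback_pages(unsigned long dirtied)
{
	if (dirtied < ratelimit_pages)
		dirtied = ratelimit_pages;	/* never below the ratelimit */

	return dirtied + dirtied / 2;		/* then add 50% headroom */
}

int main(void)
{
	/* prints "48 1500": small bursts clamp to 32 * 1.5, large ones scale */
	printf("%lu %lu\n", sync_writeback_pages(8), sync_writeback_pages(1000));
	return 0;
}
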
 
 /* The following parameters are exported via /proc/sys/vm */
 
 /*
- * Start background writeback (via pdflush) at this percentage
+ * Start background writeback (via writeback threads) at this percentage
  */
 int dirty_background_ratio = 10;
 
@@ -474,10 +477,11 @@
  * balance_dirty_pages() must be called by processes which are generating dirty
  * data.  It looks at the number of dirty pages in the machine and will force
  * the caller to perform writeback if the system is over `vm_dirty_ratio'.
- * If we're over `background_thresh' then pdflush is woken to perform some
- * writeout.
+ * If we're over `background_thresh' then the writeback threads are woken to
+ * perform some writeout.
  */
-static void balance_dirty_pages(struct address_space *mapping)
+static void balance_dirty_pages(struct address_space *mapping,
+				unsigned long write_chunk)
 {
 	long nr_reclaimable, bdi_nr_reclaimable;
 	long nr_writeback, bdi_nr_writeback;
@@ -485,7 +489,6 @@
 	unsigned long dirty_thresh;
 	unsigned long bdi_thresh;
 	unsigned long pages_written = 0;
-	unsigned long write_chunk = sync_writeback_pages();
 	unsigned long pause = 1;
 
 	struct backing_dev_info *bdi = mapping->backing_dev_info;
@@ -579,7 +582,7 @@
 		bdi->dirty_exceeded = 0;
 
 	if (writeback_in_progress(bdi))
-		return;		/* pdflush is already working this queue */
+		return;
 
 	/*
 	 * In laptop mode, we wait until hitting the higher threshold before
@@ -590,10 +593,10 @@
 	 * background_thresh, to keep the amount of dirty memory low.
 	 */
 	if ((laptop_mode && pages_written) ||
-	    (!laptop_mode && ((nr_writeback = global_page_state(NR_FILE_DIRTY)
-					  + global_page_state(NR_UNSTABLE_NFS))
+	    (!laptop_mode && ((global_page_state(NR_FILE_DIRTY)
+			       + global_page_state(NR_UNSTABLE_NFS))
 					  > background_thresh)))
-		bdi_start_writeback(bdi, nr_writeback);
+		bdi_start_writeback(bdi, NULL, 0);
 }
 
 void set_page_dirty_balance(struct page *page, int page_mkwrite)
@@ -640,9 +643,10 @@
 	p =  &__get_cpu_var(bdp_ratelimits);
 	*p += nr_pages_dirtied;
 	if (unlikely(*p >= ratelimit)) {
+		ratelimit = sync_writeback_pages(*p);
 		*p = 0;
 		preempt_enable();
-		balance_dirty_pages(mapping);
+		balance_dirty_pages(mapping, ratelimit);
 		return;
 	}
 	preempt_enable();
diff --git a/mm/shmem.c b/mm/shmem.c
index 98631c2..ccf446a 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1046,8 +1046,9 @@
 	 * sync from ever calling shmem_writepage; but a stacking filesystem
 	 * may use the ->writepage of its underlying filesystem, in which case
 	 * tmpfs should write out to swap only in response to memory pressure,
-	 * and not for pdflush or sync.  However, in those cases, we do still
-	 * want to check if there's a redundant swappage to be discarded.
+	 * and not for the writeback threads or sync.  However, in those cases,
+	 * we do still want to check if there's a redundant swappage to be
+	 * discarded.
 	 */
 	if (wbc->for_reclaim)
 		swap = get_swap_page();
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 1219ceb..64e43889 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1709,10 +1709,10 @@
  *
  * If the caller is !__GFP_FS then the probability of a failure is reasonably
  * high - the zone may be full of dirty or under-writeback pages, which this
- * caller can't do much about.  We kick pdflush and take explicit naps in the
- * hope that some of these pages can be written.  But if the allocating task
- * holds filesystem locks which prevent writeout this might not work, and the
- * allocation attempt will fail.
+ * caller can't do much about.  We kick the writeback threads and take explicit
+ * naps in the hope that some of these pages can be written.  But if the
+ * allocating task holds filesystem locks which prevent writeout this might not
+ * work, and the allocation attempt will fail.
  *
  * returns:	0, if no pages reclaimed
  * 		else, the number of pages reclaimed
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index d6b1b05..fbcac76 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -358,6 +358,7 @@
 	ax25_dev *ax25_dev;
 	ax25_cb *ax25;
 	unsigned int k;
+	int ret = 0;
 
 	if (copy_from_user(&ax25_ctl, arg, sizeof(ax25_ctl)))
 		return -EFAULT;
@@ -388,57 +389,63 @@
 	case AX25_WINDOW:
 		if (ax25->modulus == AX25_MODULUS) {
 			if (ax25_ctl.arg < 1 || ax25_ctl.arg > 7)
-				return -EINVAL;
+				goto einval_put;
 		} else {
 			if (ax25_ctl.arg < 1 || ax25_ctl.arg > 63)
-				return -EINVAL;
+				goto einval_put;
 		}
 		ax25->window = ax25_ctl.arg;
 		break;
 
 	case AX25_T1:
 		if (ax25_ctl.arg < 1)
-			return -EINVAL;
+			goto einval_put;
 		ax25->rtt = (ax25_ctl.arg * HZ) / 2;
 		ax25->t1  = ax25_ctl.arg * HZ;
 		break;
 
 	case AX25_T2:
 		if (ax25_ctl.arg < 1)
-			return -EINVAL;
+			goto einval_put;
 		ax25->t2 = ax25_ctl.arg * HZ;
 		break;
 
 	case AX25_N2:
 		if (ax25_ctl.arg < 1 || ax25_ctl.arg > 31)
-			return -EINVAL;
+			goto einval_put;
 		ax25->n2count = 0;
 		ax25->n2 = ax25_ctl.arg;
 		break;
 
 	case AX25_T3:
 		if (ax25_ctl.arg < 0)
-			return -EINVAL;
+			goto einval_put;
 		ax25->t3 = ax25_ctl.arg * HZ;
 		break;
 
 	case AX25_IDLE:
 		if (ax25_ctl.arg < 0)
-			return -EINVAL;
+			goto einval_put;
 		ax25->idle = ax25_ctl.arg * 60 * HZ;
 		break;
 
 	case AX25_PACLEN:
 		if (ax25_ctl.arg < 16 || ax25_ctl.arg > 65535)
-			return -EINVAL;
+			goto einval_put;
 		ax25->paclen = ax25_ctl.arg;
 		break;
 
 	default:
-		return -EINVAL;
+		goto einval_put;
 	  }
 
-	return 0;
+out_put:
+	ax25_cb_put(ax25);
+	return ret;
+
+einval_put:
+	ret = -EINVAL;
+	goto out_put;
 }
 
 static void ax25_fillin_cb_from_dev(ax25_cb *ax25, ax25_dev *ax25_dev)
diff --git a/tools/perf/.gitignore b/tools/perf/.gitignore
index d69a759..0854f11 100644
--- a/tools/perf/.gitignore
+++ b/tools/perf/.gitignore
@@ -10,6 +10,7 @@
 perf-top
 perf*.1
 perf*.xml
+perf*.html
 common-cmds.h
 tags
 TAGS
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 16af2d8..e5f6ece 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -338,14 +338,24 @@
 
 static void abs_printout(int counter, double avg)
 {
+	double total, ratio = 0.0;
+
 	fprintf(stderr, " %14.0f  %-24s", avg, event_name(counter));
 
 	if (MATCH_EVENT(HARDWARE, HW_INSTRUCTIONS, counter)) {
-		fprintf(stderr, " # %10.3f IPC  ",
-				avg / avg_stats(&runtime_cycles_stats));
+		total = avg_stats(&runtime_cycles_stats);
+
+		if (total)
+			ratio = avg / total;
+
+		fprintf(stderr, " # %10.3f IPC  ", ratio);
 	} else {
-		fprintf(stderr, " # %10.3f M/sec",
-				1000.0 * avg / avg_stats(&runtime_nsecs_stats));
+		total = avg_stats(&runtime_nsecs_stats);
+
+		if (total)
+			ratio = 1000.0 * avg / total;
+
+		fprintf(stderr, " # %10.3f M/sec", ratio);
 	}
 }
 
diff --git a/tools/perf/util/module.c b/tools/perf/util/module.c
index 3d567fe..0d8c85d 100644
--- a/tools/perf/util/module.c
+++ b/tools/perf/util/module.c
@@ -4,6 +4,7 @@
 #include "module.h"
 
 #include <libelf.h>
+#include <libgen.h>
 #include <gelf.h>
 #include <elf.h>
 #include <dirent.h>
@@ -409,35 +410,40 @@
 static int mod_dso__load_module_paths(struct mod_dso *self)
 {
 	struct utsname uts;
-	int count = 0, len;
+	int count = 0, len, err = -1;
 	char *line = NULL;
 	FILE *file;
-	char *path;
+	char *dpath, *dir;
 	size_t n;
 
 	if (uname(&uts) < 0)
-		goto out_failure;
+		return err;
 
 	len = strlen("/lib/modules/");
 	len += strlen(uts.release);
 	len += strlen("/modules.dep");
 
-	path = calloc(1, len);
-	if (path == NULL)
-		goto out_failure;
+	dpath = calloc(1, len + 1);
+	if (dpath == NULL)
+		return err;
 
-	strcat(path, "/lib/modules/");
-	strcat(path, uts.release);
-	strcat(path, "/modules.dep");
+	strcat(dpath, "/lib/modules/");
+	strcat(dpath, uts.release);
+	strcat(dpath, "/modules.dep");
 
-	file = fopen(path, "r");
-	free(path);
+	file = fopen(dpath, "r");
 	if (file == NULL)
 		goto out_failure;
 
+	dir = dirname(dpath);
+	if (!dir)
+		goto out_failure;
+	strcat(dir, "/");
+
 	while (!feof(file)) {
-		char *name, *tmp;
 		struct module *module;
+		char *name, *path, *tmp;
+		FILE *modfile;
 		int line_len;
 
 		line_len = getline(&line, &n, file);
@@ -445,17 +451,41 @@
 			break;
 
 		if (!line)
-			goto out_failure;
+			break;
 
 		line[--line_len] = '\0'; /* \n */
 
-		path = strtok(line, ":");
+		path = strchr(line, ':');
 		if (!path)
-			goto out_failure;
+			break;
+		*path = '\0';
+
+		path = strdup(line);
+		if (!path)
+			break;
+
+		if (!strstr(path, dir)) {
+			if (strncmp(path, "kernel/", 7))
+				break;
+
+			free(path);
+			path = calloc(1, strlen(dir) + strlen(line) + 1);
+			if (!path)
+				break;
+			strcat(path, dir);
+			strcat(path, line);
+		}
+
+		modfile = fopen(path, "r");
+		if (modfile == NULL)
+			break;
+		fclose(modfile);
 
 		name = strdup(path);
-		name = strtok(name, "/");
+		if (!name)
+			break;
 
+		name = strtok(name, "/");
 		tmp = name;
 
 		while (tmp) {
@@ -463,26 +493,25 @@
 			if (tmp)
 				name = tmp;
 		}
-		name = strsep(&name, ".");
 
-		/* Quirk: replace '-' with '_' in sound modules */
+		name = strsep(&name, ".");
+		if (!name)
+			break;
+
+		/* Quirk: replace '-' with '_' in all modules */
 		for (len = strlen(name); len; len--) {
 			if (*(name+len) == '-')
 				*(name+len) = '_';
 		}
 
 		module = module__new(name, path);
-		if (!module) {
-			fprintf(stderr, "load_module_paths: allocation error\n");
-			goto out_failure;
-		}
+		if (!module)
+			break;
 		mod_dso__insert_module(self, module);
 
 		module->sections = sec_dso__new_dso("sections");
-		if (!module->sections) {
-			fprintf(stderr, "load_module_paths: allocation error\n");
-			goto out_failure;
-		}
+		if (!module->sections)
+			break;
 
 		module->active = mod_dso__load_sections(module);
 
@@ -490,13 +519,20 @@
 			count++;
 	}
 
-	free(line);
-	fclose(file);
-
-	return count;
+	if (feof(file))
+		err = count;
+	else
+		fprintf(stderr, "load_module_paths: modules.dep parsing failure!\n");
 
 out_failure:
-	return -1;
+	if (dpath)
+		free(dpath);
+	if (file)
+		fclose(file);
+	if (line)
+		free(line);
+
+	return err;
 }
 
 int mod_dso__load_modules(struct mod_dso *dso)
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 13ab4b8..87c424d 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -165,33 +165,31 @@
 	DIR *sys_dir, *evt_dir;
 	struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
 	char id_buf[4];
-	int sys_dir_fd, fd;
+	int fd;
 	u64 id;
 	char evt_path[MAXPATHLEN];
+	char dir_path[MAXPATHLEN];
 
 	if (valid_debugfs_mount(debugfs_path))
 		return NULL;
 
 	sys_dir = opendir(debugfs_path);
 	if (!sys_dir)
-		goto cleanup;
-	sys_dir_fd = dirfd(sys_dir);
+		return NULL;
 
 	for_each_subsystem(sys_dir, sys_dirent, sys_next) {
-		int dfd = openat(sys_dir_fd, sys_dirent.d_name,
-				 O_RDONLY|O_DIRECTORY), evt_dir_fd;
-		if (dfd == -1)
+
+		snprintf(dir_path, MAXPATHLEN, "%s/%s", debugfs_path,
+			 sys_dirent.d_name);
+		evt_dir = opendir(dir_path);
+		if (!evt_dir)
 			continue;
-		evt_dir = fdopendir(dfd);
-		if (!evt_dir) {
-			close(dfd);
-			continue;
-		}
-		evt_dir_fd = dirfd(evt_dir);
+
 		for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
-			snprintf(evt_path, MAXPATHLEN, "%s/id",
+
+			snprintf(evt_path, MAXPATHLEN, "%s/%s/id", dir_path,
 				 evt_dirent.d_name);
-			fd = openat(evt_dir_fd, evt_path, O_RDONLY);
+			fd = open(evt_path, O_RDONLY);
 			if (fd < 0)
 				continue;
 			if (read(fd, id_buf, sizeof(id_buf)) < 0) {
@@ -225,7 +223,6 @@
 		closedir(evt_dir);
 	}
 
-cleanup:
 	closedir(sys_dir);
 	return NULL;
 }
@@ -761,28 +758,24 @@
 {
 	DIR *sys_dir, *evt_dir;
 	struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
-	int sys_dir_fd;
 	char evt_path[MAXPATHLEN];
+	char dir_path[MAXPATHLEN];
 
 	if (valid_debugfs_mount(debugfs_path))
 		return;
 
 	sys_dir = opendir(debugfs_path);
 	if (!sys_dir)
-		goto cleanup;
-	sys_dir_fd = dirfd(sys_dir);
+		return;
 
 	for_each_subsystem(sys_dir, sys_dirent, sys_next) {
-		int dfd = openat(sys_dir_fd, sys_dirent.d_name,
-				 O_RDONLY|O_DIRECTORY), evt_dir_fd;
-		if (dfd == -1)
+
+		snprintf(dir_path, MAXPATHLEN, "%s/%s", debugfs_path,
+			 sys_dirent.d_name);
+		evt_dir = opendir(dir_path);
+		if (!evt_dir)
 			continue;
-		evt_dir = fdopendir(dfd);
-		if (!evt_dir) {
-			close(dfd);
-			continue;
-		}
-		evt_dir_fd = dirfd(evt_dir);
+
 		for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
 			snprintf(evt_path, MAXPATHLEN, "%s:%s",
 				 sys_dirent.d_name, evt_dirent.d_name);
@@ -791,8 +784,6 @@
 		}
 		closedir(evt_dir);
 	}
-
-cleanup:
 	closedir(sys_dir);
 }
 
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index fd3d9c8..559fb06 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -833,7 +833,7 @@
 	struct mod_dso *mods = mod_dso__new_dso("modules");
 	struct module *pos;
 	struct rb_node *next;
-	int err;
+	int err, count = 0;
 
 	err = mod_dso__load_modules(mods);
 
@@ -852,14 +852,16 @@
 			break;
 
 		next = rb_next(&pos->rb_node);
+		count += err;
 	}
 
 	if (err < 0) {
 		mod_dso__delete_modules(mods);
 		mod_dso__delete_self(mods);
+		return err;
 	}
 
-	return err;
+	return count;
 }
 
 static inline void dso__fill_symbol_holes(struct dso *self)
@@ -913,8 +915,15 @@
 
 	if (vmlinux) {
 		err = dso__load_vmlinux(self, vmlinux, filter, v);
-		if (err > 0 && use_modules)
-			err = dso__load_modules(self, filter, v);
+		if (err > 0 && use_modules) {
+			int syms = dso__load_modules(self, filter, v);
+
+			if (syms < 0) {
+				fprintf(stderr, "dso__load_modules failed!\n");
+				return syms;
+			}
+			err += syms;
+		}
 	}
 
 	if (err <= 0)