Merge branch 'for-linus' of git://oss.sgi.com/xfs/xfs

* 'for-linus' of git://oss.sgi.com/xfs/xfs:
  xfs: Ensure inode allocation buffers are fully replayed
  xfs: enable background pushing of the CIL
  xfs: forced unmounts need to push the CIL
  xfs: Introduce delayed logging core code
  xfs: Delayed logging design documentation
  xfs: Improve scalability of busy extent tracking
  xfs: make the log ticket ID available outside the log infrastructure
  xfs: clean up log ticket overrun debug output
  xfs: Clean up XFS_BLI_* flag namespace
  xfs: modify buffer item reference counting
  xfs: allow log ticket allocation to take allocation flags
  xfs: Don't reuse the same transaction ID for duplicated transactions.
diff --git a/Documentation/ABI/testing/sysfs-devices-node b/Documentation/ABI/testing/sysfs-devices-node
new file mode 100644
index 0000000..453a210
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-devices-node
@@ -0,0 +1,7 @@
+What:		/sys/devices/system/node/nodeX/compact
+Date:		February 2010
+Contact:	Mel Gorman <mel@csn.ul.ie>
+Description:
+		When this file is written to, all memory within that node
+		will be compacted. When it completes, memory will be freed
+		into blocks which have as many contiguous pages as possible.
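
As a usage illustration (not part of the patch), a minimal userspace sketch
that triggers compaction of node 0 through this file; the node number is
arbitrary:

	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/sys/devices/system/node/node0/compact", O_WRONLY);

		if (fd < 0)
			return 1;
		write(fd, "1", 1);	/* any write compacts this node's memory */
		close(fd);
		return 0;
	}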
diff --git a/Documentation/filesystems/tmpfs.txt b/Documentation/filesystems/tmpfs.txt
index fe09a2c..98ef551 100644
--- a/Documentation/filesystems/tmpfs.txt
+++ b/Documentation/filesystems/tmpfs.txt
@@ -94,11 +94,19 @@
 a range being two hyphen-separated decimal numbers, the smallest and
 largest node numbers in the range.  For example, mpol=bind:0-3,5,7,9-15
 
+A memory policy with a valid NodeList will be saved, as specified, for
+use at file creation time.  When a task allocates a file in the file
+system, the mount option memory policy will be applied with a NodeList,
+if any, modified by the calling task's cpuset constraints
+[See Documentation/cgroups/cpusets.txt] and any optional flags, listed
+below.  If the resulting NodeList is the empty set, the effective memory
+policy for the file will revert to "default" policy.
+
 NUMA memory allocation policies have optional flags that can be used in
 conjunction with their modes.  These optional flags can be specified
 when tmpfs is mounted by appending them to the mode before the NodeList.
 See Documentation/vm/numa_memory_policy.txt for a list of all available
-memory allocation policy mode flags.
+memory allocation policy mode flags and their effect on memory policy.
 
 	=static		is equivalent to	MPOL_F_STATIC_NODES
 	=relative	is equivalent to	MPOL_F_RELATIVE_NODES
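
As an illustration of the syntax above, a tmpfs mount that binds allocations
to nodes 0-3 with the =static flag (mount point and nodes are arbitrary):

	#include <sys/mount.h>

	int mount_example(void)
	{
		return mount("tmpfs", "/mnt", "tmpfs", 0,
			     "mpol=bind=static:0-3");
	}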
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index 6c7d18c..5fdbb61 100644
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -19,6 +19,7 @@
 Currently, these files are in /proc/sys/vm:
 
 - block_dump
+- compact_memory
 - dirty_background_bytes
 - dirty_background_ratio
 - dirty_bytes
@@ -26,6 +27,7 @@
 - dirty_ratio
 - dirty_writeback_centisecs
 - drop_caches
+- extfrag_threshold
 - hugepages_treat_as_movable
 - hugetlb_shm_group
 - laptop_mode
@@ -64,6 +66,15 @@
 
 ==============================================================
 
+compact_memory
+
+Available only when CONFIG_COMPACTION is set. When 1 is written to the file,
+all zones are compacted such that free memory is available in contiguous
+blocks where possible. This can be important, for example, in the allocation
+of huge pages, although processes will also directly compact memory as
+required.
+
+==============================================================
+
 dirty_background_bytes
 
 Contains the amount of dirty memory at which the pdflush background writeback
@@ -139,6 +150,20 @@
 
 ==============================================================
 
+extfrag_threshold
+
+This parameter affects whether the kernel will compact memory or direct
+reclaim to satisfy a high-order allocation. /proc/extfrag_index shows the
+fragmentation index for each order in each zone in the system. Values
+tending towards 0 imply that allocations would fail due to lack of memory,
+values towards 1000 imply that failures are due to fragmentation, and -1
+implies that the allocation will succeed as long as watermarks are met.
+
+The kernel will not compact memory in a zone if the
+fragmentation index is <= extfrag_threshold. The default value is 500.
+
+==============================================================
+
 hugepages_treat_as_movable
 
 This parameter is only useful when kernelcore= is specified at boot time to
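
As a usage illustration for the two new tunables (the values written here
are arbitrary):

	#include <stdio.h>

	static void vm_compaction_example(void)
	{
		FILE *f = fopen("/proc/sys/vm/compact_memory", "w");

		if (f) {
			fputs("1", f);	/* compact all zones once */
			fclose(f);
		}

		f = fopen("/proc/sys/vm/extfrag_threshold", "w");
		if (f) {
			fputs("600", f);	/* compact only when index > 600 */
			fclose(f);
		}
	}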
diff --git a/arch/alpha/math-emu/sfp-util.h b/arch/alpha/math-emu/sfp-util.h
index f53707f..d4c6ae7 100644
--- a/arch/alpha/math-emu/sfp-util.h
+++ b/arch/alpha/math-emu/sfp-util.h
@@ -28,8 +28,3 @@
 #define UDIV_NEEDS_NORMALIZATION 1  
 
 #define abort()			goto bad_insn
-
-#ifndef __LITTLE_ENDIAN
-#define __LITTLE_ENDIAN -1
-#endif
-#define __BYTE_ORDER __LITTLE_ENDIAN
diff --git a/arch/arm/plat-samsung/include/plat/regs-rtc.h b/arch/arm/plat-samsung/include/plat/regs-rtc.h
index d5837cf..65c190d 100644
--- a/arch/arm/plat-samsung/include/plat/regs-rtc.h
+++ b/arch/arm/plat-samsung/include/plat/regs-rtc.h
@@ -20,6 +20,10 @@
 #define S3C2410_RTCCON_CLKSEL (1<<1)
 #define S3C2410_RTCCON_CNTSEL (1<<2)
 #define S3C2410_RTCCON_CLKRST (1<<3)
+#define S3C64XX_RTCCON_TICEN  (1<<8)
+
+#define S3C64XX_RTCCON_TICMSK (0xF<<7)
+#define S3C64XX_RTCCON_TICSHT (7)
 
 #define S3C2410_TICNT	      S3C2410_RTCREG(0x44)
 #define S3C2410_TICNT_ENABLE  (1<<7)
diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
index 2797163..7dc0f0f 100644
--- a/arch/frv/include/asm/cache.h
+++ b/arch/frv/include/asm/cache.h
@@ -17,6 +17,8 @@
 #define L1_CACHE_SHIFT		(CONFIG_FRV_L1_CACHE_SHIFT)
 #define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
 
+#define ARCH_KMALLOC_MINALIGN	L1_CACHE_BYTES
+
 #define __cacheline_aligned	__attribute__((aligned(L1_CACHE_BYTES)))
 #define ____cacheline_aligned	__attribute__((aligned(L1_CACHE_BYTES)))
 
diff --git a/arch/frv/include/asm/gdb-stub.h b/arch/frv/include/asm/gdb-stub.h
index 2da7164..e6bedd0 100644
--- a/arch/frv/include/asm/gdb-stub.h
+++ b/arch/frv/include/asm/gdb-stub.h
@@ -12,6 +12,7 @@
 #ifndef __ASM_GDB_STUB_H
 #define __ASM_GDB_STUB_H
 
+#undef GDBSTUB_DEBUG_IO
 #undef GDBSTUB_DEBUG_PROTOCOL
 
 #include <asm/ptrace.h>
@@ -108,6 +109,12 @@
 extern void debug_to_serial(const char *p, int n);
 extern void console_set_baud(unsigned baud);
 
+#ifdef GDBSTUB_DEBUG_IO
+#define gdbstub_io(FMT,...) gdbstub_printk(FMT, ##__VA_ARGS__)
+#else
+#define gdbstub_io(FMT,...) ({ 0; })
+#endif
+
 #ifdef GDBSTUB_DEBUG_PROTOCOL
 #define gdbstub_proto(FMT,...) gdbstub_printk(FMT,##__VA_ARGS__)
 #else
diff --git a/arch/frv/kernel/gdb-io.c b/arch/frv/kernel/gdb-io.c
index c997bcc..2ca641d 100644
--- a/arch/frv/kernel/gdb-io.c
+++ b/arch/frv/kernel/gdb-io.c
@@ -171,11 +171,11 @@
 		return -EINTR;
 	}
 	else if (st & (UART_LSR_FE|UART_LSR_OE|UART_LSR_PE)) {
-		gdbstub_proto("### GDB Rx Error (st=%02x) ###\n",st);
+		gdbstub_io("### GDB Rx Error (st=%02x) ###\n",st);
 		return -EIO;
 	}
 	else {
-		gdbstub_proto("### GDB Rx %02x (st=%02x) ###\n",ch,st);
+		gdbstub_io("### GDB Rx %02x (st=%02x) ###\n",ch,st);
 		*_ch = ch & 0x7f;
 		return 0;
 	}
diff --git a/arch/frv/kernel/gdb-stub.c b/arch/frv/kernel/gdb-stub.c
index 7ca8a6b..84d103c3 100644
--- a/arch/frv/kernel/gdb-stub.c
+++ b/arch/frv/kernel/gdb-stub.c
@@ -1344,6 +1344,44 @@
 
 } /* end gdbstub_get_mmu_state() */
 
+/*
+ * handle general query commands of the form 'qXXXXX'
+ */
+static void gdbstub_handle_query(void)
+{
+	if (strcmp(input_buffer, "qAttached") == 0) {
+		/* reply that we are attached to an existing process */
+		sprintf(output_buffer, "1");
+		return;
+	}
+
+	if (strcmp(input_buffer, "qC") == 0) {
+		/* return current thread ID */
+		sprintf(output_buffer, "QC 0");
+		return;
+	}
+
+	if (strcmp(input_buffer, "qOffsets") == 0) {
+		/* return relocation offset of text and data segments */
+		sprintf(output_buffer, "Text=0;Data=0;Bss=0");
+		return;
+	}
+
+	if (strcmp(input_buffer, "qSymbol::") == 0) {
+		sprintf(output_buffer, "OK");
+		return;
+	}
+
+	if (strcmp(input_buffer, "qSupported") == 0) {
+		/* query of supported features */
+		sprintf(output_buffer, "PacketSize=%zu;ReverseContinue-;ReverseStep-",
+			sizeof(input_buffer));
+		return;
+	}
+
+	gdbstub_strcpy(output_buffer,"E01");
+}
+
 /*****************************************************************************/
 /*
  * handle event interception and GDB remote protocol processing
@@ -1840,6 +1878,10 @@
 		case 'k' :
 			goto done;	/* just continue */
 
+			/* detach */
+		case 'D':
+			gdbstub_strcpy(output_buffer, "OK");
+			break;
 
 			/* reset the whole machine (FIXME: system dependent) */
 		case 'r':
@@ -1852,6 +1894,14 @@
 			__debug_status.dcr |= DCR_SE;
 			goto done;
 
+			/* extended command */
+		case 'v':
+			if (strcmp(input_buffer, "vCont?") == 0) {
+				output_buffer[0] = 0;
+				break;
+			}
+			goto unsupported_cmd;
+
 			/* set baud rate (bBB) */
 		case 'b':
 			ptr = &input_buffer[1];
@@ -1923,8 +1973,19 @@
 			gdbstub_strcpy(output_buffer,"OK");
 			break;
 
+			/* Thread-setting packet */
+		case 'H':
+			gdbstub_strcpy(output_buffer, "OK");
+			break;
+
+		case 'q':
+			gdbstub_handle_query();
+			break;
+
 		default:
+		unsupported_cmd:
 			gdbstub_proto("### GDB Unsupported Cmd '%s'\n",input_buffer);
+			gdbstub_strcpy(output_buffer,"E01");
 			break;
 		}
 
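
As an illustration of the new query handling (the exchange is hypothetical,
with checksums computed per the GDB remote serial protocol), asking the stub
for the current thread now looks like:

	host -> stub:  $qC#b4
	stub -> host:  +$QC 0#e4

The 'D' (detach) and 'H' (set thread) packets likewise now get an "OK"
instead of falling through to the unsupported-command path.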
diff --git a/arch/mn10300/include/asm/atomic.h b/arch/mn10300/include/asm/atomic.h
index e41222d..f0cc1f8 100644
--- a/arch/mn10300/include/asm/atomic.h
+++ b/arch/mn10300/include/asm/atomic.h
@@ -1,157 +1 @@
-/* MN10300 Atomic counter operations
- *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-#ifndef _ASM_ATOMIC_H
-#define _ASM_ATOMIC_H
-
-#ifdef CONFIG_SMP
-#error not SMP safe
-#endif
-
-/*
- * Atomic operations that C can't guarantee us.  Useful for
- * resource counting etc..
- */
-
-#define ATOMIC_INIT(i)	{ (i) }
-
-#ifdef __KERNEL__
-
-/**
- * atomic_read - read atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically reads the value of @v.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
- */
-#define atomic_read(v)	(*(volatile int *)&(v)->counter)
-
-/**
- * atomic_set - set atomic variable
- * @v: pointer of type atomic_t
- * @i: required value
- *
- * Atomically sets the value of @v to @i.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
- */
-#define atomic_set(v, i) (((v)->counter) = (i))
-
-#include <asm/system.h>
-
-/**
- * atomic_add_return - add integer to atomic variable
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v and returns the result
- * Note that the guaranteed useful range of an atomic_t is only 24 bits.
- */
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-	unsigned long flags;
-	int temp;
-
-	local_irq_save(flags);
-	temp = v->counter;
-	temp += i;
-	v->counter = temp;
-	local_irq_restore(flags);
-
-	return temp;
-}
-
-/**
- * atomic_sub_return - subtract integer from atomic variable
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v and returns the result
- * Note that the guaranteed useful range of an atomic_t is only 24 bits.
- */
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-	unsigned long flags;
-	int temp;
-
-	local_irq_save(flags);
-	temp = v->counter;
-	temp -= i;
-	v->counter = temp;
-	local_irq_restore(flags);
-
-	return temp;
-}
-
-static inline int atomic_add_negative(int i, atomic_t *v)
-{
-	return atomic_add_return(i, v) < 0;
-}
-
-static inline void atomic_add(int i, atomic_t *v)
-{
-	atomic_add_return(i, v);
-}
-
-static inline void atomic_sub(int i, atomic_t *v)
-{
-	atomic_sub_return(i, v);
-}
-
-static inline void atomic_inc(atomic_t *v)
-{
-	atomic_add_return(1, v);
-}
-
-static inline void atomic_dec(atomic_t *v)
-{
-	atomic_sub_return(1, v);
-}
-
-#define atomic_dec_return(v)		atomic_sub_return(1, (v))
-#define atomic_inc_return(v)		atomic_add_return(1, (v))
-
-#define atomic_sub_and_test(i, v)	(atomic_sub_return((i), (v)) == 0)
-#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
-#define atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
-
-#define atomic_add_unless(v, a, u)				\
-({								\
-	int c, old;						\
-	c = atomic_read(v);					\
-	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
-		c = old;					\
-	c != (u);						\
-})
-
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
-
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
-{
-	unsigned long flags;
-
-	mask = ~mask;
-	local_irq_save(flags);
-	*addr &= mask;
-	local_irq_restore(flags);
-}
-
-#define atomic_xchg(ptr, v)		(xchg(&(ptr)->counter, (v)))
-#define atomic_cmpxchg(v, old, new)	(cmpxchg(&((v)->counter), (old), (new)))
-
-/* Atomic operations are already serializing on MN10300??? */
-#define smp_mb__before_atomic_dec()	barrier()
-#define smp_mb__after_atomic_dec()	barrier()
-#define smp_mb__before_atomic_inc()	barrier()
-#define smp_mb__after_atomic_inc()	barrier()
-
-#include <asm-generic/atomic-long.h>
-
-#endif /* __KERNEL__ */
-#endif /* _ASM_ATOMIC_H */
+#include <asm-generic/atomic.h>
diff --git a/arch/mn10300/include/asm/cache.h b/arch/mn10300/include/asm/cache.h
index e03cfa2..6e2fe28 100644
--- a/arch/mn10300/include/asm/cache.h
+++ b/arch/mn10300/include/asm/cache.h
@@ -21,6 +21,8 @@
 #define L1_CACHE_DISPARITY	L1_CACHE_NENTRIES * L1_CACHE_BYTES
 #endif
 
+#define ARCH_KMALLOC_MINALIGN	L1_CACHE_BYTES
+
 /* data cache purge registers
  * - read from the register to unconditionally purge that cache line
  * - write address & 0xffffff00 to conditionally purge that cache line
diff --git a/arch/powerpc/include/asm/sfp-machine.h b/arch/powerpc/include/asm/sfp-machine.h
index 3a7a67a..8b8fab9 100644
--- a/arch/powerpc/include/asm/sfp-machine.h
+++ b/arch/powerpc/include/asm/sfp-machine.h
@@ -353,12 +353,6 @@
 #define abort()								\
 	return 0
 
-#ifdef __BIG_ENDIAN
-#define __BYTE_ORDER __BIG_ENDIAN
-#else
-#define __BYTE_ORDER __LITTLE_ENDIAN
-#endif
-
 /* Exception flags. */
 #define EFLAG_INVALID		(1 << (31 - 2))
 #define EFLAG_OVERFLOW		(1 << (31 - 3))
diff --git a/arch/s390/include/asm/sfp-util.h b/arch/s390/include/asm/sfp-util.h
index 0addc64..7d43fee 100644
--- a/arch/s390/include/asm/sfp-util.h
+++ b/arch/s390/include/asm/sfp-util.h
@@ -73,5 +73,3 @@
 #define UDIV_NEEDS_NORMALIZATION 0
 
 #define abort() return 0
-
-#define __BYTE_ORDER __BIG_ENDIAN
diff --git a/arch/sh/math-emu/sfp-util.h b/arch/sh/math-emu/sfp-util.h
index 8ae1bd3..e8526021 100644
--- a/arch/sh/math-emu/sfp-util.h
+++ b/arch/sh/math-emu/sfp-util.h
@@ -66,7 +66,3 @@
   } while (0)
 
 #define abort()	return 0
-
-#define __BYTE_ORDER __LITTLE_ENDIAN
-
-
diff --git a/arch/sparc/math-emu/sfp-util_32.h b/arch/sparc/math-emu/sfp-util_32.h
index d1b2aff3..0ea35af 100644
--- a/arch/sparc/math-emu/sfp-util_32.h
+++ b/arch/sparc/math-emu/sfp-util_32.h
@@ -107,9 +107,3 @@
 
 #define abort()								\
 	return 0
-
-#ifdef __BIG_ENDIAN
-#define __BYTE_ORDER __BIG_ENDIAN
-#else
-#define __BYTE_ORDER __LITTLE_ENDIAN
-#endif
diff --git a/arch/sparc/math-emu/sfp-util_64.h b/arch/sparc/math-emu/sfp-util_64.h
index 425d3cf..d17c9bc 100644
--- a/arch/sparc/math-emu/sfp-util_64.h
+++ b/arch/sparc/math-emu/sfp-util_64.h
@@ -112,9 +112,3 @@
 
 #define abort() \
 	return 0
-
-#ifdef __BIG_ENDIAN
-#define __BYTE_ORDER __BIG_ENDIAN
-#else
-#define __BYTE_ORDER __LITTLE_ENDIAN
-#endif
diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
index 89bbf4e..7b1aaa2 100644
--- a/arch/x86/boot/compressed/relocs.c
+++ b/arch/x86/boot/compressed/relocs.c
@@ -195,11 +195,11 @@
 
 
 
-#if BYTE_ORDER == LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
 #define le16_to_cpu(val) (val)
 #define le32_to_cpu(val) (val)
 #endif
-#if BYTE_ORDER == BIG_ENDIAN
+#if __BYTE_ORDER == __BIG_ENDIAN
 #define le16_to_cpu(val) bswap_16(val)
 #define le32_to_cpu(val) bswap_32(val)
 #endif
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index f932485..b49d8ca 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -236,6 +236,8 @@
 
 #define MSR_IA32_MISC_ENABLE		0x000001a0
 
+#define MSR_IA32_TEMPERATURE_TARGET	0x000001a2
+
 /* MISC_ENABLE bits: architectural */
 #define MSR_IA32_MISC_ENABLE_FAST_STRING	(1ULL << 0)
 #define MSR_IA32_MISC_ENABLE_TCC		(1ULL << 1)
diff --git a/arch/xtensa/include/asm/cache.h b/arch/xtensa/include/asm/cache.h
index f04c989..ed8cd3c 100644
--- a/arch/xtensa/include/asm/cache.h
+++ b/arch/xtensa/include/asm/cache.h
@@ -29,5 +29,6 @@
 # define CACHE_WAY_SIZE ICACHE_WAY_SIZE
 #endif
 
+#define ARCH_KMALLOC_MINALIGN	L1_CACHE_BYTES
 
 #endif	/* _XTENSA_CACHE_H */
diff --git a/arch/xtensa/include/asm/hardirq.h b/arch/xtensa/include/asm/hardirq.h
index 87cb19d..26664ce 100644
--- a/arch/xtensa/include/asm/hardirq.h
+++ b/arch/xtensa/include/asm/hardirq.h
@@ -11,18 +11,9 @@
 #ifndef _XTENSA_HARDIRQ_H
 #define _XTENSA_HARDIRQ_H
 
-#include <linux/cache.h>
-#include <asm/irq.h>
-
-/* headers.S is sensitive to the offsets of these fields */
-typedef struct {
-	unsigned int __softirq_pending;
-	unsigned int __syscall_count;
-	struct task_struct * __ksoftirqd_task; /* waitqueue is too large */
-	unsigned int __nmi_count;	       /* arch dependent */
-} ____cacheline_aligned irq_cpustat_t;
-
 void ack_bad_irq(unsigned int irq);
-#include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */
+#define ack_bad_irq ack_bad_irq
+
+#include <asm-generic/hardirq.h>
 
 #endif	/* _XTENSA_HARDIRQ_H */
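
The "#define ack_bad_irq ack_bad_irq" line is the usual opt-out idiom for
asm-generic headers: the generic header supplies its fallback only when the
architecture has not already claimed the name. A sketch of the corresponding
guard in include/asm-generic/hardirq.h (approximate, for illustration):

	#ifndef ack_bad_irq
	static inline void ack_bad_irq(unsigned int irq)
	{
		printk(KERN_CRIT "unexpected IRQ trap at vector %02x\n", irq);
	}
	#endif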
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
index 8cd3848..c64a5d3 100644
--- a/arch/xtensa/kernel/irq.c
+++ b/arch/xtensa/kernel/irq.c
@@ -27,15 +27,6 @@
 atomic_t irq_err_count;
 
 /*
- * 'what should we do if we get a hw irq event on an illegal vector'.
- * each architecture has to answer this themselves.
- */
-void ack_bad_irq(unsigned int irq)
-{
-          printk("unexpected IRQ trap at vector %02x\n", irq);
-}
-
-/*
  * do_IRQ handles all normal device IRQ's (the special
  * SMP cross-CPU interrupts have their own specific
  * handlers).
diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S
index 74a7518..70066e3 100644
--- a/arch/xtensa/kernel/vectors.S
+++ b/arch/xtensa/kernel/vectors.S
@@ -44,14 +44,12 @@
 
 #include <linux/linkage.h>
 #include <asm/ptrace.h>
-#include <asm/ptrace.h>
 #include <asm/current.h>
 #include <asm/asm-offsets.h>
 #include <asm/pgtable.h>
 #include <asm/processor.h>
 #include <asm/page.h>
 #include <asm/thread_info.h>
-#include <asm/processor.h>
 
 #define WINDOW_VECTORS_SIZE   0x180
 
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 9042a85..c1d23cd 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -401,11 +401,6 @@
 	printk("\n");
 }
 
-static u8 hex_val(unsigned char c)
-{
-	return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
-}
-
 static acpi_status acpi_str_to_uuid(char *str, u8 *uuid)
 {
 	int i;
@@ -422,8 +417,8 @@
 			return AE_BAD_PARAMETER;
 	}
 	for (i = 0; i < 16; i++) {
-		uuid[i] = hex_val(str[opc_map_to_uuid[i]]) << 4;
-		uuid[i] |= hex_val(str[opc_map_to_uuid[i] + 1]);
+		uuid[i] = hex_to_bin(str[opc_map_to_uuid[i]]) << 4;
+		uuid[i] |= hex_to_bin(str[opc_map_to_uuid[i] + 1]);
 	}
 	return AE_OK;
 }
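
hex_to_bin() is the library helper from <linux/kernel.h>; it returns 0-15
for a valid hexadecimal digit. A minimal sketch of the two-digit pattern
used above (the wrapper name is made up for illustration):

	#include <linux/kernel.h>

	/* "1a" -> 0x1a; input assumed pre-validated with isxdigit() */
	static u8 hex_pair_to_byte(const char *s)
	{
		return (hex_to_bin(s[0]) << 4) | hex_to_bin(s[1]);
	}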
diff --git a/drivers/auxdisplay/cfag12864bfb.c b/drivers/auxdisplay/cfag12864bfb.c
index 3fecfb4..5ad3bad 100644
--- a/drivers/auxdisplay/cfag12864bfb.c
+++ b/drivers/auxdisplay/cfag12864bfb.c
@@ -37,7 +37,7 @@
 
 #define CFAG12864BFB_NAME "cfag12864bfb"
 
-static struct fb_fix_screeninfo cfag12864bfb_fix __initdata = {
+static struct fb_fix_screeninfo cfag12864bfb_fix __devinitdata = {
 	.id = "cfag12864b",
 	.type = FB_TYPE_PACKED_PIXELS,
 	.visual = FB_VISUAL_MONO10,
@@ -48,7 +48,7 @@
 	.accel = FB_ACCEL_NONE,
 };
 
-static struct fb_var_screeninfo cfag12864bfb_var __initdata = {
+static struct fb_var_screeninfo cfag12864bfb_var __devinitdata = {
 	.xres = CFAG12864B_WIDTH,
 	.yres = CFAG12864B_HEIGHT,
 	.xres_virtual = CFAG12864B_WIDTH,
@@ -114,7 +114,7 @@
 	return ret;
 }
 
-static int cfag12864bfb_remove(struct platform_device *device)
+static int __devexit cfag12864bfb_remove(struct platform_device *device)
 {
 	struct fb_info *info = platform_get_drvdata(device);
 
@@ -128,7 +128,7 @@
 
 static struct platform_driver cfag12864bfb_driver = {
 	.probe	= cfag12864bfb_probe,
-	.remove = cfag12864bfb_remove,
+	.remove = __devexit_p(cfag12864bfb_remove),
 	.driver = {
 		.name	= CFAG12864BFB_NAME,
 	},
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 057979a..2bdd8a9 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -9,6 +9,7 @@
 #include <linux/memory.h>
 #include <linux/node.h>
 #include <linux/hugetlb.h>
+#include <linux/compaction.h>
 #include <linux/cpumask.h>
 #include <linux/topology.h>
 #include <linux/nodemask.h>
@@ -246,6 +247,8 @@
 		scan_unevictable_register_node(node);
 
 		hugetlb_register_node(node);
+
+		compaction_register_node(node);
 	}
 	return error;
 }
diff --git a/drivers/char/hangcheck-timer.c b/drivers/char/hangcheck-timer.c
index 712d9f2..e024972 100644
--- a/drivers/char/hangcheck-timer.c
+++ b/drivers/char/hangcheck-timer.c
@@ -49,8 +49,9 @@
 #include <asm/uaccess.h>
 #include <linux/sysrq.h>
 #include <linux/timer.h>
+#include <linux/time.h>
 
-#define VERSION_STR "0.9.0"
+#define VERSION_STR "0.9.1"
 
 #define DEFAULT_IOFENCE_MARGIN 60	/* Default fudge factor, in seconds */
 #define DEFAULT_IOFENCE_TICK 180	/* Default timer timeout, in seconds */
@@ -119,10 +120,8 @@
 #if defined(CONFIG_S390)
 # define HAVE_MONOTONIC
 # define TIMER_FREQ 1000000000ULL
-#elif defined(CONFIG_IA64)
-# define TIMER_FREQ ((unsigned long long)local_cpu_data->itc_freq)
 #else
-# define TIMER_FREQ (HZ*loops_per_jiffy)
+# define TIMER_FREQ 1000000000ULL
 #endif
 
 #ifdef HAVE_MONOTONIC
@@ -130,7 +129,9 @@
 #else
 static inline unsigned long long monotonic_clock(void)
 {
-	return get_cycles();
+	struct timespec ts;
+	getrawmonotonic(&ts);
+	return timespec_to_ns(&ts);
 }
 #endif  /* HAVE_MONOTONIC */
 
@@ -168,6 +169,13 @@
 			printk(KERN_CRIT "Hangcheck: hangcheck value past margin!\n");
 		}
 	}
+#if 0
+	/*
+	 * Enable to investigate delays in detail
+	 */
+	printk("Hangcheck: called %Ld ns since last time (%Ld ns overshoot)\n",
+			tsc_diff, tsc_diff - hangcheck_tick*TIMER_FREQ);
+#endif
 	mod_timer(&hangcheck_ticktock, jiffies + (hangcheck_tick*HZ));
 	hangcheck_tsc = monotonic_clock();
 }
@@ -180,7 +188,7 @@
 #if defined (HAVE_MONOTONIC)
 	printk("Hangcheck: Using monotonic_clock().\n");
 #else
-	printk("Hangcheck: Using get_cycles().\n");
+	printk("Hangcheck: Using getrawmonotonic().\n");
 #endif  /* HAVE_MONOTONIC */
 	hangcheck_tsc_margin =
 		(unsigned long long)(hangcheck_margin + hangcheck_tick);
diff --git a/drivers/char/hvsi.c b/drivers/char/hvsi.c
index 793b236..d4b14ff 100644
--- a/drivers/char/hvsi.c
+++ b/drivers/char/hvsi.c
@@ -194,10 +194,8 @@
 		"HVSI_WAIT_FOR_MCTRL_RESPONSE",
 		"HVSI_FSP_DIED",
 	};
-	const char *name = state_names[hp->state];
-
-	if (hp->state > ARRAY_SIZE(state_names))
-		name = "UNKNOWN";
+	const char *name = (hp->state < ARRAY_SIZE(state_names))
+		? state_names[hp->state] : "UNKNOWN";
 
 	pr_debug("hvsi%i: state = %s\n", hp->index, name);
 #endif /* DEBUG */
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index 92ab03d..cd650ca 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -144,6 +144,7 @@
 	old_fops = file->f_op;
 	file->f_op = new_fops;
 	if (file->f_op->open) {
+		file->private_data = c;
 		err=file->f_op->open(inode,file);
 		if (err) {
 			fops_put(file->f_op);
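
With file->private_data pointing at the matched struct miscdevice before the
driver's open() runs, a misc driver can recover its own state in the usual
container_of() style; a minimal sketch (driver and structure names are
hypothetical):

	static int my_misc_open(struct inode *inode, struct file *file)
	{
		struct miscdevice *misc = file->private_data;	/* set by misc_open() */
		struct my_dev *dev = container_of(misc, struct my_dev, miscdev);

		file->private_data = dev;	/* drivers often re-point it */
		return 0;
	}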
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index b81ad9c..52ff8aa 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -21,9 +21,12 @@
 #include <linux/math64.h>
 
 #define BUCKETS 12
+#define INTERVALS 8
 #define RESOLUTION 1024
-#define DECAY 4
+#define DECAY 8
 #define MAX_INTERESTING 50000
+#define STDDEV_THRESH 400
+
 
 /*
  * Concepts and ideas behind the menu governor
@@ -64,6 +67,16 @@
  * indexed based on the magnitude of the expected duration as well as the
  * "is IO outstanding" property.
  *
+ * Repeatable-interval-detector
+ * ----------------------------
+ * There are some cases where "next timer" is a completely unusable predictor:
+ * those cases where the interval is fixed, for example due to hardware
+ * interrupt mitigation, but also due to fixed-transfer-rate devices such as
+ * mice.
+ * For this, we use a different predictor: we track the duration of the last 8
+ * intervals and, if the standard deviation of these 8 intervals is below a
+ * threshold value, we use the average of these intervals as the prediction.
+ *
  * Limiting Performance Impact
  * ---------------------------
  * C states, especially those with large exit latencies, can have a real
@@ -104,6 +117,8 @@
 	unsigned int	exit_us;
 	unsigned int	bucket;
 	u64		correction_factor[BUCKETS];
+	u32		intervals[INTERVALS];
+	int		interval_ptr;
 };
 
 
@@ -175,6 +190,42 @@
 	return div_u64(dividend + (divisor / 2), divisor);
 }
 
+/*
+ * Try detecting repeating patterns by keeping track of the last 8
+ * intervals, and checking if the standard deviation of that set
+ * of points is below a threshold. If it is... then use the
+ * average of these 8 points as the estimated value.
+ */
+static void detect_repeating_patterns(struct menu_device *data)
+{
+	int i;
+	uint64_t avg = 0;
+	uint64_t stddev = 0; /* contains the square of the std deviation */
+
+	/* first calculate average and standard deviation of the past */
+	for (i = 0; i < INTERVALS; i++)
+		avg += data->intervals[i];
+	avg = avg / INTERVALS;
+
+	/* if the avg is beyond the known next tick, it's worthless */
+	if (avg > data->expected_us)
+		return;
+
+	for (i = 0; i < INTERVALS; i++)
+		stddev += (data->intervals[i] - avg) *
+			  (data->intervals[i] - avg);
+
+	stddev = stddev / INTERVALS;
+
+	/*
+	 * now.. if stddev is small.. then assume we have a
+	 * repeating pattern and predict we keep doing this.
+	 */
+
+	if (avg && stddev < STDDEV_THRESH)
+		data->predicted_us = avg;
+}
+
 /**
  * menu_select - selects the next idle state to enter
  * @dev: the CPU
@@ -218,6 +269,8 @@
 	data->predicted_us = div_round64(data->expected_us * data->correction_factor[data->bucket],
 					 RESOLUTION * DECAY);
 
+	detect_repeating_patterns(data);
+
 	/*
 	 * We want to default to C1 (hlt), not to busy polling
 	 * unless the timer is happening really really soon.
@@ -310,6 +363,11 @@
 		new_factor = 1;
 
 	data->correction_factor[data->bucket] = new_factor;
+
+	/* update the repeating-pattern data */
+	data->intervals[data->interval_ptr++] = last_idle_us;
+	if (data->interval_ptr >= INTERVALS)
+		data->interval_ptr = 0;
 }
 
 /**
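
A worked example of the detector with illustrative numbers: for eight
recorded intervals of {500, 510, 490, 505, 495, 500, 502, 498} microseconds,

	/*
	 * avg    = 4000 / 8 = 500
	 * stddev = (0 + 100 + 100 + 25 + 25 + 0 + 4 + 4) / 8 = 32
	 *
	 * Note that the "stddev" variable holds the squared deviation
	 * (the variance), so STDDEV_THRESH == 400 corresponds to a true
	 * standard deviation of 20 us.  Here 32 < 400, so, provided
	 * avg <= expected_us, predicted_us becomes 500.
	 */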
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index 0172fa3..a1bf77c 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -188,7 +188,7 @@
 static int td_fill_desc(struct timb_dma_chan *td_chan, u8 *dma_desc,
 	struct scatterlist *sg, bool last)
 {
-	if (sg_dma_len(sg) > USHORT_MAX) {
+	if (sg_dma_len(sg) > USHRT_MAX) {
 		dev_err(chan2dev(&td_chan->chan), "Too big sg element\n");
 		return -EINVAL;
 	}
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 9be8e17..6a9ac75 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -802,6 +802,15 @@
 	  This driver can also be built as a module.  If so, the module
 	  will be called ads7828.
 
+config SENSORS_ADS7871
+	tristate "Texas Instruments ADS7871 A/D converter"
+	depends on SPI
+	help
+	  If you say yes here you get support for TI ADS7871 & ADS7870.
+
+	  This driver can also be built as a module.  If so, the module
+	  will be called ads7871.
+
 config SENSORS_AMC6821
 	tristate "Texas Instruments AMC6821"
 	depends on I2C  && EXPERIMENTAL
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 4aa1a3d..86920fb 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -29,6 +29,7 @@
 obj-$(CONFIG_SENSORS_ADM1031)	+= adm1031.o
 obj-$(CONFIG_SENSORS_ADM9240)	+= adm9240.o
 obj-$(CONFIG_SENSORS_ADS7828)	+= ads7828.o
+obj-$(CONFIG_SENSORS_ADS7871)	+= ads7871.o
 obj-$(CONFIG_SENSORS_ADT7411)	+= adt7411.o
 obj-$(CONFIG_SENSORS_ADT7462)	+= adt7462.o
 obj-$(CONFIG_SENSORS_ADT7470)	+= adt7470.o
diff --git a/drivers/hwmon/ads7871.c b/drivers/hwmon/ads7871.c
new file mode 100644
index 0000000..b300a20
--- /dev/null
+++ b/drivers/hwmon/ads7871.c
@@ -0,0 +1,253 @@
+/*
+ *  ads7871 - driver for TI ADS7871 A/D converter
+ *
+ *  Copyright (c) 2010 Paul Thomas <pthomas8589@gmail.com>
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ *  GNU General Public License for more details.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 or
+ *  later as published by the Free Software Foundation.
+ *
+ *	You need to have something like this in struct spi_board_info
+ *	{
+ *		.modalias	= "ads7871",
+ *		.max_speed_hz	= 2*1000*1000,
+ *		.chip_select	= 0,
+ *		.bus_num	= 1,
+ *	},
+ */
+
+/*From figure 18 in the datasheet*/
+/*Register addresses*/
+#define REG_LS_BYTE	0 /*A/D Output Data, LS Byte*/
+#define REG_MS_BYTE	1 /*A/D Output Data, MS Byte*/
+#define REG_PGA_VALID	2 /*PGA Valid Register*/
+#define REG_AD_CONTROL	3 /*A/D Control Register*/
+#define REG_GAIN_MUX	4 /*Gain/Mux Register*/
+#define REG_IO_STATE	5 /*Digital I/O State Register*/
+#define REG_IO_CONTROL	6 /*Digital I/O Control Register*/
+#define REG_OSC_CONTROL	7 /*Rev/Oscillator Control Register*/
+#define REG_SER_CONTROL 24 /*Serial Interface Control Register*/
+#define REG_ID		31 /*ID Register*/
+
+/* From figure 17 in the datasheet:
+ * these bits get ORed with the address to form
+ * the instruction byte. */
+/*Instruction Bit masks*/
+#define INST_MODE_bm	(1<<7)
+#define INST_READ_bm	(1<<6)
+#define INST_16BIT_bm	(1<<5)
+
+/*From figure 18 in the datasheet*/
+/*bit masks for Gain/Mux Register*/
+#define MUX_CNV_bv	7
+#define MUX_CNV_bm	(1<<MUX_CNV_bv)
+#define MUX_M3_bm	(1<<3) /*M3 selects single ended*/
+#define MUX_G_bv	4 /*allows for reg = (gain << MUX_G_bv) | ...*/
+
+/*From figure 18 in the datasheet*/
+/*bit masks for Rev/Oscillator Control Register*/
+#define OSC_OSCR_bm	(1<<5)
+#define OSC_OSCE_bm	(1<<4)
+#define OSC_REFE_bm	(1<<3)
+#define OSC_BUFE_bm	(1<<2)
+#define OSC_R2V_bm	(1<<1)
+#define OSC_RBG_bm	(1<<0)
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/spi/spi.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+
+#define DEVICE_NAME	"ads7871"
+
+struct ads7871_data {
+	struct device	*hwmon_dev;
+	struct mutex	update_lock;
+};
+
+static int ads7871_read_reg8(struct spi_device *spi, int reg)
+{
+	int ret;
+	reg = reg | INST_READ_bm;
+	ret = spi_w8r8(spi, reg);
+	return ret;
+}
+
+static int ads7871_read_reg16(struct spi_device *spi, int reg)
+{
+	int ret;
+	reg = reg | INST_READ_bm | INST_16BIT_bm;
+	ret = spi_w8r16(spi, reg);
+	return ret;
+}
+
+static int ads7871_write_reg8(struct spi_device *spi, int reg, u8 val)
+{
+	u8 tmp[2] = {reg, val};
+	return spi_write(spi, tmp, sizeof(tmp));
+}
+
+static ssize_t show_voltage(struct device *dev,
+		struct device_attribute *da, char *buf)
+{
+	struct spi_device *spi = to_spi_device(dev);
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+	int ret, val, i = 0;
+	uint8_t channel, mux_cnv;
+
+	channel = attr->index;
+	/*TODO: add support for conversions
+	 *other than single ended with a gain of 1*/
+	/*MUX_M3_bm forces single ended*/
+	/*This is also where the gain of the PGA would be set*/
+	ads7871_write_reg8(spi, REG_GAIN_MUX,
+		(MUX_CNV_bm | MUX_M3_bm | channel));
+
+	ret = ads7871_read_reg8(spi, REG_GAIN_MUX);
+	mux_cnv = ((ret & MUX_CNV_bm)>>MUX_CNV_bv);
+	/*on 400MHz arm9 platform the conversion
+	 *is already done when we do this test*/
+	while ((i < 2) && mux_cnv) {
+		i++;
+		ret = ads7871_read_reg8(spi, REG_GAIN_MUX);
+		mux_cnv = ((ret & MUX_CNV_bm)>>MUX_CNV_bv);
+		msleep_interruptible(1);
+	}
+
+	if (mux_cnv == 0) {
+		val = ads7871_read_reg16(spi, REG_LS_BYTE);
+		/*result in volts*10000 = (val/8192)*2.5*10000*/
+		val = ((val>>2) * 25000) / 8192;
+		return sprintf(buf, "%d\n", val);
+	} else {
+		return -1;
+	}
+}
+
+static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, show_voltage, NULL, 0);
+static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, show_voltage, NULL, 1);
+static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, show_voltage, NULL, 2);
+static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, show_voltage, NULL, 3);
+static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, show_voltage, NULL, 4);
+static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO, show_voltage, NULL, 5);
+static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO, show_voltage, NULL, 6);
+static SENSOR_DEVICE_ATTR(in7_input, S_IRUGO, show_voltage, NULL, 7);
+
+static struct attribute *ads7871_attributes[] = {
+	&sensor_dev_attr_in0_input.dev_attr.attr,
+	&sensor_dev_attr_in1_input.dev_attr.attr,
+	&sensor_dev_attr_in2_input.dev_attr.attr,
+	&sensor_dev_attr_in3_input.dev_attr.attr,
+	&sensor_dev_attr_in4_input.dev_attr.attr,
+	&sensor_dev_attr_in5_input.dev_attr.attr,
+	&sensor_dev_attr_in6_input.dev_attr.attr,
+	&sensor_dev_attr_in7_input.dev_attr.attr,
+	NULL
+};
+
+static const struct attribute_group ads7871_group = {
+	.attrs = ads7871_attributes,
+};
+
+static int __devinit ads7871_probe(struct spi_device *spi)
+{
+	int status, ret, err = 0;
+	uint8_t val;
+	struct ads7871_data *pdata;
+
+	dev_dbg(&spi->dev, "probe\n");
+
+	pdata = kzalloc(sizeof(struct ads7871_data), GFP_KERNEL);
+	if (!pdata) {
+		err = -ENOMEM;
+		goto exit;
+	}
+
+	status = sysfs_create_group(&spi->dev.kobj, &ads7871_group);
+	if (status < 0)
+		goto error_free;
+
+	pdata->hwmon_dev = hwmon_device_register(&spi->dev);
+	if (IS_ERR(pdata->hwmon_dev)) {
+		err = PTR_ERR(pdata->hwmon_dev);
+		goto error_remove;
+	}
+
+	spi_set_drvdata(spi, pdata);
+
+	/* Configure the SPI bus */
+	spi->mode = (SPI_MODE_0);
+	spi->bits_per_word = 8;
+	spi_setup(spi);
+
+	ads7871_write_reg8(spi, REG_SER_CONTROL, 0);
+	ads7871_write_reg8(spi, REG_AD_CONTROL, 0);
+
+	val = (OSC_OSCR_bm | OSC_OSCE_bm | OSC_REFE_bm | OSC_BUFE_bm);
+	ads7871_write_reg8(spi, REG_OSC_CONTROL, val);
+	ret = ads7871_read_reg8(spi, REG_OSC_CONTROL);
+
+	dev_dbg(&spi->dev, "REG_OSC_CONTROL write:%x, read:%x\n", val, ret);
+	/*because there is no other error checking on an SPI bus
+	we need to make sure we really have a chip*/
+	if (val != ret) {
+		err = -ENODEV;
+		goto error_remove;
+	}
+
+	return 0;
+
+error_remove:
+	sysfs_remove_group(&spi->dev.kobj, &ads7871_group);
+error_free:
+	kfree(pdata);
+exit:
+	return err;
+}
+
+static int __devexit ads7871_remove(struct spi_device *spi)
+{
+	struct ads7871_data *pdata = spi_get_drvdata(spi);
+
+	hwmon_device_unregister(pdata->hwmon_dev);
+	sysfs_remove_group(&spi->dev.kobj, &ads7871_group);
+	kfree(pdata);
+	return 0;
+}
+
+static struct spi_driver ads7871_driver = {
+	.driver = {
+		.name = DEVICE_NAME,
+		.bus = &spi_bus_type,
+		.owner = THIS_MODULE,
+	},
+
+	.probe = ads7871_probe,
+	.remove = __devexit_p(ads7871_remove),
+};
+
+static int __init ads7871_init(void)
+{
+	return spi_register_driver(&ads7871_driver);
+}
+
+static void __exit ads7871_exit(void)
+{
+	spi_unregister_driver(&ads7871_driver);
+}
+
+module_init(ads7871_init);
+module_exit(ads7871_exit);
+
+MODULE_AUTHOR("Paul Thomas <pthomas8589@gmail.com>");
+MODULE_DESCRIPTION("TI ADS7871 A/D driver");
+MODULE_LICENSE("GPL");
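
For reference, the scaling in show_voltage() above with an illustrative raw
register value:

	/*
	 * raw = 0x4000: the 14-bit result sits in bits 15..2, so
	 * (0x4000 >> 2) = 4096, and 4096 * 25000 / 8192 = 12500,
	 * i.e. 1.25 V reported in units of volts * 10000 against the
	 * 2.5 V reference.
	 */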
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index e9b7fbc..2988da1 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -241,6 +241,55 @@
 	return tjmax;
 }
 
+static int __devinit get_tjmax(struct cpuinfo_x86 *c, u32 id,
+			       struct device *dev)
+{
+	/* 100C is the default for both mobile and non-mobile CPUs */
+	int err;
+	u32 eax, edx;
+	u32 val;
+
+	/* On current Intel(R) processors, the IA32_TEMPERATURE_TARGET
+	   MSR contains the TjMax value */
+	err = rdmsr_safe_on_cpu(id, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
+	if (err) {
+		dev_warn(dev, "Unable to read TjMax from CPU.\n");
+	} else {
+		val = (eax >> 16) & 0xff;
+		/*
+		 * If the TjMax is not plausible, an assumption
+		 * will be used
+		 */
+		if ((val > 80) && (val < 120)) {
+			dev_info(dev, "TjMax is %d C.\n", val);
+			return val * 1000;
+		}
+	}
+
+	/*
+	 * An assumption is made for early CPUs and unreadable MSR.
+	 * NOTE: the given value may not be correct.
+	 */
+
+	switch (c->x86_model) {
+	case 0xe:
+	case 0xf:
+	case 0x16:
+	case 0x1a:
+		dev_warn(dev, "TjMax is assumed as 100 C!\n");
+		return 100000;
+	case 0x17:
+	case 0x1c:		/* Atom CPUs */
+		return adjust_tjmax(c, id, dev);
+	default:
+		dev_warn(dev, "CPU (model=0x%x) is not supported yet,"
+			" using default TjMax of 100C.\n", c->x86_model);
+		return 100000;
+	}
+}
+
 static int __devinit coretemp_probe(struct platform_device *pdev)
 {
 	struct coretemp_data *data;
@@ -283,14 +332,18 @@
 		}
 	}
 
-	data->tjmax = adjust_tjmax(c, data->id, &pdev->dev);
+	data->tjmax = get_tjmax(c, data->id, &pdev->dev);
 	platform_set_drvdata(pdev, data);
 
-	/* read the still undocumented IA32_TEMPERATURE_TARGET it exists
-	   on older CPUs but not in this register, Atoms don't have it either */
+	/*
+	 * read the still undocumented IA32_TEMPERATURE_TARGET. It exists
+	 * on older CPUs but not in this register,
+	 * Atoms don't have it either.
+	 */
 
 	if ((c->x86_model > 0xe) && (c->x86_model != 0x1c)) {
-		err = rdmsr_safe_on_cpu(data->id, 0x1a2, &eax, &edx);
+		err = rdmsr_safe_on_cpu(data->id, MSR_IA32_TEMPERATURE_TARGET,
+		    &eax, &edx);
 		if (err) {
 			dev_warn(&pdev->dev, "Unable to read"
 					" IA32_TEMPERATURE_TARGET MSR\n");
@@ -451,28 +504,20 @@
 
 	for_each_online_cpu(i) {
 		struct cpuinfo_x86 *c = &cpu_data(i);
+		/*
+		 * CPUID.06H.EAX[0] indicates whether the CPU has thermal
+		 * sensors. We check this bit only, all the early CPUs
+		 * without thermal sensors will be filtered out.
+		 */
+		if (c->cpuid_level >= 6 && (cpuid_eax(0x06) & 0x01)) {
+			err = coretemp_device_add(i);
+			if (err)
+				goto exit_devices_unreg;
 
-		/* check if family 6, models 0xe (Pentium M DC),
-		  0xf (Core 2 DC 65nm), 0x16 (Core 2 SC 65nm),
-		  0x17 (Penryn 45nm), 0x1a (Nehalem), 0x1c (Atom),
-		  0x1e (Lynnfield) */
-		if ((c->cpuid_level < 0) || (c->x86 != 0x6) ||
-		    !((c->x86_model == 0xe) || (c->x86_model == 0xf) ||
-			(c->x86_model == 0x16) || (c->x86_model == 0x17) ||
-			(c->x86_model == 0x1a) || (c->x86_model == 0x1c) ||
-			(c->x86_model == 0x1e))) {
-
-			/* supported CPU not found, but report the unknown
-			   family 6 CPU */
-			if ((c->x86 == 0x6) && (c->x86_model > 0xf))
-				printk(KERN_WARNING DRVNAME ": Unknown CPU "
-					"model 0x%x\n", c->x86_model);
-			continue;
+		} else {
+			printk(KERN_INFO DRVNAME ": CPU (model=0x%x)"
+				" has no thermal sensor.\n", c->x86_model);
 		}
-
-		err = coretemp_device_add(i);
-		if (err)
-			goto exit_devices_unreg;
 	}
 	if (list_empty(&pdev_list)) {
 		err = -ENODEV;
diff --git a/drivers/hwmon/lis3lv02d.c b/drivers/hwmon/lis3lv02d.c
index b2f2277..6138f03 100644
--- a/drivers/hwmon/lis3lv02d.c
+++ b/drivers/hwmon/lis3lv02d.c
@@ -41,6 +41,8 @@
 
 /* joystick device poll interval in milliseconds */
 #define MDPS_POLL_INTERVAL 50
+#define MDPS_POLL_MIN	   0
+#define MDPS_POLL_MAX	   2000
 /*
  * The sensor can also generate interrupts (DRDY) but it's pretty pointless
  * because they are generated even if the data do not change. So it's better
@@ -121,11 +123,9 @@
 	int position[3];
 	int i;
 
-	mutex_lock(&lis3->mutex);
 	position[0] = lis3->read_data(lis3, OUTX);
 	position[1] = lis3->read_data(lis3, OUTY);
 	position[2] = lis3->read_data(lis3, OUTZ);
-	mutex_unlock(&lis3->mutex);
 
 	for (i = 0; i < 3; i++)
 		position[i] = (position[i] * lis3->scale) / LIS3_ACCURACY;
@@ -249,8 +249,24 @@
 EXPORT_SYMBOL_GPL(lis3lv02d_poweron);
 
 
+static void lis3lv02d_joystick_poll(struct input_polled_dev *pidev)
+{
+	int x, y, z;
+
+	mutex_lock(&lis3_dev.mutex);
+	lis3lv02d_get_xyz(&lis3_dev, &x, &y, &z);
+	input_report_abs(pidev->input, ABS_X, x);
+	input_report_abs(pidev->input, ABS_Y, y);
+	input_report_abs(pidev->input, ABS_Z, z);
+	input_sync(pidev->input);
+	mutex_unlock(&lis3_dev.mutex);
+}
+
 static irqreturn_t lis302dl_interrupt(int irq, void *dummy)
 {
+	if (!test_bit(0, &lis3_dev.misc_opened))
+		goto out;
+
 	/*
 	 * Be careful: on some HP laptops the bios force DD when on battery and
 	 * the lid is closed. This leads to interrupts as soon as a little move
@@ -260,44 +276,93 @@
 
 	wake_up_interruptible(&lis3_dev.misc_wait);
 	kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
+out:
+	if (lis3_dev.whoami == WAI_8B && lis3_dev.idev &&
+	    lis3_dev.idev->input->users)
+		return IRQ_WAKE_THREAD;
+	return IRQ_HANDLED;
+}
+
+static void lis302dl_interrupt_handle_click(struct lis3lv02d *lis3)
+{
+	struct input_dev *dev = lis3->idev->input;
+	u8 click_src;
+
+	mutex_lock(&lis3->mutex);
+	lis3->read(lis3, CLICK_SRC, &click_src);
+
+	if (click_src & CLICK_SINGLE_X) {
+		input_report_key(dev, lis3->mapped_btns[0], 1);
+		input_report_key(dev, lis3->mapped_btns[0], 0);
+	}
+
+	if (click_src & CLICK_SINGLE_Y) {
+		input_report_key(dev, lis3->mapped_btns[1], 1);
+		input_report_key(dev, lis3->mapped_btns[1], 0);
+	}
+
+	if (click_src & CLICK_SINGLE_Z) {
+		input_report_key(dev, lis3->mapped_btns[2], 1);
+		input_report_key(dev, lis3->mapped_btns[2], 0);
+	}
+	input_sync(dev);
+	mutex_unlock(&lis3->mutex);
+}
+
+static void lis302dl_interrupt_handle_ff_wu(struct lis3lv02d *lis3)
+{
+	u8 wu1_src;
+	u8 wu2_src;
+
+	lis3->read(lis3, FF_WU_SRC_1, &wu1_src);
+	lis3->read(lis3, FF_WU_SRC_2, &wu2_src);
+
+	wu1_src = wu1_src & FF_WU_SRC_IA ? wu1_src : 0;
+	wu2_src = wu2_src & FF_WU_SRC_IA ? wu2_src : 0;
+
+	/* joystick poll is internally protected by the lis3->mutex. */
+	if (wu1_src || wu2_src)
+		lis3lv02d_joystick_poll(lis3_dev.idev);
+}
+
+static irqreturn_t lis302dl_interrupt_thread1_8b(int irq, void *data)
+{
+	struct lis3lv02d *lis3 = data;
+
+	if ((lis3->pdata->irq_cfg & LIS3_IRQ1_MASK) == LIS3_IRQ1_CLICK)
+		lis302dl_interrupt_handle_click(lis3);
+	else
+		lis302dl_interrupt_handle_ff_wu(lis3);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t lis302dl_interrupt_thread2_8b(int irq, void *data)
+{
+	struct lis3lv02d *lis3 = data;
+
+	if ((lis3->pdata->irq_cfg & LIS3_IRQ2_MASK) == LIS3_IRQ2_CLICK)
+		lis302dl_interrupt_handle_click(lis3);
+	else
+		lis302dl_interrupt_handle_ff_wu(lis3);
+
 	return IRQ_HANDLED;
 }
 
 static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
 {
-	int ret;
-
 	if (test_and_set_bit(0, &lis3_dev.misc_opened))
 		return -EBUSY; /* already open */
 
 	atomic_set(&lis3_dev.count, 0);
-
-	/*
-	 * The sensor can generate interrupts for free-fall and direction
-	 * detection (distinguishable with FF_WU_SRC and DD_SRC) but to keep
-	 * the things simple and _fast_ we activate it only for free-fall, so
-	 * no need to read register (very slow with ACPI). For the same reason,
-	 * we forbid shared interrupts.
-	 *
-	 * IRQF_TRIGGER_RISING seems pointless on HP laptops because the
-	 * io-apic is not configurable (and generates a warning) but I keep it
-	 * in case of support for other hardware.
-	 */
-	ret = request_irq(lis3_dev.irq, lis302dl_interrupt, IRQF_TRIGGER_RISING,
-			  DRIVER_NAME, &lis3_dev);
-
-	if (ret) {
-		clear_bit(0, &lis3_dev.misc_opened);
-		printk(KERN_ERR DRIVER_NAME ": IRQ%d allocation failed\n", lis3_dev.irq);
-		return -EBUSY;
-	}
 	return 0;
 }
 
 static int lis3lv02d_misc_release(struct inode *inode, struct file *file)
 {
 	fasync_helper(-1, file, 0, &lis3_dev.async_queue);
-	free_irq(lis3_dev.irq, &lis3_dev);
 	clear_bit(0, &lis3_dev.misc_opened); /* release the device */
 	return 0;
 }
@@ -380,22 +445,12 @@
 	.fops    = &lis3lv02d_misc_fops,
 };
 
-static void lis3lv02d_joystick_poll(struct input_polled_dev *pidev)
-{
-	int x, y, z;
-
-	lis3lv02d_get_xyz(&lis3_dev, &x, &y, &z);
-	input_report_abs(pidev->input, ABS_X, x);
-	input_report_abs(pidev->input, ABS_Y, y);
-	input_report_abs(pidev->input, ABS_Z, z);
-	input_sync(pidev->input);
-}
-
 int lis3lv02d_joystick_enable(void)
 {
 	struct input_dev *input_dev;
 	int err;
 	int max_val, fuzz, flat;
+	int btns[] = {BTN_X, BTN_Y, BTN_Z};
 
 	if (lis3_dev.idev)
 		return -EINVAL;
@@ -406,6 +461,8 @@
 
 	lis3_dev.idev->poll = lis3lv02d_joystick_poll;
 	lis3_dev.idev->poll_interval = MDPS_POLL_INTERVAL;
+	lis3_dev.idev->poll_interval_min = MDPS_POLL_MIN;
+	lis3_dev.idev->poll_interval_max = MDPS_POLL_MAX;
 	input_dev = lis3_dev.idev->input;
 
 	input_dev->name       = "ST LIS3LV02DL Accelerometer";
@@ -422,6 +479,10 @@
 	input_set_abs_params(input_dev, ABS_Y, -max_val, max_val, fuzz, flat);
 	input_set_abs_params(input_dev, ABS_Z, -max_val, max_val, fuzz, flat);
 
+	lis3_dev.mapped_btns[0] = lis3lv02d_get_axis(abs(lis3_dev.ac.x), btns);
+	lis3_dev.mapped_btns[1] = lis3lv02d_get_axis(abs(lis3_dev.ac.y), btns);
+	lis3_dev.mapped_btns[2] = lis3lv02d_get_axis(abs(lis3_dev.ac.z), btns);
+
 	err = input_register_polled_device(lis3_dev.idev);
 	if (err) {
 		input_free_polled_device(lis3_dev.idev);
@@ -434,6 +495,11 @@
 
 void lis3lv02d_joystick_disable(void)
 {
+	if (lis3_dev.irq)
+		free_irq(lis3_dev.irq, &lis3_dev);
+	if (lis3_dev.pdata && lis3_dev.pdata->irq2)
+		free_irq(lis3_dev.pdata->irq2, &lis3_dev);
+
 	if (!lis3_dev.idev)
 		return;
 
@@ -462,7 +528,9 @@
 {
 	int x, y, z;
 
+	mutex_lock(&lis3_dev.mutex);
 	lis3lv02d_get_xyz(&lis3_dev, &x, &y, &z);
+	mutex_unlock(&lis3_dev.mutex);
 	return sprintf(buf, "(%d,%d,%d)\n", x, y, z);
 }
 
@@ -521,12 +589,70 @@
 }
 EXPORT_SYMBOL_GPL(lis3lv02d_remove_fs);
 
+static void lis3lv02d_8b_configure(struct lis3lv02d *dev,
+				struct lis3lv02d_platform_data *p)
+{
+	int err;
+	int ctrl2 = p->hipass_ctrl;
+
+	if (p->click_flags) {
+		dev->write(dev, CLICK_CFG, p->click_flags);
+		dev->write(dev, CLICK_TIMELIMIT, p->click_time_limit);
+		dev->write(dev, CLICK_LATENCY, p->click_latency);
+		dev->write(dev, CLICK_WINDOW, p->click_window);
+		dev->write(dev, CLICK_THSZ, p->click_thresh_z & 0xf);
+		dev->write(dev, CLICK_THSY_X,
+			(p->click_thresh_x & 0xf) |
+			(p->click_thresh_y << 4));
+
+		if (dev->idev) {
+			struct input_dev *input_dev = lis3_dev.idev->input;
+			input_set_capability(input_dev, EV_KEY, BTN_X);
+			input_set_capability(input_dev, EV_KEY, BTN_Y);
+			input_set_capability(input_dev, EV_KEY, BTN_Z);
+		}
+	}
+
+	if (p->wakeup_flags) {
+		dev->write(dev, FF_WU_CFG_1, p->wakeup_flags);
+		dev->write(dev, FF_WU_THS_1, p->wakeup_thresh & 0x7f);
+		/* default to 2.5ms for now */
+		dev->write(dev, FF_WU_DURATION_1, 1);
+		ctrl2 ^= HP_FF_WU1; /* XOR to keep compatible with old pdata */
+	}
+
+	if (p->wakeup_flags2) {
+		dev->write(dev, FF_WU_CFG_2, p->wakeup_flags2);
+		dev->write(dev, FF_WU_THS_2, p->wakeup_thresh2 & 0x7f);
+		/* default to 2.5ms for now */
+		dev->write(dev, FF_WU_DURATION_2, 1);
+		ctrl2 ^= HP_FF_WU2; /* XOR to keep compatible with old pdata */
+	}
+	/* Configure hipass filters */
+	dev->write(dev, CTRL_REG2, ctrl2);
+
+	if (p->irq2) {
+		err = request_threaded_irq(p->irq2,
+					NULL,
+					lis302dl_interrupt_thread2_8b,
+					IRQF_TRIGGER_RISING |
+					IRQF_ONESHOT,
+					DRIVER_NAME, &lis3_dev);
+		if (err < 0)
+			printk(KERN_ERR DRIVER_NAME
+				": No second IRQ. Limited functionality\n");
+	}
+}
+
 /*
  * Initialise the accelerometer and the various subsystems.
  * Should be rather independent of the bus system.
  */
 int lis3lv02d_init_device(struct lis3lv02d *dev)
 {
+	int err;
+	irq_handler_t thread_fn;
+
 	dev->whoami = lis3lv02d_read_8(dev, WHO_AM_I);
 
 	switch (dev->whoami) {
@@ -567,25 +693,8 @@
 	if (dev->pdata) {
 		struct lis3lv02d_platform_data *p = dev->pdata;
 
-		if (p->click_flags && (dev->whoami == WAI_8B)) {
-			dev->write(dev, CLICK_CFG, p->click_flags);
-			dev->write(dev, CLICK_TIMELIMIT, p->click_time_limit);
-			dev->write(dev, CLICK_LATENCY, p->click_latency);
-			dev->write(dev, CLICK_WINDOW, p->click_window);
-			dev->write(dev, CLICK_THSZ, p->click_thresh_z & 0xf);
-			dev->write(dev, CLICK_THSY_X,
-					(p->click_thresh_x & 0xf) |
-					(p->click_thresh_y << 4));
-		}
-
-		if (p->wakeup_flags && (dev->whoami == WAI_8B)) {
-			dev->write(dev, FF_WU_CFG_1, p->wakeup_flags);
-			dev->write(dev, FF_WU_THS_1, p->wakeup_thresh & 0x7f);
-			/* default to 2.5ms for now */
-			dev->write(dev, FF_WU_DURATION_1, 1);
-			/* enable high pass filter for both free-fall units */
-			dev->write(dev, CTRL_REG2, HP_FF_WU1 | HP_FF_WU2);
-		}
+		if (dev->whoami == WAI_8B)
+			lis3lv02d_8b_configure(dev, p);
 
 		if (p->irq_cfg)
 			dev->write(dev, CTRL_REG3, p->irq_cfg);
@@ -598,6 +707,32 @@
 		goto out;
 	}
 
+	/*
+	 * The sensor can generate interrupts for free-fall and direction
+	 * detection (distinguishable with FF_WU_SRC and DD_SRC) but to keep
+	 * the things simple and _fast_ we activate it only for free-fall, so
+	 * no need to read register (very slow with ACPI). For the same reason,
+	 * we forbid shared interrupts.
+	 *
+	 * IRQF_TRIGGER_RISING seems pointless on HP laptops because the
+	 * io-apic is not configurable (and generates a warning) but I keep it
+	 * in case of support for other hardware.
+	 */
+	if (dev->whoami == WAI_8B)
+		thread_fn = lis302dl_interrupt_thread1_8b;
+	else
+		thread_fn = NULL;
+
+	err = request_threaded_irq(dev->irq, lis302dl_interrupt,
+				thread_fn,
+				IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+				DRIVER_NAME, &lis3_dev);
+
+	if (err < 0) {
+		printk(KERN_ERR DRIVER_NAME ": Cannot get IRQ\n");
+		goto out;
+	}
+
 	if (misc_register(&lis3lv02d_misc_device))
 		printk(KERN_ERR DRIVER_NAME ": misc_register failed\n");
 out:
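
The request_threaded_irq() conversion above follows the standard
hard/threaded split: the primary handler stays fast and returns
IRQ_WAKE_THREAD when slow bus traffic is needed. A generic sketch of the
idiom (device and handler names are hypothetical):

	static irqreturn_t quick_check(int irq, void *data)
	{
		/* interrupt context: no sleeping; decide whether to defer */
		return IRQ_WAKE_THREAD;
	}

	static irqreturn_t slow_work(int irq, void *data)
	{
		/* thread context: I2C/SPI register reads may sleep here */
		return IRQ_HANDLED;
	}

	err = request_threaded_irq(irq, quick_check, slow_work,
				   IRQF_TRIGGER_RISING | IRQF_ONESHOT,
				   "mydev", dev);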
diff --git a/drivers/hwmon/lis3lv02d.h b/drivers/hwmon/lis3lv02d.h
index e6a01f4..8540913 100644
--- a/drivers/hwmon/lis3lv02d.h
+++ b/drivers/hwmon/lis3lv02d.h
@@ -196,6 +196,16 @@
 	DD_SRC_IA	= 0x40,
 };
 
+enum lis3lv02d_click_src_8b {
+	CLICK_SINGLE_X	= 0x01,
+	CLICK_DOUBLE_X	= 0x02,
+	CLICK_SINGLE_Y	= 0x04,
+	CLICK_DOUBLE_Y	= 0x08,
+	CLICK_SINGLE_Z	= 0x10,
+	CLICK_DOUBLE_Z	= 0x20,
+	CLICK_IA	= 0x40,
+};
+
 struct axis_conversion {
 	s8	x;
 	s8	y;
@@ -223,6 +233,7 @@
 	struct platform_device	*pdev;     /* platform device */
 	atomic_t		count;     /* interrupt count after last read */
 	struct axis_conversion	ac;        /* hw -> logical axis */
+	int			mapped_btns[3];
 
 	u32			irq;       /* IRQ number */
 	struct fasync_struct	*async_queue; /* queue for the misc device */
diff --git a/drivers/isdn/gigaset/capi.c b/drivers/isdn/gigaset/capi.c
index 964a55f..ac4cfee 100644
--- a/drivers/isdn/gigaset/capi.c
+++ b/drivers/isdn/gigaset/capi.c
@@ -170,17 +170,6 @@
 }
 
 /*
- * convert hex to binary
- */
-static inline u8 hex2bin(char c)
-{
-	int result = c & 0x0f;
-	if (c & 0x40)
-		result += 9;
-	return result;
-}
-
-/*
  * convert an IE from Gigaset hex string to ETSI binary representation
  * including length byte
  * return value: result length, -1 on error
@@ -191,7 +180,7 @@
 	while (*in) {
 		if (!isxdigit(in[0]) || !isxdigit(in[1]) || l >= maxlen)
 			return -1;
-		out[++l] = (hex2bin(in[0]) << 4) + hex2bin(in[1]);
+		out[++l] = (hex_to_bin(in[0]) << 4) + hex_to_bin(in[1]);
 		in += 2;
 	}
 	out[0] = l;
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 0d0d625..26386a9 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -14,11 +14,17 @@
 if MISC_DEVICES
 
 config AD525X_DPOT
-	tristate "Analog Devices AD525x Digital Potentiometers"
-	depends on I2C && SYSFS
+	tristate "Analog Devices Digital Potentiometers"
+	depends on (I2C || SPI) && SYSFS
 	help
 	  If you say yes here, you get support for the Analog Devices
-	  AD5258, AD5259, AD5251, AD5252, AD5253, AD5254 and AD5255
+	  AD5258, AD5259, AD5251, AD5252, AD5253, AD5254, AD5255
+	  AD5160, AD5161, AD5162, AD5165, AD5200, AD5201, AD5203,
+	  AD5204, AD5206, AD5207, AD5231, AD5232, AD5233, AD5235,
+	  AD5260, AD5262, AD5263, AD5290, AD5291, AD5292, AD5293,
+	  AD7376, AD8400, AD8402, AD8403, ADN2850, AD5241, AD5242,
+	  AD5243, AD5245, AD5246, AD5247, AD5248, AD5280, AD5282,
+	  ADN2860, AD5273, AD5171, AD5170, AD5172, AD5173
 	  digital potentiometer chips.
 
 	  See Documentation/misc-devices/ad525x_dpot.txt for the
@@ -27,6 +33,26 @@
 	  This driver can also be built as a module.  If so, the module
 	  will be called ad525x_dpot.
 
+config AD525X_DPOT_I2C
+	tristate "support I2C bus connection"
+	depends on AD525X_DPOT && I2C
+	help
+	  Say Y here if you have digital potentiometers hooked to an I2C bus.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called ad525x_dpot-i2c.
+
+config AD525X_DPOT_SPI
+	tristate "support SPI bus connection"
+	depends on AD525X_DPOT && SPI_MASTER
+	help
+	  Say Y here if you have digital potentiometers hooked to an SPI bus.
+
+	  If unsure, say N (but it's safe to say "Y").
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called ad525x_dpot-spi.
+
 config ATMEL_PWM
 	tristate "Atmel AT32/AT91 PWM support"
 	depends on AVR32 || ARCH_AT91SAM9263 || ARCH_AT91SAM9RL || ARCH_AT91CAP9
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index f12dc3e..6ed06a1 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -4,6 +4,8 @@
 
 obj-$(CONFIG_IBM_ASM)		+= ibmasm/
 obj-$(CONFIG_AD525X_DPOT)	+= ad525x_dpot.o
+obj-$(CONFIG_AD525X_DPOT_I2C)	+= ad525x_dpot-i2c.o
+obj-$(CONFIG_AD525X_DPOT_SPI)	+= ad525x_dpot-spi.o
 obj-$(CONFIG_ATMEL_PWM)		+= atmel_pwm.o
 obj-$(CONFIG_ATMEL_SSC)		+= atmel-ssc.o
 obj-$(CONFIG_ATMEL_TCLIB)	+= atmel_tclib.o
diff --git a/drivers/misc/ad525x_dpot-i2c.c b/drivers/misc/ad525x_dpot-i2c.c
new file mode 100644
index 0000000..374352a
--- /dev/null
+++ b/drivers/misc/ad525x_dpot-i2c.c
@@ -0,0 +1,134 @@
+/*
+ * Driver for the Analog Devices digital potentiometers (I2C bus)
+ *
+ * Copyright (C) 2010 Michael Hennerich, Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+
+#include "ad525x_dpot.h"
+
+/* ------------------------------------------------------------------------- */
+/* I2C bus functions */
+static int write_d8(void *client, u8 val)
+{
+	return i2c_smbus_write_byte(client, val);
+}
+
+static int write_r8d8(void *client, u8 reg, u8 val)
+{
+	return i2c_smbus_write_byte_data(client, reg, val);
+}
+
+static int write_r8d16(void *client, u8 reg, u16 val)
+{
+	return i2c_smbus_write_word_data(client, reg, val);
+}
+
+static int read_d8(void *client)
+{
+	return i2c_smbus_read_byte(client);
+}
+
+static int read_r8d8(void *client, u8 reg)
+{
+	return i2c_smbus_read_byte_data(client, reg);
+}
+
+static int read_r8d16(void *client, u8 reg)
+{
+	return i2c_smbus_read_word_data(client, reg);
+}
+
+static const struct ad_dpot_bus_ops bops = {
+	.read_d8	= read_d8,
+	.read_r8d8	= read_r8d8,
+	.read_r8d16	= read_r8d16,
+	.write_d8	= write_d8,
+	.write_r8d8	= write_r8d8,
+	.write_r8d16	= write_r8d16,
+};
+
+static int __devinit ad_dpot_i2c_probe(struct i2c_client *client,
+				      const struct i2c_device_id *id)
+{
+	struct ad_dpot_bus_data bdata = {
+		.client = client,
+		.bops = &bops,
+	};
+
+	struct ad_dpot_id dpot_id = {
+		.name = (char *) &id->name,
+		.devid = id->driver_data,
+	};
+
+	if (!i2c_check_functionality(client->adapter,
+				     I2C_FUNC_SMBUS_WORD_DATA)) {
+		dev_err(&client->dev, "SMBUS Word Data not Supported\n");
+		return -EIO;
+	}
+
+	return ad_dpot_probe(&client->dev, &bdata, &dpot_id);
+}
+
+static int __devexit ad_dpot_i2c_remove(struct i2c_client *client)
+{
+	return ad_dpot_remove(&client->dev);
+}
+
+static const struct i2c_device_id ad_dpot_id[] = {
+	{"ad5258", AD5258_ID},
+	{"ad5259", AD5259_ID},
+	{"ad5251", AD5251_ID},
+	{"ad5252", AD5252_ID},
+	{"ad5253", AD5253_ID},
+	{"ad5254", AD5254_ID},
+	{"ad5255", AD5255_ID},
+	{"ad5241", AD5241_ID},
+	{"ad5242", AD5242_ID},
+	{"ad5243", AD5243_ID},
+	{"ad5245", AD5245_ID},
+	{"ad5246", AD5246_ID},
+	{"ad5247", AD5247_ID},
+	{"ad5248", AD5248_ID},
+	{"ad5280", AD5280_ID},
+	{"ad5282", AD5282_ID},
+	{"adn2860", ADN2860_ID},
+	{"ad5273", AD5273_ID},
+	{"ad5171", AD5171_ID},
+	{"ad5170", AD5170_ID},
+	{"ad5172", AD5172_ID},
+	{"ad5173", AD5173_ID},
+	{}
+};
+MODULE_DEVICE_TABLE(i2c, ad_dpot_id);
+
+static struct i2c_driver ad_dpot_i2c_driver = {
+	.driver = {
+		.name	= "ad_dpot",
+		.owner	= THIS_MODULE,
+	},
+	.probe		= ad_dpot_i2c_probe,
+	.remove		= __devexit_p(ad_dpot_i2c_remove),
+	.id_table	= ad_dpot_id,
+};
+
+static int __init ad_dpot_i2c_init(void)
+{
+	return i2c_add_driver(&ad_dpot_i2c_driver);
+}
+module_init(ad_dpot_i2c_init);
+
+static void __exit ad_dpot_i2c_exit(void)
+{
+	i2c_del_driver(&ad_dpot_i2c_driver);
+}
+module_exit(ad_dpot_i2c_exit);
+
+MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_DESCRIPTION("digital potentiometer I2C bus driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("i2c:ad_dpot");
diff --git a/drivers/misc/ad525x_dpot-spi.c b/drivers/misc/ad525x_dpot-spi.c
new file mode 100644
index 0000000..b8c6df9
--- /dev/null
+++ b/drivers/misc/ad525x_dpot-spi.c
@@ -0,0 +1,172 @@
+/*
+ * Driver for the Analog Devices digital potentiometers (SPI bus)
+ *
+ * Copyright (C) 2010 Michael Hennerich, Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/spi/spi.h>
+#include <linux/module.h>
+
+#include "ad525x_dpot.h"
+
+static const struct ad_dpot_id ad_dpot_spi_devlist[] = {
+	{.name = "ad5160", .devid = AD5160_ID},
+	{.name = "ad5161", .devid = AD5161_ID},
+	{.name = "ad5162", .devid = AD5162_ID},
+	{.name = "ad5165", .devid = AD5165_ID},
+	{.name = "ad5200", .devid = AD5200_ID},
+	{.name = "ad5201", .devid = AD5201_ID},
+	{.name = "ad5203", .devid = AD5203_ID},
+	{.name = "ad5204", .devid = AD5204_ID},
+	{.name = "ad5206", .devid = AD5206_ID},
+	{.name = "ad5207", .devid = AD5207_ID},
+	{.name = "ad5231", .devid = AD5231_ID},
+	{.name = "ad5232", .devid = AD5232_ID},
+	{.name = "ad5233", .devid = AD5233_ID},
+	{.name = "ad5235", .devid = AD5235_ID},
+	{.name = "ad5260", .devid = AD5260_ID},
+	{.name = "ad5262", .devid = AD5262_ID},
+	{.name = "ad5263", .devid = AD5263_ID},
+	{.name = "ad5290", .devid = AD5290_ID},
+	{.name = "ad5291", .devid = AD5291_ID},
+	{.name = "ad5292", .devid = AD5292_ID},
+	{.name = "ad5293", .devid = AD5293_ID},
+	{.name = "ad7376", .devid = AD7376_ID},
+	{.name = "ad8400", .devid = AD8400_ID},
+	{.name = "ad8402", .devid = AD8402_ID},
+	{.name = "ad8403", .devid = AD8403_ID},
+	{.name = "adn2850", .devid = ADN2850_ID},
+	{}
+};
+
+/* ------------------------------------------------------------------------- */
+
+/* SPI bus functions */
+static int write8(void *client, u8 val)
+{
+	u8 data = val;
+	return spi_write(client, &data, 1);
+}
+
+static int write16(void *client, u8 reg, u8 val)
+{
+	u8 data[2] = {reg, val};
+	return spi_write(client, data, 2);	/* reg + value = two bytes */
+}
+
+static int write24(void *client, u8 reg, u16 val)
+{
+	u8 data[3] = {reg, val >> 8, val};
+	return spi_write(client, data, 3);	/* reg + 16-bit value = three bytes */
+}
+
+static int read8(void *client)
+{
+	int ret;
+	u8 data;
+	ret = spi_read(client, &data, 1);
+	if (ret < 0)
+		return ret;
+
+	return data;
+}
+
+static int read16(void *client, u8 reg)
+{
+	int ret;
+	u8 buf_rx[2];
+
+	write16(client, reg, 0);
+	ret = spi_read(client, buf_rx, 2);
+	if (ret < 0)
+		return ret;
+
+	return (buf_rx[0] << 8) |  buf_rx[1];
+}
+
+static int read24(void *client, u8 reg)
+{
+	int ret;
+	u8 buf_rx[3];
+
+	write24(client, reg, 0);
+	ret = spi_read(client, buf_rx, 3);
+	if (ret < 0)
+		return ret;
+
+	return (buf_rx[1] << 8) |  buf_rx[2];
+}
+
+static const struct ad_dpot_bus_ops bops = {
+	.read_d8	= read8,
+	.read_r8d8	= read16,
+	.read_r8d16	= read24,
+	.write_d8	= write8,
+	.write_r8d8	= write16,
+	.write_r8d16	= write24,
+};
+
+static const struct ad_dpot_id *dpot_match_id(const struct ad_dpot_id *id,
+						char *name)
+{
+	while (id->name && id->name[0]) {
+		if (strcmp(name, id->name) == 0)
+			return id;
+		id++;
+	}
+	return NULL;
+}
+
+static int __devinit ad_dpot_spi_probe(struct spi_device *spi)
+{
+	char *name = spi->dev.platform_data;
+	const struct ad_dpot_id *dpot_id;
+
+	struct ad_dpot_bus_data bdata = {
+		.client = spi,
+		.bops = &bops,
+	};
+
+	dpot_id = dpot_match_id(ad_dpot_spi_devlist, name);
+
+	if (dpot_id == NULL) {
+		dev_err(&spi->dev, "%s not in supported device list\n", name);
+		return -ENODEV;
+	}
+
+	return ad_dpot_probe(&spi->dev, &bdata, dpot_id);
+}
+
+static int __devexit ad_dpot_spi_remove(struct spi_device *spi)
+{
+	return ad_dpot_remove(&spi->dev);
+}
+
+static struct spi_driver ad_dpot_spi_driver = {
+	.driver = {
+		.name	= "ad_dpot",
+		.bus	= &spi_bus_type,
+		.owner	= THIS_MODULE,
+	},
+	.probe		= ad_dpot_spi_probe,
+	.remove		= __devexit_p(ad_dpot_spi_remove),
+};
+
+static int __init ad_dpot_spi_init(void)
+{
+	return spi_register_driver(&ad_dpot_spi_driver);
+}
+module_init(ad_dpot_spi_init);
+
+static void __exit ad_dpot_spi_exit(void)
+{
+	spi_unregister_driver(&ad_dpot_spi_driver);
+}
+module_exit(ad_dpot_spi_exit);
+
+MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_DESCRIPTION("digital potentiometer SPI bus driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("spi:ad_dpot");
diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c
index 30a59f2..5e6fa84 100644
--- a/drivers/misc/ad525x_dpot.c
+++ b/drivers/misc/ad525x_dpot.c
@@ -1,6 +1,6 @@
 /*
- * ad525x_dpot: Driver for the Analog Devices AD525x digital potentiometers
- * Copyright (c) 2009 Analog Devices, Inc.
+ * ad525x_dpot: Driver for the Analog Devices digital potentiometers
+ * Copyright (c) 2009-2010 Analog Devices, Inc.
  * Author: Michael Hennerich <hennerich@blackfin.uclinux.org>
  *
  * DEVID		#Wipers		#Positions 	Resistor Options (kOhm)
@@ -11,6 +11,47 @@
  * AD5255		3		512		25, 250
  * AD5253		4		64		1, 10, 50, 100
  * AD5254		4		256		1, 10, 50, 100
+ * AD5160		1		256		5, 10, 50, 100
+ * AD5161		1		256		5, 10, 50, 100
+ * AD5162		2		256		2.5, 10, 50, 100
+ * AD5165		1		256		100
+ * AD5200		1		256		10, 50
+ * AD5201		1		33		10, 50
+ * AD5203		4		64		10, 100
+ * AD5204		4		256		10, 50, 100
+ * AD5206		6		256		10, 50, 100
+ * AD5207		2		256		10, 50, 100
+ * AD5231		1		1024		10, 50, 100
+ * AD5232		2		256		10, 50, 100
+ * AD5233		4		64		10, 50, 100
+ * AD5235		2		1024		25, 250
+ * AD5260		1		256		20, 50, 200
+ * AD5262		2		256		20, 50, 200
+ * AD5263		4		256		20, 50, 200
+ * AD5290		1		256		10, 50, 100
+ * AD5291		1		256		20
+ * AD5292		1		1024		20
+ * AD5293		1		1024		20
+ * AD7376		1		128		10, 50, 100, 1M
+ * AD8400		1		256		1, 10, 50, 100
+ * AD8402		2		256		1, 10, 50, 100
+ * AD8403		4		256		1, 10, 50, 100
+ * ADN2850		3		512		25, 250
+ * AD5241		1		256		10, 100, 1M
+ * AD5246		1		128		5, 10, 50, 100
+ * AD5247		1		128		5, 10, 50, 100
+ * AD5245		1		256		5, 10, 50, 100
+ * AD5243		2		256		2.5, 10, 50, 100
+ * AD5248		2		256		2.5, 10, 50, 100
+ * AD5242		2		256		20, 50, 200
+ * AD5280		1		256		20, 50, 200
+ * AD5282		2		256		20, 50, 200
+ * ADN2860		3		512		25, 250
+ * AD5273		1		64		1, 10, 50, 100 (OTP)
+ * AD5171		1		64		5, 10, 50, 100 (OTP)
+ * AD5170		1		256		2.5, 10, 50, 100 (OTP)
+ * AD5172		2		256		2.5, 10, 50, 100 (OTP)
+ * AD5173		2		256		2.5, 10, 50, 100 (OTP)
  *
  * See Documentation/misc-devices/ad525x_dpot.txt for more info.
  *
@@ -28,77 +69,283 @@
 #include <linux/device.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/i2c.h>
 #include <linux/delay.h>
+#include <linux/slab.h>
 
-#define DRIVER_NAME			"ad525x_dpot"
-#define DRIVER_VERSION			"0.1"
+#define DRIVER_VERSION			"0.2"
 
-enum dpot_devid {
-	AD5258_ID,
-	AD5259_ID,
-	AD5251_ID,
-	AD5252_ID,
-	AD5253_ID,
-	AD5254_ID,
-	AD5255_ID,
-};
-
-#define AD5258_MAX_POSITION		64
-#define AD5259_MAX_POSITION		256
-#define AD5251_MAX_POSITION		64
-#define AD5252_MAX_POSITION		256
-#define AD5253_MAX_POSITION		64
-#define AD5254_MAX_POSITION		256
-#define AD5255_MAX_POSITION		512
-
-#define AD525X_RDAC0		0
-#define AD525X_RDAC1		1
-#define AD525X_RDAC2		2
-#define AD525X_RDAC3		3
-
-#define AD525X_REG_TOL		0x18
-#define AD525X_TOL_RDAC0	(AD525X_REG_TOL | AD525X_RDAC0)
-#define AD525X_TOL_RDAC1	(AD525X_REG_TOL | AD525X_RDAC1)
-#define AD525X_TOL_RDAC2	(AD525X_REG_TOL | AD525X_RDAC2)
-#define AD525X_TOL_RDAC3	(AD525X_REG_TOL | AD525X_RDAC3)
-
-/* RDAC-to-EEPROM Interface Commands */
-#define AD525X_I2C_RDAC		(0x00 << 5)
-#define AD525X_I2C_EEPROM	(0x01 << 5)
-#define AD525X_I2C_CMD		(0x80)
-
-#define AD525X_DEC_ALL_6DB	(AD525X_I2C_CMD | (0x4 << 3))
-#define AD525X_INC_ALL_6DB	(AD525X_I2C_CMD | (0x9 << 3))
-#define AD525X_DEC_ALL		(AD525X_I2C_CMD | (0x6 << 3))
-#define AD525X_INC_ALL		(AD525X_I2C_CMD | (0xB << 3))
-
-static s32 ad525x_read(struct i2c_client *client, u8 reg);
-static s32 ad525x_write(struct i2c_client *client, u8 reg, u8 value);
+#include "ad525x_dpot.h"
 
 /*
  * Client data (each client gets its own)
  */
 
 struct dpot_data {
+	struct ad_dpot_bus_data	bdata;
 	struct mutex update_lock;
 	unsigned rdac_mask;
 	unsigned max_pos;
-	unsigned devid;
+	unsigned long devid;
+	unsigned uid;
+	unsigned feat;
+	unsigned wipers;
+	u16 rdac_cache[MAX_RDACS];
+	DECLARE_BITMAP(otp_en_mask, MAX_RDACS);
 };
 
+static inline int dpot_read_d8(struct dpot_data *dpot)
+{
+	return dpot->bdata.bops->read_d8(dpot->bdata.client);
+}
+
+static inline int dpot_read_r8d8(struct dpot_data *dpot, u8 reg)
+{
+	return dpot->bdata.bops->read_r8d8(dpot->bdata.client, reg);
+}
+
+static inline int dpot_read_r8d16(struct dpot_data *dpot, u8 reg)
+{
+	return dpot->bdata.bops->read_r8d16(dpot->bdata.client, reg);
+}
+
+static inline int dpot_write_d8(struct dpot_data *dpot, u8 val)
+{
+	return dpot->bdata.bops->write_d8(dpot->bdata.client, val);
+}
+
+static inline int dpot_write_r8d8(struct dpot_data *dpot, u8 reg, u16 val)
+{
+	return dpot->bdata.bops->write_r8d8(dpot->bdata.client, reg, val);
+}
+
+static inline int dpot_write_r8d16(struct dpot_data *dpot, u8 reg, u16 val)
+{
+	return dpot->bdata.bops->write_r8d16(dpot->bdata.client, reg, val);
+}
+
+static s32 dpot_read_spi(struct dpot_data *dpot, u8 reg)
+{
+	unsigned ctrl = 0;
+
+	if (!(reg & (DPOT_ADDR_EEPROM | DPOT_ADDR_CMD))) {
+
+		if (dpot->feat & F_RDACS_WONLY)
+			return dpot->rdac_cache[reg & DPOT_RDAC_MASK];
+
+		if (dpot->uid == DPOT_UID(AD5291_ID) ||
+			dpot->uid == DPOT_UID(AD5292_ID) ||
+			dpot->uid == DPOT_UID(AD5293_ID))
+			return dpot_read_r8d8(dpot,
+				DPOT_AD5291_READ_RDAC << 2);
+
+		ctrl = DPOT_SPI_READ_RDAC;
+	} else if (reg & DPOT_ADDR_EEPROM) {
+		ctrl = DPOT_SPI_READ_EEPROM;
+	}
+
+	if (dpot->feat & F_SPI_16BIT)
+		return dpot_read_r8d8(dpot, ctrl);
+	else if (dpot->feat & F_SPI_24BIT)
+		return dpot_read_r8d16(dpot, ctrl);
+
+	return -EFAULT;
+}
+
+static s32 dpot_read_i2c(struct dpot_data *dpot, u8 reg)
+{
+	unsigned ctrl = 0;
+	switch (dpot->uid) {
+	case DPOT_UID(AD5246_ID):
+	case DPOT_UID(AD5247_ID):
+		return dpot_read_d8(dpot);
+	case DPOT_UID(AD5245_ID):
+	case DPOT_UID(AD5241_ID):
+	case DPOT_UID(AD5242_ID):
+	case DPOT_UID(AD5243_ID):
+	case DPOT_UID(AD5248_ID):
+	case DPOT_UID(AD5280_ID):
+	case DPOT_UID(AD5282_ID):
+		ctrl = ((reg & DPOT_RDAC_MASK) == DPOT_RDAC0) ?
+			0 : DPOT_AD5291_RDAC_AB;
+		return dpot_read_r8d8(dpot, ctrl);
+	case DPOT_UID(AD5170_ID):
+	case DPOT_UID(AD5171_ID):
+	case DPOT_UID(AD5273_ID):
+		return dpot_read_d8(dpot);
+	case DPOT_UID(AD5172_ID):
+	case DPOT_UID(AD5173_ID):
+		ctrl = ((reg & DPOT_RDAC_MASK) == DPOT_RDAC0) ?
+			0 : DPOT_AD5272_3_A0;
+		return dpot_read_r8d8(dpot, ctrl);
+	default:
+		if ((reg & DPOT_REG_TOL) || (dpot->max_pos > 256))
+			return dpot_read_r8d16(dpot, (reg & 0xF8) |
+					((reg & 0x7) << 1));
+		else
+			return dpot_read_r8d8(dpot, reg);
+	}
+}
+
+static s32 dpot_read(struct dpot_data *dpot, u8 reg)
+{
+	if (dpot->feat & F_SPI)
+		return dpot_read_spi(dpot, reg);
+	else
+		return dpot_read_i2c(dpot, reg);
+}
+
+static s32 dpot_write_spi(struct dpot_data *dpot, u8 reg, u16 value)
+{
+	unsigned val = 0;
+
+	if (!(reg & (DPOT_ADDR_EEPROM | DPOT_ADDR_CMD))) {
+		if (dpot->feat & F_RDACS_WONLY)
+			dpot->rdac_cache[reg & DPOT_RDAC_MASK] = value;
+
+		if (dpot->feat & F_AD_APPDATA) {
+			if (dpot->feat & F_SPI_8BIT) {
+				val = ((reg & DPOT_RDAC_MASK) <<
+					DPOT_MAX_POS(dpot->devid)) |
+					value;
+				return dpot_write_d8(dpot, val);
+			} else if (dpot->feat & F_SPI_16BIT) {
+				val = ((reg & DPOT_RDAC_MASK) <<
+					DPOT_MAX_POS(dpot->devid)) |
+					value;
+				return dpot_write_r8d8(dpot, val >> 8,
+					val & 0xFF);
+			} else
+				BUG();
+		} else {
+			if (dpot->uid == DPOT_UID(AD5291_ID) ||
+				dpot->uid == DPOT_UID(AD5292_ID) ||
+				dpot->uid == DPOT_UID(AD5293_ID))
+				return dpot_write_r8d8(dpot,
+					(DPOT_AD5291_RDAC << 2) |
+					(value >> 8), value & 0xFF);
+
+			val = DPOT_SPI_RDAC | (reg & DPOT_RDAC_MASK);
+		}
+	} else if (reg & DPOT_ADDR_EEPROM) {
+		val = DPOT_SPI_EEPROM | (reg & DPOT_RDAC_MASK);
+	} else if (reg & DPOT_ADDR_CMD) {
+		switch (reg) {
+		case DPOT_DEC_ALL_6DB:
+			val = DPOT_SPI_DEC_ALL_6DB;
+			break;
+		case DPOT_INC_ALL_6DB:
+			val = DPOT_SPI_INC_ALL_6DB;
+			break;
+		case DPOT_DEC_ALL:
+			val = DPOT_SPI_DEC_ALL;
+			break;
+		case DPOT_INC_ALL:
+			val = DPOT_SPI_INC_ALL;
+			break;
+		}
+	} else
+		BUG();
+
+	if (dpot->feat & F_SPI_16BIT)
+		return dpot_write_r8d8(dpot, val, value);
+	else if (dpot->feat & F_SPI_24BIT)
+		return dpot_write_r8d16(dpot, val, value);
+
+	return -EFAULT;
+}
+
+static s32 dpot_write_i2c(struct dpot_data *dpot, u8 reg, u16 value)
+{
+	/* Only write the instruction byte for certain commands */
+	unsigned tmp = 0, ctrl = 0;
+
+	switch (dpot->uid) {
+	case DPOT_UID(AD5246_ID):
+	case DPOT_UID(AD5247_ID):
+		return dpot_write_d8(dpot, value);
+
+	case DPOT_UID(AD5245_ID):
+	case DPOT_UID(AD5241_ID):
+	case DPOT_UID(AD5242_ID):
+	case DPOT_UID(AD5243_ID):
+	case DPOT_UID(AD5248_ID):
+	case DPOT_UID(AD5280_ID):
+	case DPOT_UID(AD5282_ID):
+		ctrl = ((reg & DPOT_RDAC_MASK) == DPOT_RDAC0) ?
+			0 : DPOT_AD5291_RDAC_AB;
+		return dpot_write_r8d8(dpot, ctrl, value);
+	case DPOT_UID(AD5171_ID):
+	case DPOT_UID(AD5273_ID):
+		if (reg & DPOT_ADDR_OTP) {
+			tmp = dpot_read_d8(dpot);
+			if (tmp >> 6) /* Ready to Program? */
+				return -EFAULT;
+			ctrl = DPOT_AD5273_FUSE;
+		}
+		return dpot_write_r8d8(dpot, ctrl, value);
+	case DPOT_UID(AD5172_ID):
+	case DPOT_UID(AD5173_ID):
+		ctrl = ((reg & DPOT_RDAC_MASK) == DPOT_RDAC0) ?
+			0 : DPOT_AD5272_3_A0;
+		if (reg & DPOT_ADDR_OTP) {
+			tmp = dpot_read_r8d16(dpot, ctrl);
+			if (tmp >> 14) /* Ready to Program? */
+				return -EFAULT;
+			ctrl |= DPOT_AD5270_2_3_FUSE;
+		}
+		return dpot_write_r8d8(dpot, ctrl, value);
+	case DPOT_UID(AD5170_ID):
+		if (reg & DPOT_ADDR_OTP) {
+			tmp = dpot_read_r8d16(dpot, tmp);
+			if (tmp >> 14) /* Ready to Program? */
+				return -EFAULT;
+			ctrl = DPOT_AD5270_2_3_FUSE;
+		}
+		return dpot_write_r8d8(dpot, ctrl, value);
+	default:
+		if (reg & DPOT_ADDR_CMD)
+			return dpot_write_d8(dpot, reg);
+
+		if (dpot->max_pos > 256)
+			return dpot_write_r8d16(dpot, (reg & 0xF8) |
+						((reg & 0x7) << 1), value);
+		else
+			/* All other registers require instruction + data bytes */
+			return dpot_write_r8d8(dpot, reg, value);
+	}
+}
+
+
+static s32 dpot_write(struct dpot_data *dpot, u8 reg, u16 value)
+{
+	if (dpot->feat & F_SPI)
+		return dpot_write_spi(dpot, reg, value);
+	else
+		return dpot_write_i2c(dpot, reg, value);
+}
+
 /* sysfs functions */
 
 static ssize_t sysfs_show_reg(struct device *dev,
-			      struct device_attribute *attr, char *buf, u32 reg)
+			      struct device_attribute *attr,
+			      char *buf, u32 reg)
 {
-	struct i2c_client *client = to_i2c_client(dev);
-	struct dpot_data *data = i2c_get_clientdata(client);
+	struct dpot_data *data = dev_get_drvdata(dev);
 	s32 value;
 
+	if (reg & DPOT_ADDR_OTP_EN)
+		return sprintf(buf, "%s\n",
+			test_bit(DPOT_RDAC_MASK & reg, data->otp_en_mask) ?
+			"enabled" : "disabled");
+
 	mutex_lock(&data->update_lock);
-	value = ad525x_read(client, reg);
+	value = dpot_read(data, reg);
 	mutex_unlock(&data->update_lock);
 
 	if (value < 0)
@@ -111,7 +358,7 @@
 	 * datasheet (Rev. A) for more details.
 	 */
 
-	if (reg & AD525X_REG_TOL)
+	if (reg & DPOT_REG_TOL)
 		return sprintf(buf, "0x%04x\n", value & 0xFFFF);
 	else
 		return sprintf(buf, "%u\n", value & data->rdac_mask);
@@ -121,11 +368,23 @@
 			     struct device_attribute *attr,
 			     const char *buf, size_t count, u32 reg)
 {
-	struct i2c_client *client = to_i2c_client(dev);
-	struct dpot_data *data = i2c_get_clientdata(client);
+	struct dpot_data *data = dev_get_drvdata(dev);
 	unsigned long value;
 	int err;
 
+	if (reg & DPOT_ADDR_OTP_EN) {
+		if (!strncmp(buf, "enabled", sizeof("enabled")))
+			set_bit(DPOT_RDAC_MASK & reg, data->otp_en_mask);
+		else
+			clear_bit(DPOT_RDAC_MASK & reg, data->otp_en_mask);
+
+		return count;
+	}
+
+	if ((reg & DPOT_ADDR_OTP) &&
+		!test_bit(DPOT_RDAC_MASK & reg, data->otp_en_mask))
+		return -EPERM;
+
 	err = strict_strtoul(buf, 10, &value);
 	if (err)
 		return err;
@@ -134,9 +393,11 @@
 		value = data->rdac_mask;
 
 	mutex_lock(&data->update_lock);
-	ad525x_write(client, reg, value);
-	if (reg & AD525X_I2C_EEPROM)
+	dpot_write(data, reg, value);
+	if (reg & DPOT_ADDR_EEPROM)
 		msleep(26);	/* Sleep while the EEPROM updates */
+	else if (reg & DPOT_ADDR_OTP)
+		msleep(400);	/* Sleep while the OTP updates */
 	mutex_unlock(&data->update_lock);
 
 	return count;
@@ -146,11 +407,10 @@
 			    struct device_attribute *attr,
 			    const char *buf, size_t count, u32 reg)
 {
-	struct i2c_client *client = to_i2c_client(dev);
-	struct dpot_data *data = i2c_get_clientdata(client);
+	struct dpot_data *data = dev_get_drvdata(dev);
 
 	mutex_lock(&data->update_lock);
-	ad525x_write(client, reg, 0);
+	dpot_write(data, reg, 0);
 	mutex_unlock(&data->update_lock);
 
 	return count;
@@ -158,244 +418,131 @@
 
 /* ------------------------------------------------------------------------- */
 
-static ssize_t show_rdac0(struct device *dev,
-			  struct device_attribute *attr, char *buf)
-{
-	return sysfs_show_reg(dev, attr, buf, AD525X_I2C_RDAC | AD525X_RDAC0);
+#define DPOT_DEVICE_SHOW(_name, _reg) static ssize_t \
+show_##_name(struct device *dev, \
+			  struct device_attribute *attr, char *buf) \
+{ \
+	return sysfs_show_reg(dev, attr, buf, _reg); \
 }
 
-static ssize_t set_rdac0(struct device *dev,
-			 struct device_attribute *attr,
-			 const char *buf, size_t count)
-{
-	return sysfs_set_reg(dev, attr, buf, count,
-			     AD525X_I2C_RDAC | AD525X_RDAC0);
+#define DPOT_DEVICE_SET(_name, _reg) static ssize_t \
+set_##_name(struct device *dev, \
+			 struct device_attribute *attr, \
+			 const char *buf, size_t count) \
+{ \
+	return sysfs_set_reg(dev, attr, buf, count, _reg); \
 }
 
-static DEVICE_ATTR(rdac0, S_IWUSR | S_IRUGO, show_rdac0, set_rdac0);
+#define DPOT_DEVICE_SHOW_SET(name, reg) \
+DPOT_DEVICE_SHOW(name, reg) \
+DPOT_DEVICE_SET(name, reg) \
+static DEVICE_ATTR(name, S_IWUSR | S_IRUGO, show_##name, set_##name);
 
-static ssize_t show_eeprom0(struct device *dev,
-			    struct device_attribute *attr, char *buf)
-{
-	return sysfs_show_reg(dev, attr, buf, AD525X_I2C_EEPROM | AD525X_RDAC0);
-}
+#define DPOT_DEVICE_SHOW_ONLY(name, reg) \
+DPOT_DEVICE_SHOW(name, reg) \
+static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL);
 
-static ssize_t set_eeprom0(struct device *dev,
-			   struct device_attribute *attr,
-			   const char *buf, size_t count)
-{
-	return sysfs_set_reg(dev, attr, buf, count,
-			     AD525X_I2C_EEPROM | AD525X_RDAC0);
-}
+DPOT_DEVICE_SHOW_SET(rdac0, DPOT_ADDR_RDAC | DPOT_RDAC0);
+DPOT_DEVICE_SHOW_SET(eeprom0, DPOT_ADDR_EEPROM | DPOT_RDAC0);
+DPOT_DEVICE_SHOW_ONLY(tolerance0, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC0);
+DPOT_DEVICE_SHOW_SET(otp0, DPOT_ADDR_OTP | DPOT_RDAC0);
+DPOT_DEVICE_SHOW_SET(otp0en, DPOT_ADDR_OTP_EN | DPOT_RDAC0);
 
-static DEVICE_ATTR(eeprom0, S_IWUSR | S_IRUGO, show_eeprom0, set_eeprom0);
+DPOT_DEVICE_SHOW_SET(rdac1, DPOT_ADDR_RDAC | DPOT_RDAC1);
+DPOT_DEVICE_SHOW_SET(eeprom1, DPOT_ADDR_EEPROM | DPOT_RDAC1);
+DPOT_DEVICE_SHOW_ONLY(tolerance1, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC1);
+DPOT_DEVICE_SHOW_SET(otp1, DPOT_ADDR_OTP | DPOT_RDAC1);
+DPOT_DEVICE_SHOW_SET(otp1en, DPOT_ADDR_OTP_EN | DPOT_RDAC1);
 
-static ssize_t show_tolerance0(struct device *dev,
-			       struct device_attribute *attr, char *buf)
-{
-	return sysfs_show_reg(dev, attr, buf,
-			      AD525X_I2C_EEPROM | AD525X_TOL_RDAC0);
-}
+DPOT_DEVICE_SHOW_SET(rdac2, DPOT_ADDR_RDAC | DPOT_RDAC2);
+DPOT_DEVICE_SHOW_SET(eeprom2, DPOT_ADDR_EEPROM | DPOT_RDAC2);
+DPOT_DEVICE_SHOW_ONLY(tolerance2, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC2);
+DPOT_DEVICE_SHOW_SET(otp2, DPOT_ADDR_OTP | DPOT_RDAC2);
+DPOT_DEVICE_SHOW_SET(otp2en, DPOT_ADDR_OTP_EN | DPOT_RDAC2);
 
-static DEVICE_ATTR(tolerance0, S_IRUGO, show_tolerance0, NULL);
+DPOT_DEVICE_SHOW_SET(rdac3, DPOT_ADDR_RDAC | DPOT_RDAC3);
+DPOT_DEVICE_SHOW_SET(eeprom3, DPOT_ADDR_EEPROM | DPOT_RDAC3);
+DPOT_DEVICE_SHOW_ONLY(tolerance3, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC3);
+DPOT_DEVICE_SHOW_SET(otp3, DPOT_ADDR_OTP | DPOT_RDAC3);
+DPOT_DEVICE_SHOW_SET(otp3en, DPOT_ADDR_OTP_EN | DPOT_RDAC3);
 
-/* ------------------------------------------------------------------------- */
+DPOT_DEVICE_SHOW_SET(rdac4, DPOT_ADDR_RDAC | DPOT_RDAC4);
+DPOT_DEVICE_SHOW_SET(eeprom4, DPOT_ADDR_EEPROM | DPOT_RDAC4);
+DPOT_DEVICE_SHOW_ONLY(tolerance4, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC4);
+DPOT_DEVICE_SHOW_SET(otp4, DPOT_ADDR_OTP | DPOT_RDAC4);
+DPOT_DEVICE_SHOW_SET(otp4en, DPOT_ADDR_OTP_EN | DPOT_RDAC4);
 
-static ssize_t show_rdac1(struct device *dev,
-			  struct device_attribute *attr, char *buf)
-{
-	return sysfs_show_reg(dev, attr, buf, AD525X_I2C_RDAC | AD525X_RDAC1);
-}
+DPOT_DEVICE_SHOW_SET(rdac5, DPOT_ADDR_RDAC | DPOT_RDAC5);
+DPOT_DEVICE_SHOW_SET(eeprom5, DPOT_ADDR_EEPROM | DPOT_RDAC5);
+DPOT_DEVICE_SHOW_ONLY(tolerance5, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC5);
+DPOT_DEVICE_SHOW_SET(otp5, DPOT_ADDR_OTP | DPOT_RDAC5);
+DPOT_DEVICE_SHOW_SET(otp5en, DPOT_ADDR_OTP_EN | DPOT_RDAC5);
 
-static ssize_t set_rdac1(struct device *dev,
-			 struct device_attribute *attr,
-			 const char *buf, size_t count)
-{
-	return sysfs_set_reg(dev, attr, buf, count,
-			     AD525X_I2C_RDAC | AD525X_RDAC1);
-}
-
-static DEVICE_ATTR(rdac1, S_IWUSR | S_IRUGO, show_rdac1, set_rdac1);
-
-static ssize_t show_eeprom1(struct device *dev,
-			    struct device_attribute *attr, char *buf)
-{
-	return sysfs_show_reg(dev, attr, buf, AD525X_I2C_EEPROM | AD525X_RDAC1);
-}
-
-static ssize_t set_eeprom1(struct device *dev,
-			   struct device_attribute *attr,
-			   const char *buf, size_t count)
-{
-	return sysfs_set_reg(dev, attr, buf, count,
-			     AD525X_I2C_EEPROM | AD525X_RDAC1);
-}
-
-static DEVICE_ATTR(eeprom1, S_IWUSR | S_IRUGO, show_eeprom1, set_eeprom1);
-
-static ssize_t show_tolerance1(struct device *dev,
-			       struct device_attribute *attr, char *buf)
-{
-	return sysfs_show_reg(dev, attr, buf,
-			      AD525X_I2C_EEPROM | AD525X_TOL_RDAC1);
-}
-
-static DEVICE_ATTR(tolerance1, S_IRUGO, show_tolerance1, NULL);
-
-/* ------------------------------------------------------------------------- */
-
-static ssize_t show_rdac2(struct device *dev,
-			  struct device_attribute *attr, char *buf)
-{
-	return sysfs_show_reg(dev, attr, buf, AD525X_I2C_RDAC | AD525X_RDAC2);
-}
-
-static ssize_t set_rdac2(struct device *dev,
-			 struct device_attribute *attr,
-			 const char *buf, size_t count)
-{
-	return sysfs_set_reg(dev, attr, buf, count,
-			     AD525X_I2C_RDAC | AD525X_RDAC2);
-}
-
-static DEVICE_ATTR(rdac2, S_IWUSR | S_IRUGO, show_rdac2, set_rdac2);
-
-static ssize_t show_eeprom2(struct device *dev,
-			    struct device_attribute *attr, char *buf)
-{
-	return sysfs_show_reg(dev, attr, buf, AD525X_I2C_EEPROM | AD525X_RDAC2);
-}
-
-static ssize_t set_eeprom2(struct device *dev,
-			   struct device_attribute *attr,
-			   const char *buf, size_t count)
-{
-	return sysfs_set_reg(dev, attr, buf, count,
-			     AD525X_I2C_EEPROM | AD525X_RDAC2);
-}
-
-static DEVICE_ATTR(eeprom2, S_IWUSR | S_IRUGO, show_eeprom2, set_eeprom2);
-
-static ssize_t show_tolerance2(struct device *dev,
-			       struct device_attribute *attr, char *buf)
-{
-	return sysfs_show_reg(dev, attr, buf,
-			      AD525X_I2C_EEPROM | AD525X_TOL_RDAC2);
-}
-
-static DEVICE_ATTR(tolerance2, S_IRUGO, show_tolerance2, NULL);
-
-/* ------------------------------------------------------------------------- */
-
-static ssize_t show_rdac3(struct device *dev,
-			  struct device_attribute *attr, char *buf)
-{
-	return sysfs_show_reg(dev, attr, buf, AD525X_I2C_RDAC | AD525X_RDAC3);
-}
-
-static ssize_t set_rdac3(struct device *dev,
-			 struct device_attribute *attr,
-			 const char *buf, size_t count)
-{
-	return sysfs_set_reg(dev, attr, buf, count,
-			     AD525X_I2C_RDAC | AD525X_RDAC3);
-}
-
-static DEVICE_ATTR(rdac3, S_IWUSR | S_IRUGO, show_rdac3, set_rdac3);
-
-static ssize_t show_eeprom3(struct device *dev,
-			    struct device_attribute *attr, char *buf)
-{
-	return sysfs_show_reg(dev, attr, buf, AD525X_I2C_EEPROM | AD525X_RDAC3);
-}
-
-static ssize_t set_eeprom3(struct device *dev,
-			   struct device_attribute *attr,
-			   const char *buf, size_t count)
-{
-	return sysfs_set_reg(dev, attr, buf, count,
-			     AD525X_I2C_EEPROM | AD525X_RDAC3);
-}
-
-static DEVICE_ATTR(eeprom3, S_IWUSR | S_IRUGO, show_eeprom3, set_eeprom3);
-
-static ssize_t show_tolerance3(struct device *dev,
-			       struct device_attribute *attr, char *buf)
-{
-	return sysfs_show_reg(dev, attr, buf,
-			      AD525X_I2C_EEPROM | AD525X_TOL_RDAC3);
-}
-
-static DEVICE_ATTR(tolerance3, S_IRUGO, show_tolerance3, NULL);
-
-static struct attribute *ad525x_attributes_wipers[4][4] = {
-	{
-		&dev_attr_rdac0.attr,
-		&dev_attr_eeprom0.attr,
-		&dev_attr_tolerance0.attr,
-		NULL
-	}, {
-		&dev_attr_rdac1.attr,
-		&dev_attr_eeprom1.attr,
-		&dev_attr_tolerance1.attr,
-		NULL
-	}, {
-		&dev_attr_rdac2.attr,
-		&dev_attr_eeprom2.attr,
-		&dev_attr_tolerance2.attr,
-		NULL
-	}, {
-		&dev_attr_rdac3.attr,
-		&dev_attr_eeprom3.attr,
-		&dev_attr_tolerance3.attr,
-		NULL
-	}
+static const struct attribute *dpot_attrib_wipers[] = {
+	&dev_attr_rdac0.attr,
+	&dev_attr_rdac1.attr,
+	&dev_attr_rdac2.attr,
+	&dev_attr_rdac3.attr,
+	&dev_attr_rdac4.attr,
+	&dev_attr_rdac5.attr,
+	NULL
 };
 
-static const struct attribute_group ad525x_group_wipers[] = {
-	{.attrs = ad525x_attributes_wipers[AD525X_RDAC0]},
-	{.attrs = ad525x_attributes_wipers[AD525X_RDAC1]},
-	{.attrs = ad525x_attributes_wipers[AD525X_RDAC2]},
-	{.attrs = ad525x_attributes_wipers[AD525X_RDAC3]},
+static const struct attribute *dpot_attrib_eeprom[] = {
+	&dev_attr_eeprom0.attr,
+	&dev_attr_eeprom1.attr,
+	&dev_attr_eeprom2.attr,
+	&dev_attr_eeprom3.attr,
+	&dev_attr_eeprom4.attr,
+	&dev_attr_eeprom5.attr,
+	NULL
+};
+
+static const struct attribute *dpot_attrib_otp[] = {
+	&dev_attr_otp0.attr,
+	&dev_attr_otp1.attr,
+	&dev_attr_otp2.attr,
+	&dev_attr_otp3.attr,
+	&dev_attr_otp4.attr,
+	&dev_attr_otp5.attr,
+	NULL
+};
+
+static const struct attribute *dpot_attrib_otp_en[] = {
+	&dev_attr_otp0en.attr,
+	&dev_attr_otp1en.attr,
+	&dev_attr_otp2en.attr,
+	&dev_attr_otp3en.attr,
+	&dev_attr_otp4en.attr,
+	&dev_attr_otp5en.attr,
+	NULL
+};
+
+static const struct attribute *dpot_attrib_tolerance[] = {
+	&dev_attr_tolerance0.attr,
+	&dev_attr_tolerance1.attr,
+	&dev_attr_tolerance2.attr,
+	&dev_attr_tolerance3.attr,
+	&dev_attr_tolerance4.attr,
+	&dev_attr_tolerance5.attr,
+	NULL
 };
 
 /* ------------------------------------------------------------------------- */
 
-static ssize_t set_inc_all(struct device *dev,
-			   struct device_attribute *attr,
-			   const char *buf, size_t count)
-{
-	return sysfs_do_cmd(dev, attr, buf, count, AD525X_INC_ALL);
-}
+#define DPOT_DEVICE_DO_CMD(_name, _cmd) static ssize_t \
+set_##_name(struct device *dev, \
+			 struct device_attribute *attr, \
+			 const char *buf, size_t count) \
+{ \
+	return sysfs_do_cmd(dev, attr, buf, count, _cmd); \
+} \
+static DEVICE_ATTR(_name, S_IWUSR | S_IRUGO, NULL, set_##_name);
 
-static DEVICE_ATTR(inc_all, S_IWUSR, NULL, set_inc_all);
-
-static ssize_t set_dec_all(struct device *dev,
-			   struct device_attribute *attr,
-			   const char *buf, size_t count)
-{
-	return sysfs_do_cmd(dev, attr, buf, count, AD525X_DEC_ALL);
-}
-
-static DEVICE_ATTR(dec_all, S_IWUSR, NULL, set_dec_all);
-
-static ssize_t set_inc_all_6db(struct device *dev,
-			       struct device_attribute *attr,
-			       const char *buf, size_t count)
-{
-	return sysfs_do_cmd(dev, attr, buf, count, AD525X_INC_ALL_6DB);
-}
-
-static DEVICE_ATTR(inc_all_6db, S_IWUSR, NULL, set_inc_all_6db);
-
-static ssize_t set_dec_all_6db(struct device *dev,
-			       struct device_attribute *attr,
-			       const char *buf, size_t count)
-{
-	return sysfs_do_cmd(dev, attr, buf, count, AD525X_DEC_ALL_6DB);
-}
-
-static DEVICE_ATTR(dec_all_6db, S_IWUSR, NULL, set_dec_all_6db);
+DPOT_DEVICE_DO_CMD(inc_all, DPOT_INC_ALL);
+DPOT_DEVICE_DO_CMD(dec_all, DPOT_DEC_ALL);
+DPOT_DEVICE_DO_CMD(inc_all_6db, DPOT_INC_ALL_6DB);
+DPOT_DEVICE_DO_CMD(dec_all_6db, DPOT_DEC_ALL_6DB);
 
 static struct attribute *ad525x_attributes_commands[] = {
 	&dev_attr_inc_all.attr,
@@ -409,258 +556,131 @@
 	.attrs = ad525x_attributes_commands,
 };
 
-/* ------------------------------------------------------------------------- */
-
-/* i2c device functions */
-
-/**
- * ad525x_read - return the value contained in the specified register
- * on the AD5258 device.
- * @client: value returned from i2c_new_device()
- * @reg: the register to read
- *
- * If the tolerance register is specified, 2 bytes are returned.
- * Otherwise, 1 byte is returned.  A negative value indicates an error
- * occurred while reading the register.
- */
-static s32 ad525x_read(struct i2c_client *client, u8 reg)
+__devinit int ad_dpot_add_files(struct device *dev,
+		unsigned features, unsigned rdac)
 {
-	struct dpot_data *data = i2c_get_clientdata(client);
-
-	if ((reg & AD525X_REG_TOL) || (data->max_pos > 256))
-		return i2c_smbus_read_word_data(client, (reg & 0xF8) |
-						((reg & 0x7) << 1));
-	else
-		return i2c_smbus_read_byte_data(client, reg);
-}
-
-/**
- * ad525x_write - store the given value in the specified register on
- * the AD5258 device.
- * @client: value returned from i2c_new_device()
- * @reg: the register to write
- * @value: the byte to store in the register
- *
- * For certain instructions that do not require a data byte, "NULL"
- * should be specified for the "value" parameter.  These instructions
- * include NOP, RESTORE_FROM_EEPROM, and STORE_TO_EEPROM.
- *
- * A negative return value indicates an error occurred while reading
- * the register.
- */
-static s32 ad525x_write(struct i2c_client *client, u8 reg, u8 value)
-{
-	struct dpot_data *data = i2c_get_clientdata(client);
-
-	/* Only write the instruction byte for certain commands */
-	if (reg & AD525X_I2C_CMD)
-		return i2c_smbus_write_byte(client, reg);
-
-	if (data->max_pos > 256)
-		return i2c_smbus_write_word_data(client, (reg & 0xF8) |
-						((reg & 0x7) << 1), value);
-	else
-		/* All other registers require instruction + data bytes */
-		return i2c_smbus_write_byte_data(client, reg, value);
-}
-
-static int ad525x_probe(struct i2c_client *client,
-			const struct i2c_device_id *id)
-{
-	struct device *dev = &client->dev;
-	struct dpot_data *data;
-	int err = 0;
-
-	dev_dbg(dev, "%s\n", __func__);
-
-	if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE)) {
-		dev_err(dev, "missing I2C functionality for this driver\n");
-		goto exit;
+	int err = sysfs_create_file(&dev->kobj,
+		dpot_attrib_wipers[rdac]);
+	if (features & F_CMD_EEP)
+		err |= sysfs_create_file(&dev->kobj,
+			dpot_attrib_eeprom[rdac]);
+	if (features & F_CMD_TOL)
+		err |= sysfs_create_file(&dev->kobj,
+			dpot_attrib_tolerance[rdac]);
+	if (features & F_CMD_OTP) {
+		err |= sysfs_create_file(&dev->kobj,
+			dpot_attrib_otp_en[rdac]);
+		err |= sysfs_create_file(&dev->kobj,
+			dpot_attrib_otp[rdac]);
 	}
 
+	if (err)
+		dev_err(dev, "failed to register sysfs hooks for RDAC%d\n",
+			rdac);
+
+	return err;
+}
+
+inline void ad_dpot_remove_files(struct device *dev,
+		unsigned features, unsigned rdac)
+{
+	sysfs_remove_file(&dev->kobj,
+		dpot_attrib_wipers[rdac]);
+	if (features & F_CMD_EEP)
+		sysfs_remove_file(&dev->kobj,
+			dpot_attrib_eeprom[rdac]);
+	if (features & F_CMD_TOL)
+		sysfs_remove_file(&dev->kobj,
+			dpot_attrib_tolerance[rdac]);
+	if (features & F_CMD_OTP) {
+		sysfs_remove_file(&dev->kobj,
+			dpot_attrib_otp_en[rdac]);
+		sysfs_remove_file(&dev->kobj,
+			dpot_attrib_otp[rdac]);
+	}
+}
+
+__devinit int ad_dpot_probe(struct device *dev,
+		struct ad_dpot_bus_data *bdata, const struct ad_dpot_id *id)
+{
+	struct dpot_data *data;
+	int i, err = 0;
+
 	data = kzalloc(sizeof(struct dpot_data), GFP_KERNEL);
 	if (!data) {
 		err = -ENOMEM;
 		goto exit;
 	}
 
-	i2c_set_clientdata(client, data);
+	dev_set_drvdata(dev, data);
 	mutex_init(&data->update_lock);
 
-	switch (id->driver_data) {
-	case AD5258_ID:
-		data->max_pos = AD5258_MAX_POSITION;
-		err = sysfs_create_group(&dev->kobj,
-				       &ad525x_group_wipers[AD525X_RDAC0]);
-		break;
-	case AD5259_ID:
-		data->max_pos = AD5259_MAX_POSITION;
-		err = sysfs_create_group(&dev->kobj,
-				       &ad525x_group_wipers[AD525X_RDAC0]);
-		break;
-	case AD5251_ID:
-		data->max_pos = AD5251_MAX_POSITION;
-		err = sysfs_create_group(&dev->kobj,
-				       &ad525x_group_wipers[AD525X_RDAC1]);
-		err |= sysfs_create_group(&dev->kobj,
-				       &ad525x_group_wipers[AD525X_RDAC3]);
-		err |= sysfs_create_group(&dev->kobj, &ad525x_group_commands);
-		break;
-	case AD5252_ID:
-		data->max_pos = AD5252_MAX_POSITION;
-		err = sysfs_create_group(&dev->kobj,
-				       &ad525x_group_wipers[AD525X_RDAC1]);
-		err |= sysfs_create_group(&dev->kobj,
-				       &ad525x_group_wipers[AD525X_RDAC3]);
-		err |= sysfs_create_group(&dev->kobj, &ad525x_group_commands);
-		break;
-	case AD5253_ID:
-		data->max_pos = AD5253_MAX_POSITION;
-		err = sysfs_create_group(&dev->kobj,
-				       &ad525x_group_wipers[AD525X_RDAC0]);
-		err |= sysfs_create_group(&dev->kobj,
-				       &ad525x_group_wipers[AD525X_RDAC1]);
-		err |= sysfs_create_group(&dev->kobj,
-				       &ad525x_group_wipers[AD525X_RDAC2]);
-		err |= sysfs_create_group(&dev->kobj,
-				       &ad525x_group_wipers[AD525X_RDAC3]);
-		err |= sysfs_create_group(&dev->kobj, &ad525x_group_commands);
-		break;
-	case AD5254_ID:
-		data->max_pos = AD5254_MAX_POSITION;
-		err = sysfs_create_group(&dev->kobj,
-				       &ad525x_group_wipers[AD525X_RDAC0]);
-		err |= sysfs_create_group(&dev->kobj,
-				       &ad525x_group_wipers[AD525X_RDAC1]);
-		err |= sysfs_create_group(&dev->kobj,
-				       &ad525x_group_wipers[AD525X_RDAC2]);
-		err |= sysfs_create_group(&dev->kobj,
-				       &ad525x_group_wipers[AD525X_RDAC3]);
-		err |= sysfs_create_group(&dev->kobj, &ad525x_group_commands);
-		break;
-	case AD5255_ID:
-		data->max_pos = AD5255_MAX_POSITION;
-		err = sysfs_create_group(&dev->kobj,
-				       &ad525x_group_wipers[AD525X_RDAC0]);
-		err |= sysfs_create_group(&dev->kobj,
-				       &ad525x_group_wipers[AD525X_RDAC1]);
-		err |= sysfs_create_group(&dev->kobj,
-				       &ad525x_group_wipers[AD525X_RDAC2]);
-		err |= sysfs_create_group(&dev->kobj, &ad525x_group_commands);
-		break;
-	default:
-		err = -ENODEV;
-		goto exit_free;
-	}
+	data->bdata = *bdata;
+	data->devid = id->devid;
+
+	data->max_pos = 1 << DPOT_MAX_POS(data->devid);
+	data->rdac_mask = data->max_pos - 1;
+	data->feat = DPOT_FEAT(data->devid);
+	data->uid = DPOT_UID(data->devid);
+	data->wipers = DPOT_WIPERS(data->devid);
+
+	for (i = DPOT_RDAC0; i < MAX_RDACS; i++)
+		if (data->wipers & (1 << i)) {
+			err = ad_dpot_add_files(dev, data->feat, i);
+			if (err)
+				goto exit_remove_files;
+			/* power-up midscale */
+			if (data->feat & F_RDACS_WONLY)
+				data->rdac_cache[i] = data->max_pos / 2;
+		}
+
+	if (data->feat & F_CMD_INC)
+		err = sysfs_create_group(&dev->kobj, &ad525x_group_commands);
 
 	if (err) {
 		dev_err(dev, "failed to register sysfs hooks\n");
 		goto exit_free;
 	}
 
-	data->devid = id->driver_data;
-	data->rdac_mask = data->max_pos - 1;
-
 	dev_info(dev, "%s %d-Position Digital Potentiometer registered\n",
 		 id->name, data->max_pos);
 
 	return 0;
 
+exit_remove_files:
+	for (i = DPOT_RDAC0; i < MAX_RDACS; i++)
+		if (data->wipers & (1 << i))
+			ad_dpot_remove_files(dev, data->feat, i);
+
 exit_free:
 	kfree(data);
-	i2c_set_clientdata(client, NULL);
+	dev_set_drvdata(dev, NULL);
 exit:
-	dev_err(dev, "failed to create client\n");
+	dev_err(dev, "failed to create client for %s ID 0x%lX\n",
+			id->name, id->devid);
 	return err;
 }
+EXPORT_SYMBOL(ad_dpot_probe);
 
-static int __devexit ad525x_remove(struct i2c_client *client)
+__devexit int ad_dpot_remove(struct device *dev)
 {
-	struct dpot_data *data = i2c_get_clientdata(client);
-	struct device *dev = &client->dev;
+	struct dpot_data *data = dev_get_drvdata(dev);
+	int i;
 
-	switch (data->devid) {
-	case AD5258_ID:
-	case AD5259_ID:
-		sysfs_remove_group(&dev->kobj,
-				   &ad525x_group_wipers[AD525X_RDAC0]);
-		break;
-	case AD5251_ID:
-	case AD5252_ID:
-		sysfs_remove_group(&dev->kobj,
-				   &ad525x_group_wipers[AD525X_RDAC1]);
-		sysfs_remove_group(&dev->kobj,
-				   &ad525x_group_wipers[AD525X_RDAC3]);
-		sysfs_remove_group(&dev->kobj, &ad525x_group_commands);
-		break;
-	case AD5253_ID:
-	case AD5254_ID:
-		sysfs_remove_group(&dev->kobj,
-				   &ad525x_group_wipers[AD525X_RDAC0]);
-		sysfs_remove_group(&dev->kobj,
-				   &ad525x_group_wipers[AD525X_RDAC1]);
-		sysfs_remove_group(&dev->kobj,
-				   &ad525x_group_wipers[AD525X_RDAC2]);
-		sysfs_remove_group(&dev->kobj,
-				   &ad525x_group_wipers[AD525X_RDAC3]);
-		sysfs_remove_group(&dev->kobj, &ad525x_group_commands);
-		break;
-	case AD5255_ID:
-		sysfs_remove_group(&dev->kobj,
-				   &ad525x_group_wipers[AD525X_RDAC0]);
-		sysfs_remove_group(&dev->kobj,
-				   &ad525x_group_wipers[AD525X_RDAC1]);
-		sysfs_remove_group(&dev->kobj,
-				   &ad525x_group_wipers[AD525X_RDAC2]);
-		sysfs_remove_group(&dev->kobj, &ad525x_group_commands);
-		break;
-	}
+	for (i = DPOT_RDAC0; i < MAX_RDACS; i++)
+		if (data->wipers & (1 << i))
+			ad_dpot_remove_files(dev, data->feat, i);
 
-	i2c_set_clientdata(client, NULL);
 	kfree(data);
 
 	return 0;
 }
+EXPORT_SYMBOL(ad_dpot_remove);
 
-static const struct i2c_device_id ad525x_idtable[] = {
-	{"ad5258", AD5258_ID},
-	{"ad5259", AD5259_ID},
-	{"ad5251", AD5251_ID},
-	{"ad5252", AD5252_ID},
-	{"ad5253", AD5253_ID},
-	{"ad5254", AD5254_ID},
-	{"ad5255", AD5255_ID},
-	{}
-};
-
-MODULE_DEVICE_TABLE(i2c, ad525x_idtable);
-
-static struct i2c_driver ad525x_driver = {
-	.driver = {
-		   .owner = THIS_MODULE,
-		   .name = DRIVER_NAME,
-		   },
-	.id_table = ad525x_idtable,
-	.probe = ad525x_probe,
-	.remove = __devexit_p(ad525x_remove),
-};
-
-static int __init ad525x_init(void)
-{
-	return i2c_add_driver(&ad525x_driver);
-}
-
-module_init(ad525x_init);
-
-static void __exit ad525x_exit(void)
-{
-	i2c_del_driver(&ad525x_driver);
-}
-
-module_exit(ad525x_exit);
 
 MODULE_AUTHOR("Chris Verges <chrisv@cyberswitching.com>, "
-	      "Michael Hennerich <hennerich@blackfin.uclinux.org>, ");
-MODULE_DESCRIPTION("AD5258/9 digital potentiometer driver");
+	      "Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_DESCRIPTION("Digital potentiometer driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRIVER_VERSION);
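From userspace, each wiper the part exposes appears as rdac<N>, with eeprom<N>, tolerance<N> and otp<N>/otp<N>en alongside where the feature flags allow. A small sketch, assuming a hypothetical device path (the real path depends on the bus and address the part sits at):

#include <stdio.h>

int main(void)
{
	/* Illustrative path only: "0-002e" stands for an I2C part at
	 * bus 0, address 0x2e. */
	const char *attr = "/sys/bus/i2c/devices/0-002e/rdac0";
	unsigned int pos;
	FILE *f = fopen(attr, "r");

	if (!f)
		return 1;
	if (fscanf(f, "%u", &pos) == 1)
		printf("wiper 0 at position %u\n", pos);
	fclose(f);

	f = fopen(attr, "w");
	if (!f)
		return 1;
	fprintf(f, "%u\n", 32u);	/* move wiper 0 to position 32 */
	fclose(f);
	return 0;
}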
diff --git a/drivers/misc/ad525x_dpot.h b/drivers/misc/ad525x_dpot.h
new file mode 100644
index 0000000..78b89fd
--- /dev/null
+++ b/drivers/misc/ad525x_dpot.h
@@ -0,0 +1,202 @@
+/*
+ * Driver for the Analog Devices digital potentiometers
+ *
+ * Copyright (C) 2010 Michael Hennerich, Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef _AD_DPOT_H_
+#define _AD_DPOT_H_
+
+#include <linux/types.h>
+
+#define DPOT_CONF(features, wipers, max_pos, uid) \
+		(((features) << 18) | (((wipers) & 0xFF) << 10) | \
+		(((max_pos) & 0xF) << 6) | ((uid) & 0x3F))
+
+#define DPOT_UID(conf)		((conf) & 0x3F)
+#define DPOT_MAX_POS(conf)	(((conf) >> 6) & 0xF)
+#define DPOT_WIPERS(conf)	(((conf) >> 10) & 0xFF)
+#define DPOT_FEAT(conf)		((conf) >> 18)
+
+#define BRDAC0			(1 << 0)
+#define BRDAC1			(1 << 1)
+#define BRDAC2			(1 << 2)
+#define BRDAC3			(1 << 3)
+#define BRDAC4			(1 << 4)
+#define BRDAC5			(1 << 5)
+#define MAX_RDACS		6
+
+#define F_CMD_INC		(1 << 0)	/* Features INC/DEC ALL, 6dB */
+#define F_CMD_EEP		(1 << 1)	/* Features EEPROM */
+#define F_CMD_OTP		(1 << 2)	/* Features OTP */
+#define F_CMD_TOL		(1 << 3)	/* RDACS feature Tolerance REG */
+#define F_RDACS_RW		(1 << 4)	/* RDACS are Read/Write  */
+#define F_RDACS_WONLY		(1 << 5)	/* RDACS are Write only */
+#define F_AD_APPDATA		(1 << 6)	/* RDAC Address append to data */
+#define F_SPI_8BIT		(1 << 7)	/* All SPI XFERS are 8-bit */
+#define F_SPI_16BIT		(1 << 8)	/* All SPI XFERS are 16-bit */
+#define F_SPI_24BIT		(1 << 9)	/* All SPI XFERS are 24-bit */
+
+#define F_RDACS_RW_TOL	(F_RDACS_RW | F_CMD_EEP | F_CMD_TOL)
+#define F_RDACS_RW_EEP	(F_RDACS_RW | F_CMD_EEP)
+#define F_SPI		(F_SPI_8BIT | F_SPI_16BIT | F_SPI_24BIT)
+
+enum dpot_devid {
+	AD5258_ID = DPOT_CONF(F_RDACS_RW_TOL, BRDAC0, 6, 0), /* I2C */
+	AD5259_ID = DPOT_CONF(F_RDACS_RW_TOL, BRDAC0, 8, 1),
+	AD5251_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC,
+			BRDAC0 | BRDAC3, 6, 2),
+	AD5252_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC,
+			BRDAC0 | BRDAC3, 8, 3),
+	AD5253_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC,
+			BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3, 6, 4),
+	AD5254_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC,
+			BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3, 8, 5),
+	AD5255_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC,
+			BRDAC0 | BRDAC1 | BRDAC2, 9, 6),
+	AD5160_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
+			BRDAC0, 8, 7), /* SPI */
+	AD5161_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
+			BRDAC0, 8, 8),
+	AD5162_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT,
+			BRDAC0 | BRDAC1, 8, 9),
+	AD5165_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
+			BRDAC0, 8, 10),
+	AD5200_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
+			BRDAC0, 8, 11),
+	AD5201_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
+			BRDAC0, 5, 12),
+	AD5203_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
+			BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3, 6, 13),
+	AD5204_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT,
+			BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3, 8, 14),
+	AD5206_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT,
+			BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3 | BRDAC4 | BRDAC5,
+			8, 15),
+	AD5207_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT,
+			BRDAC0 | BRDAC1, 8, 16),
+	AD5231_ID = DPOT_CONF(F_RDACS_RW_EEP | F_CMD_INC | F_SPI_24BIT,
+			BRDAC0, 10, 17),
+	AD5232_ID = DPOT_CONF(F_RDACS_RW_EEP | F_CMD_INC | F_SPI_16BIT,
+			BRDAC0 | BRDAC1, 8, 18),
+	AD5233_ID = DPOT_CONF(F_RDACS_RW_EEP | F_CMD_INC | F_SPI_16BIT,
+			BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3, 6, 19),
+	AD5235_ID = DPOT_CONF(F_RDACS_RW_EEP | F_CMD_INC | F_SPI_24BIT,
+			BRDAC0 | BRDAC1, 10, 20),
+	AD5260_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
+			BRDAC0, 8, 21),
+	AD5262_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT,
+			BRDAC0 | BRDAC1, 8, 22),
+	AD5263_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT,
+			BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3, 8, 23),
+	AD5290_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
+			BRDAC0, 8, 24),
+	AD5291_ID = DPOT_CONF(F_RDACS_RW | F_SPI_16BIT, BRDAC0, 8, 25),
+	AD5292_ID = DPOT_CONF(F_RDACS_RW | F_SPI_16BIT, BRDAC0, 10, 26),
+	AD5293_ID = DPOT_CONF(F_RDACS_RW | F_SPI_16BIT, BRDAC0, 10, 27),
+	AD7376_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
+			BRDAC0, 7, 28),
+	AD8400_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
+			BRDAC0, 8, 29),
+	AD8402_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT,
+			BRDAC0 | BRDAC1, 8, 30),
+	AD8403_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT,
+			BRDAC0 | BRDAC1 | BRDAC2, 8, 31),
+	ADN2850_ID = DPOT_CONF(F_RDACS_RW_EEP | F_CMD_INC | F_SPI_24BIT,
+			BRDAC0 | BRDAC1, 10, 32),
+	AD5241_ID = DPOT_CONF(F_RDACS_RW, BRDAC0, 8, 33),
+	AD5242_ID = DPOT_CONF(F_RDACS_RW, BRDAC0 | BRDAC1, 8, 34),
+	AD5243_ID = DPOT_CONF(F_RDACS_RW, BRDAC0 | BRDAC1, 8, 35),
+	AD5245_ID = DPOT_CONF(F_RDACS_RW, BRDAC0, 8, 36),
+	AD5246_ID = DPOT_CONF(F_RDACS_RW, BRDAC0, 7, 37),
+	AD5247_ID = DPOT_CONF(F_RDACS_RW, BRDAC0, 7, 38),
+	AD5248_ID = DPOT_CONF(F_RDACS_RW, BRDAC0 | BRDAC1, 8, 39),
+	AD5280_ID = DPOT_CONF(F_RDACS_RW, BRDAC0, 8, 40),
+	AD5282_ID = DPOT_CONF(F_RDACS_RW, BRDAC0 | BRDAC1, 8, 41),
+	ADN2860_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC,
+			BRDAC0 | BRDAC1 | BRDAC2, 9, 42),
+	AD5273_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0, 6, 43),
+	AD5171_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0, 6, 44),
+	AD5170_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0, 8, 45),
+	AD5172_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0 | BRDAC1, 8, 46),
+	AD5173_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0 | BRDAC1, 8, 47),
+};
+
+#define DPOT_RDAC0		0
+#define DPOT_RDAC1		1
+#define DPOT_RDAC2		2
+#define DPOT_RDAC3		3
+#define DPOT_RDAC4		4
+#define DPOT_RDAC5		5
+
+#define DPOT_RDAC_MASK		0x1F
+
+#define DPOT_REG_TOL		0x18
+#define DPOT_TOL_RDAC0		(DPOT_REG_TOL | DPOT_RDAC0)
+#define DPOT_TOL_RDAC1		(DPOT_REG_TOL | DPOT_RDAC1)
+#define DPOT_TOL_RDAC2		(DPOT_REG_TOL | DPOT_RDAC2)
+#define DPOT_TOL_RDAC3		(DPOT_REG_TOL | DPOT_RDAC3)
+#define DPOT_TOL_RDAC4		(DPOT_REG_TOL | DPOT_RDAC4)
+#define DPOT_TOL_RDAC5		(DPOT_REG_TOL | DPOT_RDAC5)
+
+/* RDAC-to-EEPROM Interface Commands */
+#define DPOT_ADDR_RDAC		(0x0 << 5)
+#define DPOT_ADDR_EEPROM	(0x1 << 5)
+#define DPOT_ADDR_OTP		(0x1 << 6)
+#define DPOT_ADDR_CMD		(0x1 << 7)
+#define DPOT_ADDR_OTP_EN	(0x1 << 9)
+
+#define DPOT_DEC_ALL_6DB	(DPOT_ADDR_CMD | (0x4 << 3))
+#define DPOT_INC_ALL_6DB	(DPOT_ADDR_CMD | (0x9 << 3))
+#define DPOT_DEC_ALL		(DPOT_ADDR_CMD | (0x6 << 3))
+#define DPOT_INC_ALL		(DPOT_ADDR_CMD | (0xB << 3))
+
+#define DPOT_SPI_RDAC		0xB0
+#define DPOT_SPI_EEPROM		0x30
+#define DPOT_SPI_READ_RDAC	0xA0
+#define DPOT_SPI_READ_EEPROM	0x90
+#define DPOT_SPI_DEC_ALL_6DB	0x50
+#define DPOT_SPI_INC_ALL_6DB	0xD0
+#define DPOT_SPI_DEC_ALL	0x70
+#define DPOT_SPI_INC_ALL	0xF0
+
+/* AD5291/2/3 use special commands */
+#define DPOT_AD5291_RDAC	0x01
+#define DPOT_AD5291_READ_RDAC	0x02
+
+/* AD524x use special commands */
+#define DPOT_AD5291_RDAC_AB	0x80
+
+#define DPOT_AD5273_FUSE	0x80
+#define DPOT_AD5270_2_3_FUSE	0x20
+#define DPOT_AD5270_2_3_OW	0x08
+#define DPOT_AD5272_3_A0	0x08
+#define DPOT_AD5270_2FUSE	0x80
+
+struct dpot_data;
+
+struct ad_dpot_bus_ops {
+	int (*read_d8) (void *client);
+	int (*read_r8d8) (void *client, u8 reg);
+	int (*read_r8d16) (void *client, u8 reg);
+	int (*write_d8) (void *client, u8 val);
+	int (*write_r8d8) (void *client, u8 reg, u8 val);
+	int (*write_r8d16) (void *client, u8 reg, u16 val);
+};
+
+struct ad_dpot_bus_data {
+	void *client;
+	const struct ad_dpot_bus_ops *bops;
+};
+
+struct ad_dpot_id {
+	char *name;
+	unsigned long devid;
+};
+
+int ad_dpot_probe(struct device *dev, struct ad_dpot_bus_data *bdata,
+		  const struct ad_dpot_id *id);
+int ad_dpot_remove(struct device *dev);
+
+#endif
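The devid values above are not plain enumerators: DPOT_CONF() packs the feature flags, a wiper bitmap, log2 of the position count and a unique ID into one word, and the core recovers each field with the accessor macros. A short sketch, assuming ad525x_dpot.h is included:

/* Unpacking AD5258_ID = DPOT_CONF(F_RDACS_RW_TOL, BRDAC0, 6, 0).
 * Note DPOT_MAX_POS() yields log2 of the position count, which is
 * why the core computes max_pos = 1 << DPOT_MAX_POS(devid). */
unsigned long devid = AD5258_ID;

unsigned uid	 = DPOT_UID(devid);		/* 0 */
unsigned max_pos = 1 << DPOT_MAX_POS(devid);	/* 1 << 6 = 64 positions */
unsigned wipers	 = DPOT_WIPERS(devid);		/* BRDAC0: wiper 0 only */
unsigned feat	 = DPOT_FEAT(devid);		/* F_RDACS_RW | F_CMD_EEP | F_CMD_TOL */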
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index a441aad..3b7ab20 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -5162,13 +5162,6 @@
 	enable_MAC(ai, 1);
 }
 
-static inline u8 hexVal(char c) {
-	if (c>='0' && c<='9') return c -= '0';
-	if (c>='a' && c<='f') return c -= 'a'-10;
-	if (c>='A' && c<='F') return c -= 'A'-10;
-	return 0;
-}
-
 static void proc_APList_on_close( struct inode *inode, struct file *file ) {
 	struct proc_data *data = (struct proc_data *)file->private_data;
 	struct proc_dir_entry *dp = PDE(inode);
@@ -5188,11 +5181,11 @@
 			switch(j%3) {
 			case 0:
 				APList_rid.ap[i][j/3]=
-					hexVal(data->wbuffer[j+i*6*3])<<4;
+					hex_to_bin(data->wbuffer[j+i*6*3])<<4;
 				break;
 			case 1:
 				APList_rid.ap[i][j/3]|=
-					hexVal(data->wbuffer[j+i*6*3]);
+					hex_to_bin(data->wbuffer[j+i*6*3]);
 				break;
 			}
 		}
@@ -5340,10 +5333,10 @@
 	for( i = 0; i < 16*3 && data->wbuffer[i+j]; i++ ) {
 		switch(i%3) {
 		case 0:
-			key[i/3] = hexVal(data->wbuffer[i+j])<<4;
+			key[i/3] = hex_to_bin(data->wbuffer[i+j])<<4;
 			break;
 		case 1:
-			key[i/3] |= hexVal(data->wbuffer[i+j]);
+			key[i/3] |= hex_to_bin(data->wbuffer[i+j]);
 			break;
 		}
 	}
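Note one behavioural difference the conversion above inherits: the removed hexVal() silently mapped invalid characters to 0, whereas the kernel's hex_to_bin() returns -1 on bad input. Its logic is roughly equivalent to this sketch:

/* Rough equivalent of the kernel's hex_to_bin() (lib/hexdump.c):
 * returns 0..15 for a valid hex digit, -1 otherwise. */
static int hex_to_bin_sketch(char ch)
{
	if (ch >= '0' && ch <= '9')
		return ch - '0';
	if (ch >= 'a' && ch <= 'f')
		return ch - 'a' + 10;
	if (ch >= 'A' && ch <= 'F')
		return ch - 'A' + 10;
	return -1;
}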
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index 6a86cdf..9d30eeb 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -179,14 +179,16 @@
 {
 	struct device *dev = container_of(kobj, struct device, kobj);
 	struct power_supply *psy = dev_get_drvdata(dev);
+	mode_t mode = S_IRUSR | S_IRGRP | S_IROTH;
 	int i;
 
+	if (attrno == POWER_SUPPLY_PROP_TYPE)
+		return mode;
+
 	for (i = 0; i < psy->num_properties; i++) {
 		int property = psy->properties[i];
 
 		if (property == attrno) {
-			mode_t mode = S_IRUSR | S_IRGRP | S_IROTH;
-
 			if (psy->property_is_writeable &&
 			    psy->property_is_writeable(psy, property) > 0)
 				mode |= S_IWUSR;
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 50ac047..f159832 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -640,7 +640,7 @@
 
 config RTC_DRV_S3C
 	tristate "Samsung S3C series SoC RTC"
-	depends on ARCH_S3C2410
+	depends on ARCH_S3C2410 || ARCH_S3C64XX
 	help
 	  RTC (Realtime Clock) driver for the clock inbuilt into the
 	  Samsung S3C24XX series of SoCs. This can provide periodic
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 96e8e70..11b8ea2 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -719,6 +719,9 @@
 		}
 	}
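+	/*
+	 * Publish drvdata before registering: rtc_device_register() can
+	 * invoke the cmos ops before it returns, and those ops fetch
+	 * cmos_rtc via dev_get_drvdata().
+	 */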
 
+	cmos_rtc.dev = dev;
+	dev_set_drvdata(dev, &cmos_rtc);
+
 	cmos_rtc.rtc = rtc_device_register(driver_name, dev,
 				&cmos_rtc_ops, THIS_MODULE);
 	if (IS_ERR(cmos_rtc.rtc)) {
@@ -726,8 +729,6 @@
 		goto cleanup0;
 	}
 
-	cmos_rtc.dev = dev;
-	dev_set_drvdata(dev, &cmos_rtc);
 	rename_region(ports, dev_name(&cmos_rtc.rtc->dev));
 
 	spin_lock_irq(&rtc_lock);
diff --git a/drivers/rtc/rtc-ds1302.c b/drivers/rtc/rtc-ds1302.c
index 532acf9..359d1e0 100644
--- a/drivers/rtc/rtc-ds1302.c
+++ b/drivers/rtc/rtc-ds1302.c
@@ -16,7 +16,6 @@
 #include <linux/rtc.h>
 #include <linux/io.h>
 #include <linux/bcd.h>
-#include <asm/rtc.h>
 
 #define DRV_NAME	"rtc-ds1302"
 #define DRV_VERSION	"0.1.1"
@@ -34,14 +33,55 @@
 #define	RTC_ADDR_MIN	0x01		/* Address of minute register */
 #define	RTC_ADDR_SEC	0x00		/* Address of second register */
 
+#ifdef CONFIG_SH_SECUREEDGE5410
+#include <asm/rtc.h>
+#include <mach/snapgear.h>
+
 #define	RTC_RESET	0x1000
 #define	RTC_IODATA	0x0800
 #define	RTC_SCLK	0x0400
 
-#ifdef CONFIG_SH_SECUREEDGE5410
-#include <mach/snapgear.h>
 #define set_dp(x)	SECUREEDGE_WRITE_IOPORT(x, 0x1c00)
 #define get_dp()	SECUREEDGE_READ_IOPORT()
+#define ds1302_set_tx()
+#define ds1302_set_rx()
+
+static inline int ds1302_hw_init(void)
+{
+	return 0;
+}
+
+static inline void ds1302_reset(void)
+{
+	set_dp(get_dp() & ~(RTC_RESET | RTC_IODATA | RTC_SCLK));
+}
+
+static inline void ds1302_clock(void)
+{
+	set_dp(get_dp() | RTC_SCLK);	/* clock high */
+	set_dp(get_dp() & ~RTC_SCLK);	/* clock low */
+}
+
+static inline void ds1302_start(void)
+{
+	set_dp(get_dp() | RTC_RESET);
+}
+
+static inline void ds1302_stop(void)
+{
+	set_dp(get_dp() & ~RTC_RESET);
+}
+
+static inline void ds1302_txbit(int bit)
+{
+	set_dp((get_dp() & ~RTC_IODATA) | (bit ? RTC_IODATA : 0));
+}
+
+static inline int ds1302_rxbit(void)
+{
+	return !!(get_dp() & RTC_IODATA);
+}
+
 #else
 #error "Add support for your platform"
 #endif
@@ -50,11 +90,11 @@
 {
 	int i;
 
+	ds1302_set_tx();
+
 	for (i = 8; (i); i--, val >>= 1) {
-		set_dp((get_dp() & ~RTC_IODATA) | ((val & 0x1) ?
-			RTC_IODATA : 0));
-		set_dp(get_dp() | RTC_SCLK);	/* clock high */
-		set_dp(get_dp() & ~RTC_SCLK);	/* clock low */
+		ds1302_txbit(val & 0x1);
+		ds1302_clock();
 	}
 }
 
@@ -63,10 +103,11 @@
 	unsigned int val;
 	int i;
 
+	ds1302_set_rx();
+
 	for (i = 0, val = 0; (i < 8); i++) {
-		val |= (((get_dp() & RTC_IODATA) ? 1 : 0) << i);
-		set_dp(get_dp() | RTC_SCLK);	/* clock high */
-		set_dp(get_dp() & ~RTC_SCLK);	/* clock low */
+		val |= (ds1302_rxbit() << i);
+		ds1302_clock();
 	}
 
 	return val;
@@ -76,23 +117,24 @@
 {
 	unsigned int val;
 
-	set_dp(get_dp() & ~(RTC_RESET | RTC_IODATA | RTC_SCLK));
+	ds1302_reset();
 
-	set_dp(get_dp() | RTC_RESET);
+	ds1302_start();
 	ds1302_sendbits(((addr & 0x3f) << 1) | RTC_CMD_READ);
 	val = ds1302_recvbits();
-	set_dp(get_dp() & ~RTC_RESET);
+	ds1302_stop();
 
 	return val;
 }
 
 static void ds1302_writebyte(unsigned int addr, unsigned int val)
 {
-	set_dp(get_dp() & ~(RTC_RESET | RTC_IODATA | RTC_SCLK));
-	set_dp(get_dp() | RTC_RESET);
+	ds1302_reset();
+
+	ds1302_start();
 	ds1302_sendbits(((addr & 0x3f) << 1) | RTC_CMD_WRITE);
 	ds1302_sendbits(val);
-	set_dp(get_dp() & ~RTC_RESET);
+	ds1302_stop();
 }
 
 static int ds1302_rtc_read_time(struct device *dev, struct rtc_time *tm)
@@ -167,13 +209,20 @@
 {
 	struct rtc_device *rtc;
 
+	if (ds1302_hw_init()) {
+		dev_err(&pdev->dev, "Failed to init communication channel");
+		return -EINVAL;
+	}
+
 	/* Reset */
-	set_dp(get_dp() & ~(RTC_RESET | RTC_IODATA | RTC_SCLK));
+	ds1302_reset();
 
 	/* Write a magic value to the DS1302 RAM, and see if it sticks. */
 	ds1302_writebyte(RTC_ADDR_RAM0, 0x42);
-	if (ds1302_readbyte(RTC_ADDR_RAM0) != 0x42)
+	if (ds1302_readbyte(RTC_ADDR_RAM0) != 0x42) {
+		dev_err(&pdev->dev, "Failed to probe");
 		return -ENODEV;
+	}
 
 	rtc = rtc_device_register("ds1302", &pdev->dev,
 					   &ds1302_rtc_ops, THIS_MODULE);
diff --git a/drivers/rtc/rtc-isl1208.c b/drivers/rtc/rtc-isl1208.c
index 054e052..468200c 100644
--- a/drivers/rtc/rtc-isl1208.c
+++ b/drivers/rtc/rtc-isl1208.c
@@ -462,39 +462,16 @@
 static DEVICE_ATTR(usr, S_IRUGO | S_IWUSR, isl1208_sysfs_show_usr,
 		   isl1208_sysfs_store_usr);
 
-static int
-isl1208_sysfs_register(struct device *dev)
-{
-	int err;
+static struct attribute *isl1208_rtc_attrs[] = {
+	&dev_attr_atrim.attr,
+	&dev_attr_dtrim.attr,
+	&dev_attr_usr.attr,
+	NULL
+};
 
-	err = device_create_file(dev, &dev_attr_atrim);
-	if (err)
-		return err;
-
-	err = device_create_file(dev, &dev_attr_dtrim);
-	if (err) {
-		device_remove_file(dev, &dev_attr_atrim);
-		return err;
-	}
-
-	err = device_create_file(dev, &dev_attr_usr);
-	if (err) {
-		device_remove_file(dev, &dev_attr_atrim);
-		device_remove_file(dev, &dev_attr_dtrim);
-	}
-
-	return 0;
-}
-
-static int
-isl1208_sysfs_unregister(struct device *dev)
-{
-	device_remove_file(dev, &dev_attr_dtrim);
-	device_remove_file(dev, &dev_attr_atrim);
-	device_remove_file(dev, &dev_attr_usr);
-
-	return 0;
-}
+static const struct attribute_group isl1208_rtc_sysfs_files = {
+	.attrs	= isl1208_rtc_attrs,
+};
 
 static int
 isl1208_probe(struct i2c_client *client, const struct i2c_device_id *id)
@@ -529,7 +506,7 @@
 		dev_warn(&client->dev, "rtc power failure detected, "
 			 "please set clock.\n");
 
-	rc = isl1208_sysfs_register(&client->dev);
+	rc = sysfs_create_group(&client->dev.kobj, &isl1208_rtc_sysfs_files);
 	if (rc)
 		goto exit_unregister;
 
@@ -546,7 +523,7 @@
 {
 	struct rtc_device *rtc = i2c_get_clientdata(client);
 
-	isl1208_sysfs_unregister(&client->dev);
+	sysfs_remove_group(&client->dev.kobj, &isl1208_rtc_sysfs_files);
 	rtc_device_unregister(rtc);
 
 	return 0;
diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c
index d71fe61..25ec921 100644
--- a/drivers/rtc/rtc-mxc.c
+++ b/drivers/rtc/rtc-mxc.c
@@ -379,7 +379,6 @@
 
 static int __init mxc_rtc_probe(struct platform_device *pdev)
 {
-	struct clk *clk;
 	struct resource *res;
 	struct rtc_device *rtc;
 	struct rtc_plat_data *pdata = NULL;
@@ -402,14 +401,15 @@
 	pdata->ioaddr = devm_ioremap(&pdev->dev, res->start,
 				     resource_size(res));
 
-	clk = clk_get(&pdev->dev, "ckil");
-	if (IS_ERR(clk)) {
-		ret = PTR_ERR(clk);
+	pdata->clk = clk_get(&pdev->dev, "rtc");
+	if (IS_ERR(pdata->clk)) {
+		dev_err(&pdev->dev, "unable to get clock!\n");
+		ret = PTR_ERR(pdata->clk);
 		goto exit_free_pdata;
 	}
 
-	rate = clk_get_rate(clk);
-	clk_put(clk);
+	clk_enable(pdata->clk);
+	rate = clk_get_rate(pdata->clk);
 
 	if (rate == 32768)
 		reg = RTC_INPUT_CLK_32768HZ;
@@ -420,7 +420,7 @@
 	else {
 		dev_err(&pdev->dev, "rtc clock is not valid (%lu)\n", rate);
 		ret = -EINVAL;
-		goto exit_free_pdata;
+		goto exit_put_clk;
 	}
 
 	reg |= RTC_ENABLE_BIT;
@@ -428,18 +428,9 @@
 	if (((readw(pdata->ioaddr + RTC_RTCCTL)) & RTC_ENABLE_BIT) == 0) {
 		dev_err(&pdev->dev, "hardware module can't be enabled!\n");
 		ret = -EIO;
-		goto exit_free_pdata;
+		goto exit_put_clk;
 	}
 
-	pdata->clk = clk_get(&pdev->dev, "rtc");
-	if (IS_ERR(pdata->clk)) {
-		dev_err(&pdev->dev, "unable to get clock!\n");
-		ret = PTR_ERR(pdata->clk);
-		goto exit_free_pdata;
-	}
-
-	clk_enable(pdata->clk);
-
 	rtc = rtc_device_register(pdev->name, &pdev->dev, &mxc_rtc_ops,
 				  THIS_MODULE);
 	if (IS_ERR(rtc)) {
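The rtc-mxc change collapses the probe's two clk_get() calls into one: the driver takes the "rtc" clock up front, enables it before sampling the rate, and gains an exit_put_clk label so every later failure releases the clock it now holds. The reverse-order unwind idiom the new error path relies on, sketched with a hypothetical foo_hw_init() setup step:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

extern int foo_hw_init(struct platform_device *pdev);	/* hypothetical */

static int foo_probe(struct platform_device *pdev)
{
	struct clk *clk;
	int ret;

	clk = clk_get(&pdev->dev, "foo");
	if (IS_ERR(clk))
		return PTR_ERR(clk);	/* nothing acquired yet: plain return */

	clk_enable(clk);

	ret = foo_hw_init(pdev);
	if (ret)
		goto exit_put_clk;	/* undo the acquisitions, newest first */

	return 0;

exit_put_clk:
	clk_disable(clk);
	clk_put(clk);
	return ret;
}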
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 4969b60..e5972b2 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -29,6 +29,11 @@
 #include <asm/irq.h>
 #include <plat/regs-rtc.h>
 
+enum s3c_cpu_type {
+	TYPE_S3C2410,
+	TYPE_S3C64XX,
+};
+
 /* I have yet to find an S3C implementation with more than one
  * of these rtc blocks in */
 
@@ -37,6 +42,7 @@
 static void __iomem *s3c_rtc_base;
 static int s3c_rtc_alarmno = NO_IRQ;
 static int s3c_rtc_tickno  = NO_IRQ;
+static enum s3c_cpu_type s3c_rtc_cpu_type;
 
 static DEFINE_SPINLOCK(s3c_rtc_pie_lock);
 
@@ -80,12 +86,25 @@
 	pr_debug("%s: pie=%d\n", __func__, enabled);
 
 	spin_lock_irq(&s3c_rtc_pie_lock);
-	tmp = readb(s3c_rtc_base + S3C2410_TICNT) & ~S3C2410_TICNT_ENABLE;
 
-	if (enabled)
-		tmp |= S3C2410_TICNT_ENABLE;
+	if (s3c_rtc_cpu_type == TYPE_S3C64XX) {
+		tmp = readb(s3c_rtc_base + S3C2410_RTCCON);
+		tmp &= ~S3C64XX_RTCCON_TICEN;
 
-	writeb(tmp, s3c_rtc_base + S3C2410_TICNT);
+		if (enabled)
+			tmp |= S3C64XX_RTCCON_TICEN;
+
+		writeb(tmp, s3c_rtc_base + S3C2410_RTCCON);
+	} else {
+		tmp = readb(s3c_rtc_base + S3C2410_TICNT);
+		tmp &= ~S3C2410_TICNT_ENABLE;
+
+		if (enabled)
+			tmp |= S3C2410_TICNT_ENABLE;
+
+		writeb(tmp, s3c_rtc_base + S3C2410_TICNT);
+	}
+
 	spin_unlock_irq(&s3c_rtc_pie_lock);
 
 	return 0;
@@ -93,15 +112,21 @@
 
 static int s3c_rtc_setfreq(struct device *dev, int freq)
 {
-	unsigned int tmp;
+	struct platform_device *pdev = to_platform_device(dev);
+	struct rtc_device *rtc_dev = platform_get_drvdata(pdev);
+	unsigned int tmp = 0;
 
 	if (!is_power_of_2(freq))
 		return -EINVAL;
 
 	spin_lock_irq(&s3c_rtc_pie_lock);
 
-	tmp = readb(s3c_rtc_base + S3C2410_TICNT) & S3C2410_TICNT_ENABLE;
-	tmp |= (128 / freq)-1;
+	if (s3c_rtc_cpu_type == TYPE_S3C2410) {
+		tmp = readb(s3c_rtc_base + S3C2410_TICNT);
+		tmp &= S3C2410_TICNT_ENABLE;
+	}
+
+	tmp |= (rtc_dev->max_user_freq / freq)-1;
 
 	writeb(tmp, s3c_rtc_base + S3C2410_TICNT);
 	spin_unlock_irq(&s3c_rtc_pie_lock);
@@ -283,10 +308,17 @@
 
 static int s3c_rtc_proc(struct device *dev, struct seq_file *seq)
 {
-	unsigned int ticnt = readb(s3c_rtc_base + S3C2410_TICNT);
+	unsigned int ticnt;
 
-	seq_printf(seq, "periodic_IRQ\t: %s\n",
-		     (ticnt & S3C2410_TICNT_ENABLE) ? "yes" : "no" );
+	if (s3c_rtc_cpu_type == TYPE_S3C64XX) {
+		ticnt = readb(s3c_rtc_base + S3C2410_RTCCON);
+		ticnt &= S3C64XX_RTCCON_TICEN;
+	} else {
+		ticnt = readb(s3c_rtc_base + S3C2410_TICNT);
+		ticnt &= S3C2410_TICNT_ENABLE;
+	}
+
+	seq_printf(seq, "periodic_IRQ\t: %s\n", ticnt ? "yes" : "no");
 	return 0;
 }
 
@@ -353,10 +385,16 @@
 
 	if (!en) {
 		tmp = readb(base + S3C2410_RTCCON);
-		writeb(tmp & ~S3C2410_RTCCON_RTCEN, base + S3C2410_RTCCON);
+		if (s3c_rtc_cpu_type == TYPE_S3C64XX)
+			tmp &= ~S3C64XX_RTCCON_TICEN;
+		tmp &= ~S3C2410_RTCCON_RTCEN;
+		writeb(tmp, base + S3C2410_RTCCON);
 
-		tmp = readb(base + S3C2410_TICNT);
-		writeb(tmp & ~S3C2410_TICNT_ENABLE, base + S3C2410_TICNT);
+		if (s3c_rtc_cpu_type == TYPE_S3C2410) {
+			tmp = readb(base + S3C2410_TICNT);
+			tmp &= ~S3C2410_TICNT_ENABLE;
+			writeb(tmp, base + S3C2410_TICNT);
+		}
 	} else {
 		/* re-enable the device, and check it is ok */
 
@@ -472,7 +510,12 @@
 		goto err_nortc;
 	}
 
-	rtc->max_user_freq = 128;
+	s3c_rtc_cpu_type = platform_get_device_id(pdev)->driver_data;
+
+	if (s3c_rtc_cpu_type == TYPE_S3C64XX)
+		rtc->max_user_freq = 32768;
+	else
+		rtc->max_user_freq = 128;
 
 	platform_set_drvdata(pdev, rtc);
 	return 0;
@@ -492,20 +535,30 @@
 
 /* RTC Power management control */
 
-static int ticnt_save;
+static int ticnt_save, ticnt_en_save;
 
 static int s3c_rtc_suspend(struct platform_device *pdev, pm_message_t state)
 {
 	/* save TICNT for anyone using periodic interrupts */
 	ticnt_save = readb(s3c_rtc_base + S3C2410_TICNT);
+	if (s3c_rtc_cpu_type == TYPE_S3C64XX) {
+		ticnt_en_save = readb(s3c_rtc_base + S3C2410_RTCCON);
+		ticnt_en_save &= S3C64XX_RTCCON_TICEN;
+	}
 	s3c_rtc_enable(pdev, 0);
 	return 0;
 }
 
 static int s3c_rtc_resume(struct platform_device *pdev)
 {
+	unsigned int tmp;
+
 	s3c_rtc_enable(pdev, 1);
 	writeb(ticnt_save, s3c_rtc_base + S3C2410_TICNT);
+	if (s3c_rtc_cpu_type == TYPE_S3C64XX && ticnt_en_save) {
+		tmp = readb(s3c_rtc_base + S3C2410_RTCCON);
+		writeb(tmp | ticnt_en_save, s3c_rtc_base + S3C2410_RTCCON);
+	}
 	return 0;
 }
 #else
@@ -513,13 +566,27 @@
 #define s3c_rtc_resume  NULL
 #endif
 
-static struct platform_driver s3c2410_rtc_driver = {
+static struct platform_device_id s3c_rtc_driver_ids[] = {
+	{
+		.name		= "s3c2410-rtc",
+		.driver_data	= TYPE_S3C2410,
+	}, {
+		.name		= "s3c64xx-rtc",
+		.driver_data	= TYPE_S3C64XX,
+	},
+	{ }
+};
+
+MODULE_DEVICE_TABLE(platform, s3c_rtc_driver_ids);
+
+static struct platform_driver s3c_rtc_driver = {
 	.probe		= s3c_rtc_probe,
 	.remove		= __devexit_p(s3c_rtc_remove),
 	.suspend	= s3c_rtc_suspend,
 	.resume		= s3c_rtc_resume,
+	.id_table	= s3c_rtc_driver_ids,
 	.driver		= {
-		.name	= "s3c2410-rtc",
+		.name	= "s3c-rtc",
 		.owner	= THIS_MODULE,
 	},
 };
@@ -529,12 +596,12 @@
 static int __init s3c_rtc_init(void)
 {
 	printk(banner);
-	return platform_driver_register(&s3c2410_rtc_driver);
+	return platform_driver_register(&s3c_rtc_driver);
 }
 
 static void __exit s3c_rtc_exit(void)
 {
-	platform_driver_unregister(&s3c2410_rtc_driver);
+	platform_driver_unregister(&s3c_rtc_driver);
 }
 
 module_init(s3c_rtc_init);
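Renaming the driver to "s3c-rtc" and attaching an id_table is the stock way to serve several SoC variants from one platform driver: the core matches the device name against the table, and probe() recovers the per-variant data from whichever entry matched. A reduced sketch of the mechanism, with illustrative foo names:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>

enum foo_type { FOO_V1, FOO_V2 };

static struct platform_device_id foo_ids[] = {
	{ .name = "foo-v1", .driver_data = FOO_V1 },
	{ .name = "foo-v2", .driver_data = FOO_V2 },
	{ }					/* sentinel terminates the table */
};
MODULE_DEVICE_TABLE(platform, foo_ids);

static int __devinit foo_probe(struct platform_device *pdev)
{
	/* recover the per-variant data from whichever entry matched */
	enum foo_type type = platform_get_device_id(pdev)->driver_data;

	dev_info(&pdev->dev, "probing variant %d\n", type);
	return 0;				/* variant-specific setup elided */
}

static struct platform_driver foo_driver = {
	.probe		= foo_probe,
	.id_table	= foo_ids,
	.driver		= {
		.name	= "foo",
		.owner	= THIS_MODULE,
	},
};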
diff --git a/drivers/rtc/rtc-wm831x.c b/drivers/rtc/rtc-wm831x.c
index b16cfe5..82931dc 100644
--- a/drivers/rtc/rtc-wm831x.c
+++ b/drivers/rtc/rtc-wm831x.c
@@ -449,17 +449,17 @@
 		goto err;
 	}
 
-	ret = wm831x_request_irq(wm831x, per_irq, wm831x_per_irq,
-				 IRQF_TRIGGER_RISING, "wm831x_rtc_per",
-				 wm831x_rtc);
+	ret = request_threaded_irq(per_irq, NULL, wm831x_per_irq,
+				   IRQF_TRIGGER_RISING, "RTC period",
+				   wm831x_rtc);
 	if (ret != 0) {
 		dev_err(&pdev->dev, "Failed to request periodic IRQ %d: %d\n",
 			per_irq, ret);
 	}
 
-	ret = wm831x_request_irq(wm831x, alm_irq, wm831x_alm_irq,
-				 IRQF_TRIGGER_RISING, "wm831x_rtc_alm",
-				 wm831x_rtc);
+	ret = request_threaded_irq(alm_irq, NULL, wm831x_alm_irq,
+				   IRQF_TRIGGER_RISING, "RTC alarm",
+				   wm831x_rtc);
 	if (ret != 0) {
 		dev_err(&pdev->dev, "Failed to request alarm IRQ %d: %d\n",
 			alm_irq, ret);
@@ -478,8 +478,8 @@
 	int per_irq = platform_get_irq_byname(pdev, "PER");
 	int alm_irq = platform_get_irq_byname(pdev, "ALM");
 
-	wm831x_free_irq(wm831x_rtc->wm831x, alm_irq, wm831x_rtc);
-	wm831x_free_irq(wm831x_rtc->wm831x, per_irq, wm831x_rtc);
+	free_irq(alm_irq, wm831x_rtc);
+	free_irq(per_irq, wm831x_rtc);
 	rtc_device_unregister(wm831x_rtc->rtc);
 	kfree(wm831x_rtc);
 
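The wm831x hunks swap the MFD's private wm831x_request_irq() wrappers for the generic threaded-IRQ API; the handler then runs in process context, which suits a handler that must talk to a slow bus. A minimal sketch of the request/free pairing, assuming hypothetical foo_* state and helpers:

#include <linux/interrupt.h>

struct foo_priv;				/* hypothetical driver state */
extern void foo_handle_event(struct foo_priv *p);	/* hypothetical */

static irqreturn_t foo_irq_thread(int irq, void *data)
{
	foo_handle_event(data);		/* may sleep: runs in a kernel thread */
	return IRQ_HANDLED;
}

static int foo_setup_irq(int irq, struct foo_priv *priv)
{
	/* NULL hard handler: the core just wakes the thread for us */
	return request_threaded_irq(irq, NULL, foo_irq_thread,
				    IRQF_TRIGGER_RISING, "foo event", priv);
}

static void foo_teardown_irq(int irq, struct foo_priv *priv)
{
	free_irq(irq, priv);		/* dev_id cookie must match the request */
}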
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 9276121..44a0759 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -688,7 +688,7 @@
 	}
 
 	if (!lport->vport)
-		fc_host_max_npiv_vports(lport->host) = USHORT_MAX;
+		fc_host_max_npiv_vports(lport->host) = USHRT_MAX;
 
 	snprintf(fc_host_symbolic_name(lport->host), FC_SYMBOLIC_NAME_SIZE,
 		 "%s v%s over %s", FCOE_NAME, FCOE_VERSION,
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index b830d61..0ec1ed3 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -3757,7 +3757,7 @@
 		if (ioc->config_cmds.status & MPT2_CMD_PENDING) {
 			ioc->config_cmds.status |= MPT2_CMD_RESET;
 			mpt2sas_base_free_smid(ioc, ioc->config_cmds.smid);
-			ioc->config_cmds.smid = USHORT_MAX;
+			ioc->config_cmds.smid = USHRT_MAX;
 			complete(&ioc->config_cmds.done);
 		}
 		break;
diff --git a/drivers/scsi/mpt2sas/mpt2sas_config.c b/drivers/scsi/mpt2sas/mpt2sas_config.c
index e762dd3..c654429 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_config.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_config.c
@@ -258,7 +258,7 @@
 #ifdef CONFIG_SCSI_MPT2SAS_LOGGING
 	_config_display_some_debug(ioc, smid, "config_done", mpi_reply);
 #endif
-	ioc->config_cmds.smid = USHORT_MAX;
+	ioc->config_cmds.smid = USHRT_MAX;
 	complete(&ioc->config_cmds.done);
 	return 1;
 }
diff --git a/drivers/serial/68328serial.c b/drivers/serial/68328serial.c
index 78ed24b..3046386 100644
--- a/drivers/serial/68328serial.c
+++ b/drivers/serial/68328serial.c
@@ -1437,7 +1437,7 @@
 	for (i = 0; i < ARRAY_SIZE(baud_table); i++)
 		if (baud_table[i] == n)
 			break;
-	if (i < BAUD_TABLE_SIZE) {
+	if (i < ARRAY_SIZE(baud_table)) {
 		m68328_console_baud = n;
 		m68328_console_cbaud = 0;
 		if (i > 15) {
diff --git a/drivers/staging/rt2860/common/rtmp_init.c b/drivers/staging/rt2860/common/rtmp_init.c
index 21a95ff..a090385 100644
--- a/drivers/staging/rt2860/common/rtmp_init.c
+++ b/drivers/staging/rt2860/common/rtmp_init.c
@@ -2810,17 +2810,6 @@
 }
 
 /* IRQL = PASSIVE_LEVEL */
-u8 BtoH(char ch)
-{
-	if (ch >= '0' && ch <= '9')
-		return (ch - '0');	/* Handle numerals */
-	if (ch >= 'A' && ch <= 'F')
-		return (ch - 'A' + 0xA);	/* Handle capitol hex digits */
-	if (ch >= 'a' && ch <= 'f')
-		return (ch - 'a' + 0xA);	/* Handle small hex digits */
-	return (255);
-}
-
 /* */
 /*  FUNCTION: AtoH(char *, u8 *, int) */
 /* */
@@ -2847,8 +2836,8 @@
 	destTemp = (u8 *)dest;
 
 	while (destlen--) {
-		*destTemp = BtoH(*srcptr++) << 4;	/* Put 1st ascii byte in upper nibble. */
-		*destTemp += BtoH(*srcptr++);	/* Add 2nd ascii byte to above. */
+		*destTemp = hex_to_bin(*srcptr++) << 4;	/* Put 1st ascii byte in upper nibble. */
+		*destTemp += hex_to_bin(*srcptr++);	/* Add 2nd ascii byte to above. */
 		destTemp++;
 	}
 }
diff --git a/drivers/staging/rt2860/rtmp.h b/drivers/staging/rt2860/rtmp.h
index ab525ee..82b6e78 100644
--- a/drivers/staging/rt2860/rtmp.h
+++ b/drivers/staging/rt2860/rtmp.h
@@ -2356,8 +2356,6 @@
 
 void AtoH(char *src, u8 *dest, int destlen);
 
-u8 BtoH(char ch);
-
 void RTMPPatchMacBbpBug(struct rt_rtmp_adapter *pAd);
 
 void RTMPInitTimer(struct rt_rtmp_adapter *pAd,
diff --git a/drivers/usb/atm/speedtch.c b/drivers/usb/atm/speedtch.c
index 1e9ba4b..1335456 100644
--- a/drivers/usb/atm/speedtch.c
+++ b/drivers/usb/atm/speedtch.c
@@ -127,8 +127,6 @@
 #define ENDPOINT_ISOC_DATA	0x07
 #define ENDPOINT_FIRMWARE	0x05
 
-#define hex2int(c) ( (c >= '0') && (c <= '9') ? (c - '0') : ((c & 0xf) + 9) )
-
 struct speedtch_params {
 	unsigned int altsetting;
 	unsigned int BMaxDSL;
@@ -669,7 +667,8 @@
 	memset(atm_dev->esi, 0, sizeof(atm_dev->esi));
 	if (usb_string(usb_dev, usb_dev->descriptor.iSerialNumber, mac_str, sizeof(mac_str)) == 12) {
 		for (i = 0; i < 6; i++)
-			atm_dev->esi[i] = (hex2int(mac_str[i * 2]) * 16) + (hex2int(mac_str[i * 2 + 1]));
+			atm_dev->esi[i] = (hex_to_bin(mac_str[i * 2]) << 4) +
+				hex_to_bin(mac_str[i * 2 + 1]);
 	}
 
 	/* Start modem synchronisation */
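Both of the conversions above (rt2860's BtoH() and speedtch's hex2int()) fold private ASCII-hex helpers into the shared hex_to_bin() from <linux/kernel.h>, which maps one hex digit to 0..15 and returns -1 on a non-digit (a case neither caller checks). The nibble-packing idiom they share, as a standalone sketch with an illustrative name:

#include <linux/kernel.h>	/* hex_to_bin() */

/* "4f" -> 0x4f: first digit is the high nibble, second the low one.
 * Input validation is omitted here, as in the callers above. */
static u8 foo_hex_byte(const char *s)
{
	return (hex_to_bin(s[0]) << 4) | hex_to_bin(s[1]);
}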
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 750effe..c6fb8e9 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -806,7 +806,7 @@
 	count = indirect->len / sizeof desc;
 	/* Buffers are chained via a 16 bit next field, so
 	 * we can have at most 2^16 of these. */
-	if (count > USHORT_MAX + 1) {
+	if (count > USHRT_MAX + 1) {
 		vq_err(vq, "Indirect buffer length too big: %d\n",
 		       indirect->len);
 		return -E2BIG;
diff --git a/drivers/video/arcfb.c b/drivers/video/arcfb.c
index 8d406fb..f3d7440 100644
--- a/drivers/video/arcfb.c
+++ b/drivers/video/arcfb.c
@@ -80,7 +80,7 @@
 	spinlock_t lock;
 };
 
-static struct fb_fix_screeninfo arcfb_fix __initdata = {
+static struct fb_fix_screeninfo arcfb_fix __devinitdata = {
 	.id =		"arcfb",
 	.type =		FB_TYPE_PACKED_PIXELS,
 	.visual =	FB_VISUAL_MONO01,
@@ -90,7 +90,7 @@
 	.accel =	FB_ACCEL_NONE,
 };
 
-static struct fb_var_screeninfo arcfb_var __initdata = {
+static struct fb_var_screeninfo arcfb_var __devinitdata = {
 	.xres		= 128,
 	.yres		= 64,
 	.xres_virtual	= 128,
@@ -588,7 +588,7 @@
 	return retval;
 }
 
-static int arcfb_remove(struct platform_device *dev)
+static int __devexit arcfb_remove(struct platform_device *dev)
 {
 	struct fb_info *info = platform_get_drvdata(dev);
 
@@ -602,7 +602,7 @@
 
 static struct platform_driver arcfb_driver = {
 	.probe	= arcfb_probe,
-	.remove = arcfb_remove,
+	.remove = __devexit_p(arcfb_remove),
 	.driver	= {
 		.name	= "arcfb",
 	},
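arcfb is the first of several framebuffer drivers in this series (hgafb, hitfb, s3c2410fb, sgivwfb, vfb, vga16fb, w100fb) re-annotated for hotpluggable platform devices: data and probe/remove paths move from __initdata/__init, which are discarded after boot, to __devinitdata/__devinit/__devexit, which stay resident as long as hotplug may still need them. The skeleton the series converges on, reduced to illustrative foofb names:

#include <linux/fb.h>
#include <linux/init.h>
#include <linux/platform_device.h>

static struct fb_fix_screeninfo foofb_fix __devinitdata = {
	.id = "foofb",		/* probe copies from this, so it must
				 * survive past the __init phase */
};

static int __devinit foofb_probe(struct platform_device *pdev)
{
	return 0;		/* hardware setup elided */
}

static int __devexit foofb_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver foofb_driver = {
	.probe	= foofb_probe,
	/* __devexit_p() becomes NULL when hotplug removal is compiled out */
	.remove	= __devexit_p(foofb_remove),
	.driver	= {
		.name	= "foofb",
	},
};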
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index 29d7285..f8d69ad 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -1820,10 +1820,6 @@
 #define ATYIO_FEATW		0x41545903	/* ATY\03 */
 #endif
 
-#ifndef FBIO_WAITFORVSYNC
-#define FBIO_WAITFORVSYNC _IOW('F', 0x20, __u32)
-#endif
-
 static int atyfb_ioctl(struct fb_info *info, u_int cmd, u_long arg)
 {
 	struct atyfb_par *par = (struct atyfb_par *) info->par;
diff --git a/drivers/video/bfin-lq035q1-fb.c b/drivers/video/bfin-lq035q1-fb.c
index 2baac7c..c8e1f04 100644
--- a/drivers/video/bfin-lq035q1-fb.c
+++ b/drivers/video/bfin-lq035q1-fb.c
@@ -61,47 +61,13 @@
 #define LCD_X_RES		320	/* Horizontal Resolution */
 #define LCD_Y_RES		240	/* Vertical Resolution */
 #define	DMA_BUS_SIZE		16
+#define U_LINE			4	/* Blanking Lines */
 
-#define USE_RGB565_16_BIT_PPI
-
-#ifdef USE_RGB565_16_BIT_PPI
-#define LCD_BPP		16	/* Bit Per Pixel */
-#define CLOCKS_PER_PIX	1
-#define CPLD_PIPELINE_DELAY_COR 0	/* NO CPLB */
-#endif
 
 /* Interface 16/18-bit TFT over an 8-bit wide PPI using a small Programmable Logic Device (CPLD)
  * http://blackfin.uclinux.org/gf/project/stamp/frs/?action=FrsReleaseBrowse&frs_package_id=165
  */
 
-#ifdef USE_RGB565_8_BIT_PPI
-#define LCD_BPP		16	/* Bit Per Pixel */
-#define CLOCKS_PER_PIX	2
-#define CPLD_PIPELINE_DELAY_COR 3	/* RGB565 */
-#endif
-
-#ifdef USE_RGB888_8_BIT_PPI
-#define LCD_BPP		24	/* Bit Per Pixel */
-#define CLOCKS_PER_PIX	3
-#define CPLD_PIPELINE_DELAY_COR 5	/* RGB888 */
-#endif
-
-	/*
-	 * HS and VS timing parameters (all in number of PPI clk ticks)
-	 */
-
-#define U_LINE		4				/* Blanking Lines */
-
-#define H_ACTPIX	(LCD_X_RES * CLOCKS_PER_PIX)	/* active horizontal pixel */
-#define H_PERIOD	(336 * CLOCKS_PER_PIX)		/* HS period */
-#define H_PULSE		(2 * CLOCKS_PER_PIX)				/* HS pulse width */
-#define H_START		(7 * CLOCKS_PER_PIX + CPLD_PIPELINE_DELAY_COR)	/* first valid pixel */
-
-#define	V_LINES		(LCD_Y_RES + U_LINE)		/* total vertical lines */
-#define V_PULSE		(2 * CLOCKS_PER_PIX)		/* VS pulse width (1-5 H_PERIODs) */
-#define V_PERIOD	(H_PERIOD * V_LINES)		/* VS period */
-
-#define ACTIVE_VIDEO_MEM_OFFSET		((U_LINE / 2) * LCD_X_RES * (LCD_BPP / 8))
 
 #define BFIN_LCD_NBR_PALETTE_ENTRIES	256
 
@@ -110,12 +76,6 @@
 #define PPI_PORT_CFG_01			0x10
 #define PPI_POLS_1			0x8000
 
-#if (CLOCKS_PER_PIX > 1)
-#define PPI_PMODE (DLEN_8 | PACK_EN)
-#else
-#define PPI_PMODE (DLEN_16)
-#endif
-
 #define LQ035_INDEX			0x74
 #define LQ035_DATA			0x76
 
@@ -139,6 +99,15 @@
 	int irq;
 	spinlock_t lock;	/* lock */
 	u32 pseudo_pal[16];
+
+	u32 lcd_bpp;
+	u32 h_actpix;
+	u32 h_period;
+	u32 h_pulse;
+	u32 h_start;
+	u32 v_lines;
+	u32 v_pulse;
+	u32 v_period;
 };
 
 static int nocursor;
@@ -234,16 +203,69 @@
 	return 0;
 }
 
+static int bfin_lq035q1_calc_timing(struct bfin_lq035q1fb_info *fbi)
+{
+	unsigned long clocks_per_pix, cpld_pipeline_delay_cor;
+
+	/*
+	 * Interface 16/18-bit TFT over an 8-bit wide PPI using a small
+	 * Programmable Logic Device (CPLD)
+	 * http://blackfin.uclinux.org/gf/project/stamp/frs/?action=FrsReleaseBrowse&frs_package_id=165
+	 */
+
+	switch (fbi->disp_info->ppi_mode) {
+	case USE_RGB565_16_BIT_PPI:
+		fbi->lcd_bpp = 16;
+		clocks_per_pix = 1;
+		cpld_pipeline_delay_cor = 0;
+		break;
+	case USE_RGB565_8_BIT_PPI:
+		fbi->lcd_bpp = 16;
+		clocks_per_pix = 2;
+		cpld_pipeline_delay_cor = 3;
+		break;
+	case USE_RGB888_8_BIT_PPI:
+		fbi->lcd_bpp = 24;
+		clocks_per_pix = 3;
+		cpld_pipeline_delay_cor = 5;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/*
+	 * HS and VS timing parameters (all in number of PPI clk ticks)
+	 */
+
+	fbi->h_actpix = (LCD_X_RES * clocks_per_pix);	/* active horizontal pixel */
+	fbi->h_period = (336 * clocks_per_pix);		/* HS period */
+	fbi->h_pulse = (2 * clocks_per_pix);				/* HS pulse width */
+	fbi->h_start = (7 * clocks_per_pix + cpld_pipeline_delay_cor);	/* first valid pixel */
+
+	fbi->v_lines = (LCD_Y_RES + U_LINE);		/* total vertical lines */
+	fbi->v_pulse = (2 * clocks_per_pix);		/* VS pulse width (1-5 H_PERIODs) */
+	fbi->v_period =	(fbi->h_period * fbi->v_lines);	/* VS period */
+
+	return 0;
+}
+
 static void bfin_lq035q1_config_ppi(struct bfin_lq035q1fb_info *fbi)
 {
-	bfin_write_PPI_DELAY(H_START);
-	bfin_write_PPI_COUNT(H_ACTPIX - 1);
-	bfin_write_PPI_FRAME(V_LINES);
+	unsigned ppi_pmode;
+
+	if (fbi->disp_info->ppi_mode == USE_RGB565_16_BIT_PPI)
+		ppi_pmode = DLEN_16;
+	else
+		ppi_pmode = (DLEN_8 | PACK_EN);
+
+	bfin_write_PPI_DELAY(fbi->h_start);
+	bfin_write_PPI_COUNT(fbi->h_actpix - 1);
+	bfin_write_PPI_FRAME(fbi->v_lines);
 
 	bfin_write_PPI_CONTROL(PPI_TX_MODE |	   /* output mode , PORT_DIR */
 				PPI_XFER_TYPE_11 | /* sync mode XFR_TYPE */
 				PPI_PORT_CFG_01 |  /* two frame sync PORT_CFG */
-				PPI_PMODE |	   /* 8/16 bit data length / PACK_EN? */
+				ppi_pmode |	   /* 8/16 bit data length / PACK_EN? */
+				PPI_POLS_1);	   /* falling edge syncs POLS */
 }
 
@@ -272,19 +294,19 @@
 
 }
 
-static void bfin_lq035q1_init_timers(void)
+static void bfin_lq035q1_init_timers(struct bfin_lq035q1fb_info *fbi)
 {
 
 	bfin_lq035q1_stop_timers();
 
-	set_gptimer_period(TIMER_HSYNC_id, H_PERIOD);
-	set_gptimer_pwidth(TIMER_HSYNC_id, H_PULSE);
+	set_gptimer_period(TIMER_HSYNC_id, fbi->h_period);
+	set_gptimer_pwidth(TIMER_HSYNC_id, fbi->h_pulse);
 	set_gptimer_config(TIMER_HSYNC_id, TIMER_MODE_PWM | TIMER_PERIOD_CNT |
 				      TIMER_TIN_SEL | TIMER_CLK_SEL|
 				      TIMER_EMU_RUN);
 
-	set_gptimer_period(TIMER_VSYNC_id, V_PERIOD);
-	set_gptimer_pwidth(TIMER_VSYNC_id, V_PULSE);
+	set_gptimer_period(TIMER_VSYNC_id, fbi->v_period);
+	set_gptimer_pwidth(TIMER_VSYNC_id, fbi->v_pulse);
 	set_gptimer_config(TIMER_VSYNC_id, TIMER_MODE_PWM | TIMER_PERIOD_CNT |
 				      TIMER_TIN_SEL | TIMER_CLK_SEL |
 				      TIMER_EMU_RUN);
@@ -294,21 +316,21 @@
 static void bfin_lq035q1_config_dma(struct bfin_lq035q1fb_info *fbi)
 {
 
+
 	set_dma_config(CH_PPI,
 		       set_bfin_dma_config(DIR_READ, DMA_FLOW_AUTO,
 					   INTR_DISABLE, DIMENSION_2D,
 					   DATA_SIZE_16,
 					   DMA_NOSYNC_KEEP_DMA_BUF));
-	set_dma_x_count(CH_PPI, (LCD_X_RES * LCD_BPP) / DMA_BUS_SIZE);
+	set_dma_x_count(CH_PPI, (LCD_X_RES * fbi->lcd_bpp) / DMA_BUS_SIZE);
 	set_dma_x_modify(CH_PPI, DMA_BUS_SIZE / 8);
-	set_dma_y_count(CH_PPI, V_LINES);
+	set_dma_y_count(CH_PPI, fbi->v_lines);
 
 	set_dma_y_modify(CH_PPI, DMA_BUS_SIZE / 8);
 	set_dma_start_addr(CH_PPI, (unsigned long)fbi->fb_buffer);
 
 }
 
-#if (CLOCKS_PER_PIX == 1)
 static const u16 ppi0_req_16[] = {P_PPI0_CLK, P_PPI0_FS1, P_PPI0_FS2,
 			    P_PPI0_D0, P_PPI0_D1, P_PPI0_D2,
 			    P_PPI0_D3, P_PPI0_D4, P_PPI0_D5,
@@ -316,22 +338,27 @@
 			    P_PPI0_D9, P_PPI0_D10, P_PPI0_D11,
 			    P_PPI0_D12, P_PPI0_D13, P_PPI0_D14,
 			    P_PPI0_D15, 0};
-#else
-static const u16 ppi0_req_16[] = {P_PPI0_CLK, P_PPI0_FS1, P_PPI0_FS2,
+
+static const u16 ppi0_req_8[] = {P_PPI0_CLK, P_PPI0_FS1, P_PPI0_FS2,
 			    P_PPI0_D0, P_PPI0_D1, P_PPI0_D2,
 			    P_PPI0_D3, P_PPI0_D4, P_PPI0_D5,
 			    P_PPI0_D6, P_PPI0_D7, 0};
-#endif
 
-static inline void bfin_lq035q1_free_ports(void)
+static inline void bfin_lq035q1_free_ports(unsigned ppi16)
 {
-	peripheral_free_list(ppi0_req_16);
+	if (ppi16)
+		peripheral_free_list(ppi0_req_16);
+	else
+		peripheral_free_list(ppi0_req_8);
+
 	if (ANOMALY_05000400)
 		gpio_free(P_IDENT(P_PPI0_FS3));
 }
 
-static int __devinit bfin_lq035q1_request_ports(struct platform_device *pdev)
+static int __devinit bfin_lq035q1_request_ports(struct platform_device *pdev,
+						unsigned ppi16)
 {
+	int ret;
 	/* ANOMALY_05000400 - PPI Does Not Start Properly In Specific Mode:
 	 * Drive PPI_FS3 Low
 	 */
@@ -342,7 +369,12 @@
 		gpio_direction_output(P_IDENT(P_PPI0_FS3), 0);
 	}
 
-	if (peripheral_request_list(ppi0_req_16, DRIVER_NAME)) {
+	if (ppi16)
+		ret = peripheral_request_list(ppi0_req_16, DRIVER_NAME);
+	else
+		ret = peripheral_request_list(ppi0_req_8, DRIVER_NAME);
+
+	if (ret) {
 		dev_err(&pdev->dev, "requesting peripherals failed\n");
 		return -EFAULT;
 	}
@@ -364,7 +396,7 @@
 
 		bfin_lq035q1_config_dma(fbi);
 		bfin_lq035q1_config_ppi(fbi);
-		bfin_lq035q1_init_timers();
+		bfin_lq035q1_init_timers(fbi);
 
 		/* start dma */
 		enable_dma(CH_PPI);
@@ -402,12 +434,9 @@
 static int bfin_lq035q1_fb_check_var(struct fb_var_screeninfo *var,
 				     struct fb_info *info)
 {
-	switch (var->bits_per_pixel) {
-#if (LCD_BPP == 24)
-	case 24:/* TRUECOLOUR, 16m */
-#else
-	case 16:/* DIRECTCOLOUR, 64k */
-#endif
+	struct bfin_lq035q1fb_info *fbi = info->par;
+
+	if (var->bits_per_pixel == fbi->lcd_bpp) {
 		var->red.offset = info->var.red.offset;
 		var->green.offset = info->var.green.offset;
 		var->blue.offset = info->var.blue.offset;
@@ -420,8 +449,7 @@
 		var->red.msb_right = 0;
 		var->green.msb_right = 0;
 		var->blue.msb_right = 0;
-		break;
-	default:
+	} else {
 		pr_debug("%s: depth not supported: %u BPP\n", __func__,
 			 var->bits_per_pixel);
 		return -EINVAL;
@@ -528,6 +556,7 @@
 {
 	struct bfin_lq035q1fb_info *info;
 	struct fb_info *fbinfo;
+	u32 active_video_mem_offset;
 	int ret;
 
 	ret = request_dma(CH_PPI, DRIVER_NAME"_CH_PPI");
@@ -550,6 +579,12 @@
 
 	platform_set_drvdata(pdev, fbinfo);
 
+	ret = bfin_lq035q1_calc_timing(info);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "invalid PPI mode\n");
+		goto out3;
+	}
+
 	strcpy(fbinfo->fix.id, DRIVER_NAME);
 
 	fbinfo->fix.type = FB_TYPE_PACKED_PIXELS;
@@ -571,46 +606,48 @@
 	fbinfo->var.xres_virtual = LCD_X_RES;
 	fbinfo->var.yres = LCD_Y_RES;
 	fbinfo->var.yres_virtual = LCD_Y_RES;
-	fbinfo->var.bits_per_pixel = LCD_BPP;
+	fbinfo->var.bits_per_pixel = info->lcd_bpp;
 
 	if (info->disp_info->mode & LQ035_BGR) {
-#if (LCD_BPP == 24)
-		fbinfo->var.red.offset = 0;
-		fbinfo->var.green.offset = 8;
-		fbinfo->var.blue.offset = 16;
-#else
-		fbinfo->var.red.offset = 0;
-		fbinfo->var.green.offset = 5;
-		fbinfo->var.blue.offset = 11;
-#endif
+		if (info->lcd_bpp == 24) {
+			fbinfo->var.red.offset = 0;
+			fbinfo->var.green.offset = 8;
+			fbinfo->var.blue.offset = 16;
+		} else {
+			fbinfo->var.red.offset = 0;
+			fbinfo->var.green.offset = 5;
+			fbinfo->var.blue.offset = 11;
+		}
 	} else {
-#if (LCD_BPP == 24)
-		fbinfo->var.red.offset = 16;
-		fbinfo->var.green.offset = 8;
-		fbinfo->var.blue.offset = 0;
-#else
-		fbinfo->var.red.offset = 11;
-		fbinfo->var.green.offset = 5;
-		fbinfo->var.blue.offset = 0;
-#endif
+		if (info->lcd_bpp == 24) {
+			fbinfo->var.red.offset = 16;
+			fbinfo->var.green.offset = 8;
+			fbinfo->var.blue.offset = 0;
+		} else {
+			fbinfo->var.red.offset = 11;
+			fbinfo->var.green.offset = 5;
+			fbinfo->var.blue.offset = 0;
+		}
 	}
 
 	fbinfo->var.transp.offset = 0;
 
-#if (LCD_BPP == 24)
-	fbinfo->var.red.length = 8;
-	fbinfo->var.green.length = 8;
-	fbinfo->var.blue.length = 8;
-#else
-	fbinfo->var.red.length = 5;
-	fbinfo->var.green.length = 6;
-	fbinfo->var.blue.length = 5;
-#endif
+	if (info->lcd_bpp == 24) {
+		fbinfo->var.red.length = 8;
+		fbinfo->var.green.length = 8;
+		fbinfo->var.blue.length = 8;
+	} else {
+		fbinfo->var.red.length = 5;
+		fbinfo->var.green.length = 6;
+		fbinfo->var.blue.length = 5;
+	}
 
 	fbinfo->var.transp.length = 0;
 
-	fbinfo->fix.smem_len = LCD_X_RES * LCD_Y_RES * LCD_BPP / 8
-				+ ACTIVE_VIDEO_MEM_OFFSET;
+	active_video_mem_offset = ((U_LINE / 2) * LCD_X_RES * (info->lcd_bpp / 8));
+
+	fbinfo->fix.smem_len = LCD_X_RES * LCD_Y_RES * info->lcd_bpp / 8
+				+ active_video_mem_offset;
 
 	fbinfo->fix.line_length = fbinfo->var.xres_virtual *
 	    fbinfo->var.bits_per_pixel / 8;
@@ -629,8 +666,8 @@
 		goto out3;
 	}
 
-	fbinfo->screen_base = (void *)info->fb_buffer + ACTIVE_VIDEO_MEM_OFFSET;
-	fbinfo->fix.smem_start = (int)info->fb_buffer + ACTIVE_VIDEO_MEM_OFFSET;
+	fbinfo->screen_base = (void *)info->fb_buffer + active_video_mem_offset;
+	fbinfo->fix.smem_start = (int)info->fb_buffer + active_video_mem_offset;
 
 	fbinfo->fbops = &bfin_lq035q1_fb_ops;
 
@@ -643,7 +680,8 @@
 		goto out4;
 	}
 
-	ret = bfin_lq035q1_request_ports(pdev);
+	ret = bfin_lq035q1_request_ports(pdev,
+			info->disp_info->ppi_mode == USE_RGB565_16_BIT_PPI);
 	if (ret) {
 		dev_err(&pdev->dev, "couldn't request gpio port\n");
 		goto out6;
@@ -693,7 +731,7 @@
 	}
 
 	dev_info(&pdev->dev, "%dx%d %d-bit RGB FrameBuffer initialized\n",
-		LCD_X_RES, LCD_Y_RES, LCD_BPP);
+		LCD_X_RES, LCD_Y_RES, info->lcd_bpp);
 
 	return 0;
 
@@ -705,7 +743,8 @@
  out8:
 	free_irq(info->irq, info);
  out7:
-	bfin_lq035q1_free_ports();
+	bfin_lq035q1_free_ports(info->disp_info->ppi_mode ==
+				USE_RGB565_16_BIT_PPI);
  out6:
 	fb_dealloc_cmap(&fbinfo->cmap);
  out4:
@@ -742,7 +781,8 @@
 
 	fb_dealloc_cmap(&fbinfo->cmap);
 
-	bfin_lq035q1_free_ports();
+	bfin_lq035q1_free_ports(info->disp_info->ppi_mode ==
+				USE_RGB565_16_BIT_PPI);
 
 	platform_set_drvdata(pdev, NULL);
 	framebuffer_release(fbinfo);
@@ -781,7 +821,7 @@
 
 		bfin_lq035q1_config_dma(info);
 		bfin_lq035q1_config_ppi(info);
-		bfin_lq035q1_init_timers();
+		bfin_lq035q1_init_timers(info);
 
 		/* start dma */
 		enable_dma(CH_PPI);
diff --git a/drivers/video/da8xx-fb.c b/drivers/video/da8xx-fb.c
index 8d244ba..cad7d45 100644
--- a/drivers/video/da8xx-fb.c
+++ b/drivers/video/da8xx-fb.c
@@ -36,7 +36,9 @@
 #define DRIVER_NAME "da8xx_lcdc"
 
 /* LCD Status Register */
+#define LCD_END_OF_FRAME1		BIT(9)
 #define LCD_END_OF_FRAME0		BIT(8)
+#define LCD_PL_LOAD_DONE		BIT(6)
 #define LCD_FIFO_UNDERFLOW		BIT(5)
 #define LCD_SYNC_LOST			BIT(2)
 
@@ -58,11 +60,13 @@
 #define LCD_PALETTE_LOAD_MODE(x)	((x) << 20)
 #define PALETTE_AND_DATA		0x00
 #define PALETTE_ONLY			0x01
+#define DATA_ONLY			0x02
 
 #define LCD_MONO_8BIT_MODE		BIT(9)
 #define LCD_RASTER_ORDER		BIT(8)
 #define LCD_TFT_MODE			BIT(7)
 #define LCD_UNDERFLOW_INT_ENA		BIT(6)
+#define LCD_PL_ENABLE			BIT(4)
 #define LCD_MONOCHROME_MODE		BIT(1)
 #define LCD_RASTER_ENABLE		BIT(0)
 #define LCD_TFT_ALT_ENABLE		BIT(23)
@@ -87,6 +91,10 @@
 #define  LCD_DMA_CTRL_REG			0x40
 #define  LCD_DMA_FRM_BUF_BASE_ADDR_0_REG	0x44
 #define  LCD_DMA_FRM_BUF_CEILING_ADDR_0_REG	0x48
+#define  LCD_DMA_FRM_BUF_BASE_ADDR_1_REG	0x4C
+#define  LCD_DMA_FRM_BUF_CEILING_ADDR_1_REG	0x50
+
+#define LCD_NUM_BUFFERS	2
 
 #define WSI_TIMEOUT	50
 #define PALETTE_SIZE	256
@@ -111,13 +119,20 @@
 struct da8xx_fb_par {
 	resource_size_t p_palette_base;
 	unsigned char *v_palette_base;
+	dma_addr_t		vram_phys;
+	unsigned long		vram_size;
+	void			*vram_virt;
+	unsigned int		dma_start;
+	unsigned int		dma_end;
 	struct clk *lcdc_clk;
 	int irq;
 	unsigned short pseudo_palette[16];
-	unsigned int databuf_sz;
 	unsigned int palette_sz;
 	unsigned int pxl_clk;
 	int blank;
+	wait_queue_head_t	vsync_wait;
+	int			vsync_flag;
+	int			vsync_timeout;
 #ifdef CONFIG_CPU_FREQ
 	struct notifier_block	freq_transition;
 #endif
@@ -148,9 +163,9 @@
 	.type = FB_TYPE_PACKED_PIXELS,
 	.type_aux = 0,
 	.visual = FB_VISUAL_PSEUDOCOLOR,
-	.xpanstep = 1,
+	.xpanstep = 0,
 	.ypanstep = 1,
-	.ywrapstep = 1,
+	.ywrapstep = 0,
 	.accel = FB_ACCEL_NONE
 };
 
@@ -221,22 +236,48 @@
 
 static void lcd_blit(int load_mode, struct da8xx_fb_par *par)
 {
-	u32 tmp = par->p_palette_base + par->databuf_sz - 4;
-	u32 reg;
+	u32 start;
+	u32 end;
+	u32 reg_ras;
+	u32 reg_dma;
 
-	/* Update the databuf in the hw. */
-	lcdc_write(par->p_palette_base, LCD_DMA_FRM_BUF_BASE_ADDR_0_REG);
-	lcdc_write(tmp, LCD_DMA_FRM_BUF_CEILING_ADDR_0_REG);
+	/* init reg to clear PLM (loading mode) fields */
+	reg_ras = lcdc_read(LCD_RASTER_CTRL_REG);
+	reg_ras &= ~(3 << 20);
 
-	/* Start the DMA. */
-	reg = lcdc_read(LCD_RASTER_CTRL_REG);
-	reg &= ~(3 << 20);
-	if (load_mode == LOAD_DATA)
-		reg |= LCD_PALETTE_LOAD_MODE(PALETTE_AND_DATA);
-	else if (load_mode == LOAD_PALETTE)
-		reg |= LCD_PALETTE_LOAD_MODE(PALETTE_ONLY);
+	reg_dma  = lcdc_read(LCD_DMA_CTRL_REG);
 
-	lcdc_write(reg, LCD_RASTER_CTRL_REG);
+	if (load_mode == LOAD_DATA) {
+		start    = par->dma_start;
+		end      = par->dma_end;
+
+		reg_ras |= LCD_PALETTE_LOAD_MODE(DATA_ONLY);
+		reg_dma |= LCD_END_OF_FRAME_INT_ENA;
+		reg_dma |= LCD_DUAL_FRAME_BUFFER_ENABLE;
+
+		lcdc_write(start, LCD_DMA_FRM_BUF_BASE_ADDR_0_REG);
+		lcdc_write(end, LCD_DMA_FRM_BUF_CEILING_ADDR_0_REG);
+		lcdc_write(start, LCD_DMA_FRM_BUF_BASE_ADDR_1_REG);
+		lcdc_write(end, LCD_DMA_FRM_BUF_CEILING_ADDR_1_REG);
+	} else if (load_mode == LOAD_PALETTE) {
+		start    = par->p_palette_base;
+		end      = start + par->palette_sz - 1;
+
+		reg_ras |= LCD_PALETTE_LOAD_MODE(PALETTE_ONLY);
+		reg_ras |= LCD_PL_ENABLE;
+
+		lcdc_write(start, LCD_DMA_FRM_BUF_BASE_ADDR_0_REG);
+		lcdc_write(end, LCD_DMA_FRM_BUF_CEILING_ADDR_0_REG);
+	}
+
+	lcdc_write(reg_dma, LCD_DMA_CTRL_REG);
+	lcdc_write(reg_ras, LCD_RASTER_CTRL_REG);
+
+	/*
+	 * The Raster enable bit must be set after all other control fields are
+	 * set.
+	 */
+	lcd_enable_raster();
 }
 
 /* Configure the Burst Size of DMA */
@@ -368,12 +409,8 @@
 static int lcd_cfg_frame_buffer(struct da8xx_fb_par *par, u32 width, u32 height,
 		u32 bpp, u32 raster_order)
 {
-	u32 bpl, reg;
+	u32 reg;
 
-	/* Disable Dual Frame Buffer. */
-	reg = lcdc_read(LCD_DMA_CTRL_REG);
-	lcdc_write(reg & ~LCD_DUAL_FRAME_BUFFER_ENABLE,
-						LCD_DMA_CTRL_REG);
 	/* Set the Panel Width */
 	/* Pixels per line = (PPL + 1)*16 */
 	/*0x3F in bits 4..9 gives max horizontal resolution = 1024 pixels*/
@@ -410,9 +447,6 @@
 		return -EINVAL;
 	}
 
-	bpl = width * bpp / 8;
-	par->databuf_sz = height * bpl + par->palette_sz;
-
 	return 0;
 }
 
@@ -421,8 +455,9 @@
 			      struct fb_info *info)
 {
 	struct da8xx_fb_par *par = info->par;
-	unsigned short *palette = (unsigned short *)par->v_palette_base;
+	unsigned short *palette = (unsigned short *) par->v_palette_base;
 	u_short pal;
+	int update_hw = 0;
 
 	if (regno > 255)
 		return 1;
@@ -439,8 +474,10 @@
 		pal |= (green & 0x00f0);
 		pal |= (blue & 0x000f);
 
-		palette[regno] = pal;
-
+		if (palette[regno] != pal) {
+			update_hw = 1;
+			palette[regno] = pal;
+		}
 	} else if ((info->var.bits_per_pixel == 16) && regno < 16) {
 		red >>= (16 - info->var.red.length);
 		red <<= info->var.red.offset;
@@ -453,9 +490,16 @@
 
 		par->pseudo_palette[regno] = red | green | blue;
 
-		palette[0] = 0x4000;
+		if (palette[0] != 0x4000) {
+			update_hw = 1;
+			palette[0] = 0x4000;
+		}
 	}
 
+	/* Update the palette in the h/w as needed. */
+	if (update_hw)
+		lcd_blit(LOAD_PALETTE, par);
+
 	return 0;
 }
 
@@ -541,15 +585,54 @@
 
 static irqreturn_t lcdc_irq_handler(int irq, void *arg)
 {
+	struct da8xx_fb_par *par = arg;
 	u32 stat = lcdc_read(LCD_STAT_REG);
+	u32 reg_ras;
 
 	if ((stat & LCD_SYNC_LOST) && (stat & LCD_FIFO_UNDERFLOW)) {
 		lcd_disable_raster();
 		lcdc_write(stat, LCD_STAT_REG);
 		lcd_enable_raster();
-	} else
+	} else if (stat & LCD_PL_LOAD_DONE) {
+		/*
+		 * Must disable raster before changing state of any control bit.
+		 * And also must be disabled before clearing the PL loading
+		 * interrupt via the following write to the status register. If
+		 * this is done afterwards, one gets multiple PL done interrupts.
+		 */
+		lcd_disable_raster();
+
 		lcdc_write(stat, LCD_STAT_REG);
 
+		/* Disable PL completion interrupt */
+		reg_ras  = lcdc_read(LCD_RASTER_CTRL_REG);
+		reg_ras &= ~LCD_PL_ENABLE;
+		lcdc_write(reg_ras, LCD_RASTER_CTRL_REG);
+
+		/* Setup and start data loading mode */
+		lcd_blit(LOAD_DATA, par);
+	} else {
+		lcdc_write(stat, LCD_STAT_REG);
+
+		if (stat & LCD_END_OF_FRAME0) {
+			lcdc_write(par->dma_start,
+				   LCD_DMA_FRM_BUF_BASE_ADDR_0_REG);
+			lcdc_write(par->dma_end,
+				   LCD_DMA_FRM_BUF_CEILING_ADDR_0_REG);
+			par->vsync_flag = 1;
+			wake_up_interruptible(&par->vsync_wait);
+		}
+
+		if (stat & LCD_END_OF_FRAME1) {
+			lcdc_write(par->dma_start,
+				   LCD_DMA_FRM_BUF_BASE_ADDR_1_REG);
+			lcdc_write(par->dma_end,
+				   LCD_DMA_FRM_BUF_CEILING_ADDR_1_REG);
+			par->vsync_flag = 1;
+			wake_up_interruptible(&par->vsync_wait);
+		}
+	}
+
 	return IRQ_HANDLED;
 }
 
@@ -654,9 +737,10 @@
 
 		unregister_framebuffer(info);
 		fb_dealloc_cmap(&info->cmap);
-		dma_free_coherent(NULL, par->databuf_sz + PAGE_SIZE,
-					info->screen_base - PAGE_SIZE,
-					info->fix.smem_start);
+		dma_free_coherent(NULL, PALETTE_SIZE, par->v_palette_base,
+				  par->p_palette_base);
+		dma_free_coherent(NULL, par->vram_size, par->vram_virt,
+				  par->vram_phys);
 		free_irq(par->irq, par);
 		clk_disable(par->lcdc_clk);
 		clk_put(par->lcdc_clk);
@@ -668,6 +752,39 @@
 	return 0;
 }
 
+/*
+ * Function to wait for vertical sync which for this LCD peripheral
+ * translates into waiting for the current raster frame to complete.
+ */
+static int fb_wait_for_vsync(struct fb_info *info)
+{
+	struct da8xx_fb_par *par = info->par;
+	int ret;
+
+	/*
+	 * Set flag to 0 and wait for isr to set to 1. It would seem there is a
+	 * race condition here where the ISR could have occurred just before or
+	 * just after this set. But since we are just coarsely waiting for
+	 * a frame to complete, that's OK. I.e. if the frame completed
+	 * just before this code executed then we have to wait another full
+	 * frame time but there is no way to avoid such a situation. On the
+	 * other hand if the frame completed just after then we don't need
+	 * to wait long at all. Either way we are guaranteed to return to the
+	 * user immediately after a frame completion which is all that is
+	 * required.
+	 */
+	par->vsync_flag = 0;
+	ret = wait_event_interruptible_timeout(par->vsync_wait,
+					       par->vsync_flag != 0,
+					       par->vsync_timeout);
+	if (ret < 0)
+		return ret;
+	if (ret == 0)
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
 static int fb_ioctl(struct fb_info *info, unsigned int cmd,
 			  unsigned long arg)
 {
@@ -697,6 +814,8 @@
 					sync_arg.pulse_width,
 					sync_arg.front_porch);
 		break;
+	case FBIO_WAITFORVSYNC:
+		return fb_wait_for_vsync(info);
 	default:
 		return -EINVAL;
 	}
@@ -732,10 +851,47 @@
 	return ret;
 }
 
+/*
+ * Set new x,y offsets in the virtual display for the visible area and switch
+ * to the new mode.
+ */
+static int da8xx_pan_display(struct fb_var_screeninfo *var,
+			     struct fb_info *fbi)
+{
+	int ret = 0;
+	struct fb_var_screeninfo new_var;
+	struct da8xx_fb_par         *par = fbi->par;
+	struct fb_fix_screeninfo    *fix = &fbi->fix;
+	unsigned int end;
+	unsigned int start;
+
+	if (var->xoffset != fbi->var.xoffset ||
+			var->yoffset != fbi->var.yoffset) {
+		memcpy(&new_var, &fbi->var, sizeof(new_var));
+		new_var.xoffset = var->xoffset;
+		new_var.yoffset = var->yoffset;
+		if (fb_check_var(&new_var, fbi))
+			ret = -EINVAL;
+		else {
+			memcpy(&fbi->var, &new_var, sizeof(new_var));
+
+			start	= fix->smem_start +
+				new_var.yoffset * fix->line_length +
+				new_var.xoffset * var->bits_per_pixel / 8;
+			end	= start + var->yres * fix->line_length - 1;
+			par->dma_start	= start;
+			par->dma_end	= end;
+		}
+	}
+
+	return ret;
+}
+
 static struct fb_ops da8xx_fb_ops = {
 	.owner = THIS_MODULE,
 	.fb_check_var = fb_check_var,
 	.fb_setcolreg = fb_setcolreg,
+	.fb_pan_display = da8xx_pan_display,
 	.fb_ioctl = fb_ioctl,
 	.fb_fillrect = cfb_fillrect,
 	.fb_copyarea = cfb_copyarea,
@@ -829,40 +985,53 @@
 	}
 
 	/* allocate frame buffer */
-	da8xx_fb_info->screen_base = dma_alloc_coherent(NULL,
-					par->databuf_sz + PAGE_SIZE,
-					(resource_size_t *)
-					&da8xx_fb_info->fix.smem_start,
-					GFP_KERNEL | GFP_DMA);
+	par->vram_size = lcdc_info->width * lcdc_info->height * lcd_cfg->bpp;
+	par->vram_size = PAGE_ALIGN(par->vram_size/8);
+	par->vram_size = par->vram_size * LCD_NUM_BUFFERS;
 
-	if (!da8xx_fb_info->screen_base) {
+	par->vram_virt = dma_alloc_coherent(NULL,
+					    par->vram_size,
+					    (resource_size_t *) &par->vram_phys,
+					    GFP_KERNEL | GFP_DMA);
+	if (!par->vram_virt) {
 		dev_err(&device->dev,
 			"GLCD: kmalloc for frame buffer failed\n");
 		ret = -EINVAL;
 		goto err_release_fb;
 	}
 
-	/* move palette base pointer by (PAGE_SIZE - palette_sz) bytes */
-	par->v_palette_base = da8xx_fb_info->screen_base +
-				(PAGE_SIZE - par->palette_sz);
-	par->p_palette_base = da8xx_fb_info->fix.smem_start +
-				(PAGE_SIZE - par->palette_sz);
+	da8xx_fb_info->screen_base = (char __iomem *) par->vram_virt;
+	da8xx_fb_fix.smem_start    = par->vram_phys;
+	da8xx_fb_fix.smem_len      = par->vram_size;
+	da8xx_fb_fix.line_length   = (lcdc_info->width * lcd_cfg->bpp) / 8;
 
-	/* the rest of the frame buffer is pixel data */
-	da8xx_fb_info->screen_base = par->v_palette_base + par->palette_sz;
-	da8xx_fb_fix.smem_start = par->p_palette_base + par->palette_sz;
-	da8xx_fb_fix.smem_len = par->databuf_sz - par->palette_sz;
-	da8xx_fb_fix.line_length = (lcdc_info->width * lcd_cfg->bpp) / 8;
+	par->dma_start = par->vram_phys;
+	par->dma_end   = par->dma_start + lcdc_info->height *
+		da8xx_fb_fix.line_length - 1;
+
+	/* allocate palette buffer */
+	par->v_palette_base = dma_alloc_coherent(NULL,
+					       PALETTE_SIZE,
+					       (resource_size_t *)
+					       &par->p_palette_base,
+					       GFP_KERNEL | GFP_DMA);
+	if (!par->v_palette_base) {
+		dev_err(&device->dev,
+			"GLCD: kmalloc for palette buffer failed\n");
+		ret = -EINVAL;
+		goto err_release_fb_mem;
+	}
+	memset(par->v_palette_base, 0, PALETTE_SIZE);
 
 	par->irq = platform_get_irq(device, 0);
 	if (par->irq < 0) {
 		ret = -ENOENT;
-		goto err_release_fb_mem;
+		goto err_release_pl_mem;
 	}
 
 	ret = request_irq(par->irq, lcdc_irq_handler, 0, DRIVER_NAME, par);
 	if (ret)
-		goto err_release_fb_mem;
+		goto err_release_pl_mem;
 
 	/* Initialize par */
 	da8xx_fb_info->var.bits_per_pixel = lcd_cfg->bpp;
@@ -870,8 +1039,8 @@
 	da8xx_fb_var.xres = lcdc_info->width;
 	da8xx_fb_var.xres_virtual = lcdc_info->width;
 
-	da8xx_fb_var.yres = lcdc_info->height;
-	da8xx_fb_var.yres_virtual = lcdc_info->height;
+	da8xx_fb_var.yres         = lcdc_info->height;
+	da8xx_fb_var.yres_virtual = lcdc_info->height * LCD_NUM_BUFFERS;
 
 	da8xx_fb_var.grayscale =
 	    lcd_cfg->p_disp_panel->panel_shade == MONOCHROME ? 1 : 0;
@@ -892,18 +1061,18 @@
 	ret = fb_alloc_cmap(&da8xx_fb_info->cmap, PALETTE_SIZE, 0);
 	if (ret)
 		goto err_free_irq;
-
-	/* First palette_sz byte of the frame buffer is the palette */
 	da8xx_fb_info->cmap.len = par->palette_sz;
 
-	/* Flush the buffer to the screen. */
-	lcd_blit(LOAD_DATA, par);
-
 	/* initialize var_screeninfo */
 	da8xx_fb_var.activate = FB_ACTIVATE_FORCE;
 	fb_set_var(da8xx_fb_info, &da8xx_fb_var);
 
 	dev_set_drvdata(&device->dev, da8xx_fb_info);
+
+	/* initialize the vsync wait queue */
+	init_waitqueue_head(&par->vsync_wait);
+	par->vsync_timeout = HZ / 5;
+
 	/* Register the Frame Buffer  */
 	if (register_framebuffer(da8xx_fb_info) < 0) {
 		dev_err(&device->dev,
@@ -919,10 +1088,6 @@
 		goto err_cpu_freq;
 	}
 #endif
-
-	/* enable raster engine */
-	lcd_enable_raster();
-
 	return 0;
 
 #ifdef CONFIG_CPU_FREQ
@@ -936,10 +1101,12 @@
 err_free_irq:
 	free_irq(par->irq, par);
 
+err_release_pl_mem:
+	dma_free_coherent(NULL, PALETTE_SIZE, par->v_palette_base,
+			  par->p_palette_base);
+
 err_release_fb_mem:
-	dma_free_coherent(NULL, par->databuf_sz + PAGE_SIZE,
-				da8xx_fb_info->screen_base - PAGE_SIZE,
-				da8xx_fb_info->fix.smem_start);
+	dma_free_coherent(NULL, par->vram_size, par->vram_virt, par->vram_phys);
 
 err_release_fb:
 	framebuffer_release(da8xx_fb_info);
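The new FBIO_WAITFORVSYNC path in da8xx-fb is the textbook flag-plus-waitqueue handshake between interrupt and process context, with HZ/5 jiffies (200 ms regardless of the HZ setting) as the give-up point. Stripped of the LCDC details, with illustrative foo names:

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/wait.h>

struct foo_dev {
	wait_queue_head_t ev_wait;
	int ev_flag;
};

static void foo_dev_init(struct foo_dev *d)
{
	init_waitqueue_head(&d->ev_wait);
}

/* interrupt side: record the event, then wake any sleeper */
static void foo_event_irq(struct foo_dev *d)
{
	d->ev_flag = 1;
	wake_up_interruptible(&d->ev_wait);
}

/* process side: coarse wait for the next event */
static int foo_wait_event(struct foo_dev *d)
{
	int ret;

	d->ev_flag = 0;		/* benign race: see the comment in the hunk above */
	ret = wait_event_interruptible_timeout(d->ev_wait, d->ev_flag != 0,
					       HZ / 5);
	if (ret == 0)
		return -ETIMEDOUT;	/* no event within the timeout */
	return ret < 0 ? ret : 0;	/* ret < 0: interrupted by a signal */
}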
diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c
index 6113c47..1105a59 100644
--- a/drivers/video/fb_defio.c
+++ b/drivers/video/fb_defio.c
@@ -155,25 +155,41 @@
 {
 	struct fb_info *info = container_of(work, struct fb_info,
 						deferred_work.work);
-	struct list_head *node, *next;
-	struct page *cur;
 	struct fb_deferred_io *fbdefio = info->fbdefio;
+	struct page *page, *tmp_page;
+	struct list_head *node, *tmp_node;
+	struct list_head non_dirty;
+
+	INIT_LIST_HEAD(&non_dirty);
 
 	/* here we mkclean the pages, then do all deferred IO */
 	mutex_lock(&fbdefio->lock);
-	list_for_each_entry(cur, &fbdefio->pagelist, lru) {
-		lock_page(cur);
-		page_mkclean(cur);
-		unlock_page(cur);
+	list_for_each_entry_safe(page, tmp_page, &fbdefio->pagelist, lru) {
+		lock_page(page);
+		/*
+		 * The workqueue callback can be triggered after a
+		 * ->page_mkwrite() call but before the PTE has been marked
+		 * dirty. In this case page_mkclean() won't "rearm" the page.
+		 *
+		 * To avoid this, remove those "non-dirty" pages from the
+		 * pagelist before calling the driver's callback, then add
+		 * them back to get processed on the next work iteration.
+		 * At that time, their PTEs will hopefully be dirty for real.
+		 */
+		if (!page_mkclean(page))
+			list_move_tail(&page->lru, &non_dirty);
+		unlock_page(page);
 	}
 
 	/* driver's callback with pagelist */
 	fbdefio->deferred_io(info, &fbdefio->pagelist);
 
-	/* clear the list */
-	list_for_each_safe(node, next, &fbdefio->pagelist) {
+	/* clear the list... */
+	list_for_each_safe(node, tmp_node, &fbdefio->pagelist) {
 		list_del(node);
 	}
+	/* ... and add back the "non-dirty" pages to the list */
+	list_splice_tail(&non_dirty, &fbdefio->pagelist);
 	mutex_unlock(&fbdefio->lock);
 }
 
@@ -202,6 +218,7 @@
 void fb_deferred_io_cleanup(struct fb_info *info)
 {
 	struct fb_deferred_io *fbdefio = info->fbdefio;
+	struct list_head *node, *tmp_node;
 	struct page *page;
 	int i;
 
@@ -209,6 +226,13 @@
 	cancel_delayed_work(&info->deferred_work);
 	flush_scheduled_work();
 
+	/* the list may still have some non-dirty pages at this point */
+	mutex_lock(&fbdefio->lock);
+	list_for_each_safe(node, tmp_node, &fbdefio->pagelist) {
+		list_del(node);
+	}
+	mutex_unlock(&fbdefio->lock);
+
 	/* clear out the mapping that we setup */
 	for (i = 0 ; i < info->fix.smem_len; i += PAGE_SIZE) {
 		page = fb_deferred_io_page(info, i);
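The fb_defio rework above leans on two <linux/list.h> helpers: list_move_tail(), which unlinks a node and appends it to another list, and list_splice_tail(), which appends an entire list in one operation. The partition-process-requeue shape of the new work function, as a generic sketch with hypothetical foo names:

#include <linux/list.h>

struct foo_item {
	struct list_head node;
	bool ready;
};

static void foo_process_pending(struct list_head *pending)
{
	struct foo_item *cur, *tmp;
	LIST_HEAD(deferred);

	/* partition: park the not-yet-ready items on a side list */
	list_for_each_entry_safe(cur, tmp, pending, node) {
		if (!cur->ready)
			list_move_tail(&cur->node, &deferred);
	}

	/* ... handle everything still on 'pending' ... */

	/* requeue the parked items so the next pass sees them again */
	list_splice_tail(&deferred, pending);
}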
diff --git a/drivers/video/hgafb.c b/drivers/video/hgafb.c
index 8bbf251..af8f0f2 100644
--- a/drivers/video/hgafb.c
+++ b/drivers/video/hgafb.c
@@ -106,7 +106,7 @@
 
 /* Framebuffer driver structures */
 
-static struct fb_var_screeninfo __initdata hga_default_var = {
+static struct fb_var_screeninfo hga_default_var __devinitdata = {
 	.xres		= 720,
 	.yres 		= 348,
 	.xres_virtual 	= 720,
@@ -120,7 +120,7 @@
 	.width 		= -1,
 };
 
-static struct fb_fix_screeninfo __initdata hga_fix = {
+static struct fb_fix_screeninfo hga_fix __devinitdata = {
 	.id 		= "HGA",
 	.type 		= FB_TYPE_PACKED_PIXELS,	/* (not sure) */
 	.visual 	= FB_VISUAL_MONO10,
@@ -276,7 +276,7 @@
 	spin_unlock_irqrestore(&hga_reg_lock, flags);
 }
 
-static int __init hga_card_detect(void)
+static int __devinit hga_card_detect(void)
 {
 	int count = 0;
 	void __iomem *p, *q;
@@ -596,7 +596,7 @@
 	return 0;
 }
 
-static int hgafb_remove(struct platform_device *pdev)
+static int __devexit hgafb_remove(struct platform_device *pdev)
 {
 	struct fb_info *info = platform_get_drvdata(pdev);
 
@@ -621,7 +621,7 @@
 
 static struct platform_driver hgafb_driver = {
 	.probe = hgafb_probe,
-	.remove = hgafb_remove,
+	.remove = __devexit_p(hgafb_remove),
 	.driver = {
 		.name = "hgafb",
 	},
diff --git a/drivers/video/hitfb.c b/drivers/video/hitfb.c
index 393f3f3..cfb8d64 100644
--- a/drivers/video/hitfb.c
+++ b/drivers/video/hitfb.c
@@ -30,14 +30,14 @@
 
 #define	WIDTH 640
 
-static struct fb_var_screeninfo hitfb_var __initdata = {
+static struct fb_var_screeninfo hitfb_var __devinitdata = {
 	.activate	= FB_ACTIVATE_NOW,
 	.height		= -1,
 	.width		= -1,
 	.vmode		= FB_VMODE_NONINTERLACED,
 };
 
-static struct fb_fix_screeninfo hitfb_fix __initdata = {
+static struct fb_fix_screeninfo hitfb_fix __devinitdata = {
 	.id		= "Hitachi HD64461",
 	.type		= FB_TYPE_PACKED_PIXELS,
 	.accel		= FB_ACCEL_NONE,
@@ -417,7 +417,7 @@
 	return ret;
 }
 
-static int __exit hitfb_remove(struct platform_device *dev)
+static int __devexit hitfb_remove(struct platform_device *dev)
 {
 	struct fb_info *info = platform_get_drvdata(dev);
 
@@ -462,7 +462,7 @@
 
 static struct platform_driver hitfb_driver = {
 	.probe		= hitfb_probe,
-	.remove		= __exit_p(hitfb_remove),
+	.remove		= __devexit_p(hitfb_remove),
 	.driver		= {
 		.name	= "hitfb",
 		.owner	= THIS_MODULE,
diff --git a/drivers/video/intelfb/intelfb.h b/drivers/video/intelfb/intelfb.h
index 4098455..6b51175 100644
--- a/drivers/video/intelfb/intelfb.h
+++ b/drivers/video/intelfb/intelfb.h
@@ -371,10 +371,6 @@
 			((dinfo)->chipset == INTEL_965G) ||	\
 			((dinfo)->chipset == INTEL_965GM))
 
-#ifndef FBIO_WAITFORVSYNC
-#define FBIO_WAITFORVSYNC	_IOW('F', 0x20, __u32)
-#endif
-
 /*** function prototypes ***/
 
 extern int intelfb_var_to_depth(const struct fb_var_screeninfo *var);
diff --git a/drivers/video/nuc900fb.c b/drivers/video/nuc900fb.c
index 6bf0d46..d4cde79 100644
--- a/drivers/video/nuc900fb.c
+++ b/drivers/video/nuc900fb.c
@@ -667,7 +667,7 @@
 release_regs:
 	iounmap(fbi->io);
 release_mem_region:
-	release_mem_region((unsigned long)fbi->mem, size);
+	release_mem_region(res->start, size);
 free_fb:
 	framebuffer_release(fbinfo);
 	return ret;
diff --git a/drivers/video/s3c2410fb.c b/drivers/video/s3c2410fb.c
index 2b094de..46b4309 100644
--- a/drivers/video/s3c2410fb.c
+++ b/drivers/video/s3c2410fb.c
@@ -631,7 +631,7 @@
  *	cache.  Once this area is remapped, all virtual memory
  *	access to the video memory should occur at the new region.
  */
-static int __init s3c2410fb_map_video_memory(struct fb_info *info)
+static int __devinit s3c2410fb_map_video_memory(struct fb_info *info)
 {
 	struct s3c2410fb_info *fbi = info->par;
 	dma_addr_t map_dma;
@@ -814,7 +814,7 @@
 
 static char driver_name[] = "s3c2410fb";
 
-static int __init s3c24xxfb_probe(struct platform_device *pdev,
+static int __devinit s3c24xxfb_probe(struct platform_device *pdev,
 				  enum s3c_drv_type drv_type)
 {
 	struct s3c2410fb_info *info;
@@ -1018,7 +1018,7 @@
 /*
  *  Cleanup
  */
-static int s3c2410fb_remove(struct platform_device *pdev)
+static int __devexit s3c2410fb_remove(struct platform_device *pdev)
 {
 	struct fb_info *fbinfo = platform_get_drvdata(pdev);
 	struct s3c2410fb_info *info = fbinfo->par;
@@ -1096,7 +1096,7 @@
 
 static struct platform_driver s3c2410fb_driver = {
 	.probe		= s3c2410fb_probe,
-	.remove		= s3c2410fb_remove,
+	.remove		= __devexit_p(s3c2410fb_remove),
 	.suspend	= s3c2410fb_suspend,
 	.resume		= s3c2410fb_resume,
 	.driver		= {
@@ -1107,7 +1107,7 @@
 
 static struct platform_driver s3c2412fb_driver = {
 	.probe		= s3c2412fb_probe,
-	.remove		= s3c2410fb_remove,
+	.remove		= __devexit_p(s3c2410fb_remove),
 	.suspend	= s3c2410fb_suspend,
 	.resume		= s3c2410fb_resume,
 	.driver		= {
diff --git a/drivers/video/sgivwfb.c b/drivers/video/sgivwfb.c
index 7a3a5e2..53455f2 100644
--- a/drivers/video/sgivwfb.c
+++ b/drivers/video/sgivwfb.c
@@ -47,7 +47,7 @@
 
 static int flatpanel_id = -1;
 
-static struct fb_fix_screeninfo sgivwfb_fix __initdata = {
+static struct fb_fix_screeninfo sgivwfb_fix __devinitdata = {
 	.id		= "SGI Vis WS FB",
 	.type		= FB_TYPE_PACKED_PIXELS,
         .visual		= FB_VISUAL_PSEUDOCOLOR,
@@ -57,7 +57,7 @@
 	.line_length	= 640,
 };
 
-static struct fb_var_screeninfo sgivwfb_var __initdata = {
+static struct fb_var_screeninfo sgivwfb_var __devinitdata = {
 	/* 640x480, 8 bpp */
 	.xres		= 640,
 	.yres		= 480,
@@ -79,7 +79,7 @@
 	.vmode		= FB_VMODE_NONINTERLACED
 };
 
-static struct fb_var_screeninfo sgivwfb_var1600sw __initdata = {
+static struct fb_var_screeninfo sgivwfb_var1600sw __devinitdata = {
 	/* 1600x1024, 8 bpp */
 	.xres		= 1600,
 	.yres		= 1024,
@@ -825,7 +825,7 @@
 	return -ENXIO;
 }
 
-static int sgivwfb_remove(struct platform_device *dev)
+static int __devexit sgivwfb_remove(struct platform_device *dev)
 {
 	struct fb_info *info = platform_get_drvdata(dev);
 
@@ -845,7 +845,7 @@
 
 static struct platform_driver sgivwfb_driver = {
 	.probe	= sgivwfb_probe,
-	.remove	= sgivwfb_remove,
+	.remove	= __devexit_p(sgivwfb_remove),
 	.driver	= {
 		.name	= "sgivwfb",
 	},
diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c
index a531a0f..559bf17 100644
--- a/drivers/video/sis/sis_main.c
+++ b/drivers/video/sis/sis_main.c
@@ -1845,7 +1845,7 @@
 
 	memset(fix, 0, sizeof(struct fb_fix_screeninfo));
 
-	strcpy(fix->id, ivideo->myid);
+	strlcpy(fix->id, ivideo->myid, sizeof(fix->id));
 
 	mutex_lock(&info->mm_lock);
 	fix->smem_start  = ivideo->video_base + ivideo->video_offset;
diff --git a/drivers/video/vfb.c b/drivers/video/vfb.c
index 9b5532b..bc67251 100644
--- a/drivers/video/vfb.c
+++ b/drivers/video/vfb.c
@@ -78,7 +78,7 @@
 	vfree(mem);
 }
 
-static struct fb_var_screeninfo vfb_default __initdata = {
+static struct fb_var_screeninfo vfb_default __devinitdata = {
 	.xres =		640,
 	.yres =		480,
 	.xres_virtual =	640,
@@ -100,7 +100,7 @@
       	.vmode =	FB_VMODE_NONINTERLACED,
 };
 
-static struct fb_fix_screeninfo vfb_fix __initdata = {
+static struct fb_fix_screeninfo vfb_fix __devinitdata = {
 	.id =		"Virtual FB",
 	.type =		FB_TYPE_PACKED_PIXELS,
 	.visual =	FB_VISUAL_PSEUDOCOLOR,
diff --git a/drivers/video/vga16fb.c b/drivers/video/vga16fb.c
index 149c47a..28ccab4 100644
--- a/drivers/video/vga16fb.c
+++ b/drivers/video/vga16fb.c
@@ -65,7 +65,7 @@
 
 /* --------------------------------------------------------------------- */
 
-static struct fb_var_screeninfo vga16fb_defined __initdata = {
+static struct fb_var_screeninfo vga16fb_defined __devinitdata = {
 	.xres		= 640,
 	.yres		= 480,
 	.xres_virtual	= 640,
@@ -85,7 +85,7 @@
 };
 
 /* name should not depend on EGA/VGA */
-static struct fb_fix_screeninfo vga16fb_fix __initdata = {
+static struct fb_fix_screeninfo vga16fb_fix __devinitdata = {
 	.id		= "VGA16 VGA",
 	.smem_start	= VGA_FB_PHYS,
 	.smem_len	= VGA_FB_PHYS_LEN,
@@ -1287,7 +1287,7 @@
 };
 
 #ifndef MODULE
-static int vga16fb_setup(char *options)
+static int __init vga16fb_setup(char *options)
 {
 	char *this_opt;
 	
@@ -1393,7 +1393,7 @@
 	return ret;
 }
 
-static int vga16fb_remove(struct platform_device *dev)
+static int __devexit vga16fb_remove(struct platform_device *dev)
 {
 	struct fb_info *info = platform_get_drvdata(dev);
 
@@ -1405,7 +1405,7 @@
 
 static struct platform_driver vga16fb_driver = {
 	.probe = vga16fb_probe,
-	.remove = vga16fb_remove,
+	.remove = __devexit_p(vga16fb_remove),
 	.driver = {
 		.name = "vga16fb",
 	},
diff --git a/drivers/video/w100fb.c b/drivers/video/w100fb.c
index 31b0e17..e66b8b1 100644
--- a/drivers/video/w100fb.c
+++ b/drivers/video/w100fb.c
@@ -53,7 +53,7 @@
 static void w100_update_disable(void);
 static void calc_hsync(struct w100fb_par *par);
 static void w100_init_graphic_engine(struct w100fb_par *par);
-struct w100_pll_info *w100_get_xtal_table(unsigned int freq);
+struct w100_pll_info *w100_get_xtal_table(unsigned int freq) __devinit;
 
 /* Pseudo palette size */
 #define MAX_PALETTES      16
@@ -782,7 +782,7 @@
 }
 
 
-static int w100fb_remove(struct platform_device *pdev)
+static int __devexit w100fb_remove(struct platform_device *pdev)
 {
 	struct fb_info *info = platform_get_drvdata(pdev);
 	struct w100fb_par *par=info->par;
@@ -1020,7 +1020,7 @@
 	{ 0 },
 };
 
-struct w100_pll_info *w100_get_xtal_table(unsigned int freq)
+struct w100_pll_info __devinit *w100_get_xtal_table(unsigned int freq)
 {
 	struct pll_entries *pll_entry = w100_pll_tables;
 
@@ -1611,7 +1611,7 @@
 
 static struct platform_driver w100fb_driver = {
 	.probe		= w100fb_probe,
-	.remove		= w100fb_remove,
+	.remove		= __devexit_p(w100fb_remove),
 	.suspend	= w100fb_suspend,
 	.resume		= w100fb_resume,
 	.driver		= {
@@ -1619,7 +1619,7 @@
 	},
 };
 
-int __devinit w100fb_init(void)
+int __init w100fb_init(void)
 {
 	return platform_driver_register(&w100fb_driver);
 }
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index 8943b8c..07e857b 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -185,6 +185,7 @@
 	kfree(str);
 }
 
+#ifdef CONFIG_MAGIC_SYSRQ
 static void sysrq_handler(struct xenbus_watch *watch, const char **vec,
 			  unsigned int len)
 {
@@ -214,15 +215,16 @@
 		handle_sysrq(sysrq_key, NULL);
 }
 
-static struct xenbus_watch shutdown_watch = {
-	.node = "control/shutdown",
-	.callback = shutdown_handler
-};
-
 static struct xenbus_watch sysrq_watch = {
 	.node = "control/sysrq",
 	.callback = sysrq_handler
 };
+#endif
+
+static struct xenbus_watch shutdown_watch = {
+	.node = "control/shutdown",
+	.callback = shutdown_handler
+};
 
 static int setup_shutdown_watcher(void)
 {
@@ -234,11 +236,13 @@
 		return err;
 	}
 
+#ifdef CONFIG_MAGIC_SYSRQ
 	err = register_xenbus_watch(&sysrq_watch);
 	if (err) {
 		printk(KERN_ERR "Failed to set sysrq watcher\n");
 		return err;
 	}
+#endif
 
 	return 0;
 }
diff --git a/fs/exec.c b/fs/exec.c
index e6e94c6..9badbc0 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -242,9 +242,10 @@
 	 * use STACK_TOP because that can depend on attributes which aren't
 	 * configured yet.
 	 */
+	BUG_ON(VM_STACK_FLAGS & VM_STACK_INCOMPLETE_SETUP);
 	vma->vm_end = STACK_TOP_MAX;
 	vma->vm_start = vma->vm_end - PAGE_SIZE;
-	vma->vm_flags = VM_STACK_FLAGS;
+	vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
 	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
 	INIT_LIST_HEAD(&vma->anon_vma_chain);
 	err = insert_vm_struct(mm, vma);
@@ -616,6 +617,7 @@
 	else if (executable_stack == EXSTACK_DISABLE_X)
 		vm_flags &= ~VM_EXEC;
 	vm_flags |= mm->def_flags;
+	vm_flags |= VM_STACK_INCOMPLETE_SETUP;
 
 	ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
 			vm_flags);
@@ -630,6 +632,9 @@
 			goto out_unlock;
 	}
 
+	/* mprotect_fixup is overkill to remove the temporary stack flags */
+	vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
+
 	stack_expand = 131072UL; /* randomly 32*4k (or 2*64k) pages */
 	stack_size = vma->vm_end - vma->vm_start;
 	/*
diff --git a/fs/fat/cache.c b/fs/fat/cache.c
index 113f0a1..ae8200f 100644
--- a/fs/fat/cache.c
+++ b/fs/fat/cache.c
@@ -242,9 +242,10 @@
 	while (*fclus < cluster) {
 		/* prevent the infinite loop of cluster chain */
 		if (*fclus > limit) {
-			fat_fs_error(sb, "%s: detected the cluster chain loop"
-				     " (i_pos %lld)", __func__,
-				     MSDOS_I(inode)->i_pos);
+			fat_fs_error_ratelimit(sb,
+					"%s: detected the cluster chain loop"
+					" (i_pos %lld)", __func__,
+					MSDOS_I(inode)->i_pos);
 			nr = -EIO;
 			goto out;
 		}
@@ -253,9 +254,9 @@
 		if (nr < 0)
 			goto out;
 		else if (nr == FAT_ENT_FREE) {
-			fat_fs_error(sb, "%s: invalid cluster chain"
-				     " (i_pos %lld)", __func__,
-				     MSDOS_I(inode)->i_pos);
+			fat_fs_error_ratelimit(sb, "%s: invalid cluster chain"
+					       " (i_pos %lld)", __func__,
+					       MSDOS_I(inode)->i_pos);
 			nr = -EIO;
 			goto out;
 		} else if (nr == FAT_ENT_EOF) {
diff --git a/fs/fat/fat.h b/fs/fat/fat.h
index eb821ee..53dba57 100644
--- a/fs/fat/fat.h
+++ b/fs/fat/fat.h
@@ -6,6 +6,7 @@
 #include <linux/nls.h>
 #include <linux/fs.h>
 #include <linux/mutex.h>
+#include <linux/ratelimit.h>
 #include <linux/msdos_fs.h>
 
 /*
@@ -82,6 +83,8 @@
 	struct fatent_operations *fatent_ops;
 	struct inode *fat_inode;
 
+	struct ratelimit_state ratelimit;
+
 	spinlock_t inode_hash_lock;
 	struct hlist_head inode_hashtable[FAT_HASH_SIZE];
 };
@@ -322,8 +325,13 @@
 extern int fat_flush_inodes(struct super_block *sb, struct inode *i1,
 		            struct inode *i2);
 /* fat/misc.c */
-extern void fat_fs_error(struct super_block *s, const char *fmt, ...)
-	__attribute__ ((format (printf, 2, 3))) __cold;
+extern void
+__fat_fs_error(struct super_block *s, int report, const char *fmt, ...)
+	__attribute__ ((format (printf, 3, 4))) __cold;
+#define fat_fs_error(s, fmt, args...)		\
+	__fat_fs_error(s, 1, fmt , ## args)
+#define fat_fs_error_ratelimit(s, fmt, args...) \
+	__fat_fs_error(s, __ratelimit(&MSDOS_SB(s)->ratelimit), fmt , ## args)
 extern int fat_clusters_flush(struct super_block *sb);
 extern int fat_chain_add(struct inode *inode, int new_dclus, int nr_cluster);
 extern void fat_time_fat2unix(struct msdos_sb_info *sbi, struct timespec *ts,
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index c611818..ed33904 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -1250,6 +1250,8 @@
 	sb->s_op = &fat_sops;
 	sb->s_export_op = &fat_export_ops;
 	sbi->dir_ops = fs_dir_inode_ops;
+	ratelimit_state_init(&sbi->ratelimit, DEFAULT_RATELIMIT_INTERVAL,
+			     DEFAULT_RATELIMIT_BURST);
 
 	error = parse_options(data, isvfat, silent, &debug, &sbi->options);
 	if (error)
diff --git a/fs/fat/misc.c b/fs/fat/misc.c
index d3da05f..1fa23f6 100644
--- a/fs/fat/misc.c
+++ b/fs/fat/misc.c
@@ -20,27 +20,29 @@
  * In case the file system is remounted read-only, it can be made writable
  * again by remounting it.
  */
-void fat_fs_error(struct super_block *s, const char *fmt, ...)
+void __fat_fs_error(struct super_block *s, int report, const char *fmt, ...)
 {
 	struct fat_mount_options *opts = &MSDOS_SB(s)->options;
 	va_list args;
 
-	printk(KERN_ERR "FAT: Filesystem error (dev %s)\n", s->s_id);
+	if (report) {
+		printk(KERN_ERR "FAT: Filesystem error (dev %s)\n", s->s_id);
 
-	printk(KERN_ERR "    ");
-	va_start(args, fmt);
-	vprintk(fmt, args);
-	va_end(args);
-	printk("\n");
+		printk(KERN_ERR "    ");
+		va_start(args, fmt);
+		vprintk(fmt, args);
+		va_end(args);
+		printk("\n");
+	}
 
 	if (opts->errors == FAT_ERRORS_PANIC)
-		panic("    FAT fs panic from previous error\n");
+		panic("FAT: fs panic from previous error\n");
 	else if (opts->errors == FAT_ERRORS_RO && !(s->s_flags & MS_RDONLY)) {
 		s->s_flags |= MS_RDONLY;
-		printk(KERN_ERR "    File system has been set read-only\n");
+		printk(KERN_ERR "FAT: Filesystem has been set read-only\n");
 	}
 }
-EXPORT_SYMBOL_GPL(fat_fs_error);
+EXPORT_SYMBOL_GPL(__fat_fs_error);
 
 /* Flushes the number of free clusters on FAT32 */
 /* XXX: Need to write one per FSINFO block.  Currently only writes 1 */
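The rework above separates deciding whether to print from applying the error
policy: fat_fs_error_ratelimit() evaluates __ratelimit() at the call site and
passes the verdict in as "report", so the panic/remount-read-only handling
still runs even when the message itself is suppressed. A minimal standalone
sketch of that shape (illustrative only; fs_error() and apply_error_policy()
are hypothetical names, not part of the patch):

	#include <stdarg.h>
	#include <stdio.h>

	static void apply_error_policy(void)
	{
		/* stands in for the FAT_ERRORS_PANIC / FAT_ERRORS_RO handling */
	}

	static void fs_error(int report, const char *fmt, ...)
	{
		if (report) {		/* the message may be rate-limited away */
			va_list args;

			va_start(args, fmt);
			vfprintf(stderr, fmt, args);
			va_end(args);
		}
		apply_error_policy();	/* ...but the policy always applies */
	}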
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 5c4161f..ea8592b 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -409,11 +409,11 @@
 	wait_queue_head_t *wqh;
 
 	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
-	do {
+	while (inode->i_state & I_SYNC) {
 		spin_unlock(&inode_lock);
 		__wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
 		spin_lock(&inode_lock);
-	} while (inode->i_state & I_SYNC);
+	}
 }
 
 /*
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 2f8b1157..04214fc 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -1060,7 +1060,7 @@
 				goto out_nomem;
 			rc = strict_strtoul(string, 10, &option);
 			kfree(string);
-			if (rc != 0 || option > USHORT_MAX)
+			if (rc != 0 || option > USHRT_MAX)
 				goto out_invalid_value;
 			mnt->nfs_server.port = option;
 			break;
@@ -1181,7 +1181,7 @@
 				goto out_nomem;
 			rc = strict_strtoul(string, 10, &option);
 			kfree(string);
-			if (rc != 0 || option > USHORT_MAX)
+			if (rc != 0 || option > USHRT_MAX)
 				goto out_invalid_value;
 			mnt->mount_server.port = option;
 			break;
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index bc3194e..508941c 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -998,7 +998,7 @@
 	if (sscanf(buf, "%15s %4u", transport, &port) != 2)
 		return -EINVAL;
 
-	if (port < 1 || port > USHORT_MAX)
+	if (port < 1 || port > USHRT_MAX)
 		return -EINVAL;
 
 	err = nfsd_create_serv();
@@ -1040,7 +1040,7 @@
 	if (sscanf(&buf[1], "%15s %4u", transport, &port) != 2)
 		return -EINVAL;
 
-	if (port < 1 || port > USHORT_MAX || nfsd_serv == NULL)
+	if (port < 1 || port > USHRT_MAX || nfsd_serv == NULL)
 		return -EINVAL;
 
 	xprt = svc_find_xprt(nfsd_serv, transport, AF_UNSPEC, port);
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index 8804f09..a1924a0 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -98,9 +98,6 @@
  * the page at all.  For a more detailed explanation see ntfs_truncate() in
  * fs/ntfs/inode.c.
  *
- * @cached_page and @lru_pvec are just optimizations for dealing with multiple
- * pages.
- *
  * Return 0 on success and -errno on error.  In the case that an error is
  * encountered it is possible that the initialized size will already have been
  * incremented some way towards @new_init_size but it is guaranteed that if
@@ -110,8 +107,7 @@
 * Locking: i_mutex on the vfs inode corresponding to the ntfs inode @ni must be
  *	    held by the caller.
  */
-static int ntfs_attr_extend_initialized(ntfs_inode *ni, const s64 new_init_size,
-		struct page **cached_page, struct pagevec *lru_pvec)
+static int ntfs_attr_extend_initialized(ntfs_inode *ni, const s64 new_init_size)
 {
 	s64 old_init_size;
 	loff_t old_i_size;
@@ -403,18 +399,13 @@
  * Obtain @nr_pages locked page cache pages from the mapping @mapping and
  * starting at index @index.
  *
- * If a page is newly created, increment its refcount and add it to the
- * caller's lru-buffering pagevec @lru_pvec.
- *
- * This is the same as mm/filemap.c::__grab_cache_page(), except that @nr_pages
- * are obtained at once instead of just one page and that 0 is returned on
- * success and -errno on error.
+ * If a page is newly created, add it to the LRU list.
  *
  * Note, the page locks are obtained in ascending page index order.
  */
 static inline int __ntfs_grab_cache_pages(struct address_space *mapping,
 		pgoff_t index, const unsigned nr_pages, struct page **pages,
-		struct page **cached_page, struct pagevec *lru_pvec)
+		struct page **cached_page)
 {
 	int err, nr;
 
@@ -430,7 +421,7 @@
 					goto err_out;
 				}
 			}
-			err = add_to_page_cache(*cached_page, mapping, index,
+			err = add_to_page_cache_lru(*cached_page, mapping, index,
 					GFP_KERNEL);
 			if (unlikely(err)) {
 				if (err == -EEXIST)
@@ -438,9 +429,6 @@
 				goto err_out;
 			}
 			pages[nr] = *cached_page;
-			page_cache_get(*cached_page);
-			if (unlikely(!pagevec_add(lru_pvec, *cached_page)))
-				__pagevec_lru_add_file(lru_pvec);
 			*cached_page = NULL;
 		}
 		index++;
@@ -1800,7 +1788,6 @@
 	ssize_t status, written;
 	unsigned nr_pages;
 	int err;
-	struct pagevec lru_pvec;
 
 	ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, "
 			"pos 0x%llx, count 0x%lx.",
@@ -1912,7 +1899,6 @@
 			}
 		}
 	}
-	pagevec_init(&lru_pvec, 0);
 	written = 0;
 	/*
 	 * If the write starts beyond the initialized size, extend it up to the
@@ -1925,8 +1911,7 @@
 	ll = ni->initialized_size;
 	read_unlock_irqrestore(&ni->size_lock, flags);
 	if (pos > ll) {
-		err = ntfs_attr_extend_initialized(ni, pos, &cached_page,
-				&lru_pvec);
+		err = ntfs_attr_extend_initialized(ni, pos);
 		if (err < 0) {
 			ntfs_error(vol->sb, "Cannot perform write to inode "
 					"0x%lx, attribute type 0x%x, because "
@@ -2012,7 +1997,7 @@
 			ntfs_fault_in_pages_readable_iovec(iov, iov_ofs, bytes);
 		/* Get and lock @do_pages starting at index @start_idx. */
 		status = __ntfs_grab_cache_pages(mapping, start_idx, do_pages,
-				pages, &cached_page, &lru_pvec);
+				pages, &cached_page);
 		if (unlikely(status))
 			break;
 		/*
@@ -2077,7 +2062,6 @@
 	*ppos = pos;
 	if (cached_page)
 		page_cache_release(cached_page);
-	pagevec_lru_add_file(&lru_pvec);
 	ntfs_debug("Done.  Returning %s (written 0x%lx, status %li).",
 			written ? "written" : "status", (unsigned long)written,
 			(long)status);
diff --git a/fs/ocfs2/blockcheck.c b/fs/ocfs2/blockcheck.c
index b7428c5..ec6d123 100644
--- a/fs/ocfs2/blockcheck.c
+++ b/fs/ocfs2/blockcheck.c
@@ -403,7 +403,7 @@
 	 * No ecc'd ocfs2 structure is larger than 4K, so ecc will be no
 	 * larger than 16 bits.
 	 */
-	BUG_ON(ecc > USHORT_MAX);
+	BUG_ON(ecc > USHRT_MAX);
 
 	bc->bc_crc32e = cpu_to_le32(crc);
 	bc->bc_ecc = cpu_to_le16((u16)ecc);
@@ -508,7 +508,7 @@
 	 * No ecc'd ocfs2 structure is larger than 4K, so ecc will be no
 	 * larger than 16 bits.
 	 */
-	BUG_ON(ecc > USHORT_MAX);
+	BUG_ON(ecc > USHRT_MAX);
 
 	bc->bc_crc32e = cpu_to_le32(crc);
 	bc->bc_ecc = cpu_to_le16((u16)ecc);
diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
index 3ceca05..648c9d8 100644
--- a/fs/partitions/ldm.c
+++ b/fs/partitions/ldm.c
@@ -26,6 +26,7 @@
 #include <linux/slab.h>
 #include <linux/pagemap.h>
 #include <linux/stringify.h>
+#include <linux/kernel.h>
 #include "ldm.h"
 #include "check.h"
 #include "msdos.h"
@@ -77,17 +78,16 @@
 	int h;
 
 	/* high part */
-	if      ((x = src[0] - '0') <= '9'-'0') h = x;
-	else if ((x = src[0] - 'a') <= 'f'-'a') h = x+10;
-	else if ((x = src[0] - 'A') <= 'F'-'A') h = x+10;
-	else return -1;
-	h <<= 4;
+	x = h = hex_to_bin(src[0]);
+	if (h < 0)
+		return -1;
 
 	/* low part */
-	if ((x = src[1] - '0') <= '9'-'0') return h | x;
-	if ((x = src[1] - 'a') <= 'f'-'a') return h | (x+10);
-	if ((x = src[1] - 'A') <= 'F'-'A') return h | (x+10);
-	return -1;
+	h = hex_to_bin(src[1]);
+	if (h < 0)
+		return -1;
+
+	return (x << 4) + h;
 }
 
 /**
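The rewrite above drops three open-coded range checks per digit in favour of
hex_to_bin(), which is added in the lib/hexdump.c hunk later in this diff and
returns -1 for a non-hex character. Parsing one byte then reduces to two
conversions and a sign check; an equivalent standalone helper
(parse_hex_byte() is a hypothetical name):

	static int parse_hex_byte(const char *src)
	{
		int hi = hex_to_bin(src[0]);	/* -1 on a non-hex digit */
		int lo = hex_to_bin(src[1]);

		if (hi < 0 || lo < 0)
			return -1;
		return (hi << 4) | lo;		/* 0x00..0xff */
	}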
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 47f5b14..aea1d3f 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -634,6 +634,7 @@
 	return err;
 }
 
+#ifdef CONFIG_HUGETLB_PAGE
 static u64 huge_pte_to_pagemap_entry(pte_t pte, int offset)
 {
 	u64 pme = 0;
@@ -664,6 +665,7 @@
 
 	return err;
 }
+#endif /* CONFIG_HUGETLB_PAGE */
 
 /*
  * /proc/pid/pagemap - an array mapping virtual pages to pfns
@@ -733,7 +735,9 @@
 
 	pagemap_walk.pmd_entry = pagemap_pte_range;
 	pagemap_walk.pte_hole = pagemap_pte_hole;
+#ifdef CONFIG_HUGETLB_PAGE
 	pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
+#endif
 	pagemap_walk.mm = mm;
 	pagemap_walk.private = &pm;
 
diff --git a/fs/smbfs/symlink.c b/fs/smbfs/symlink.c
index 54350b5..00b2909 100644
--- a/fs/smbfs/symlink.c
+++ b/fs/smbfs/symlink.c
@@ -15,7 +15,6 @@
 #include <linux/pagemap.h>
 #include <linux/net.h>
 #include <linux/namei.h>
-#include <linux/slab.h>
 
 #include <asm/uaccess.h>
 #include <asm/system.h>
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index c33749f..058129e 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -30,8 +30,7 @@
  * atomic_read - read atomic variable
  * @v: pointer of type atomic_t
  *
- * Atomically reads the value of @v.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically reads the value of @v.
  */
 #define atomic_read(v)	(*(volatile int *)&(v)->counter)
 
@@ -40,8 +39,7 @@
  * @v: pointer of type atomic_t
  * @i: required value
  *
- * Atomically sets the value of @v to @i.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically sets the value of @v to @i.
  */
 #define atomic_set(v, i) (((v)->counter) = (i))
 
@@ -53,7 +51,6 @@
  * @v: pointer of type atomic_t
  *
  * Atomically adds @i to @v and returns the result
- * Note that the guaranteed useful range of an atomic_t is only 24 bits.
  */
 static inline int atomic_add_return(int i, atomic_t *v)
 {
@@ -75,7 +72,6 @@
  * @v: pointer of type atomic_t
  *
  * Atomically subtracts @i from @v and returns the result
- * Note that the guaranteed useful range of an atomic_t is only 24 bits.
  */
 static inline int atomic_sub_return(int i, atomic_t *v)
 {
diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
index 97e807c..0232ccb 100644
--- a/include/asm-generic/kmap_types.h
+++ b/include/asm-generic/kmap_types.h
@@ -29,6 +29,9 @@
 KMAP_D(17)	KM_NMI,
 KMAP_D(18)	KM_NMI_PTE,
 KMAP_D(19)	KM_KDB,
+/*
+ * Remember to update debug_kmap_atomic() when adding new kmap types!
+ */
 KMAP_D(20)	KM_TYPE_NR
 };
 
diff --git a/include/linux/byteorder/big_endian.h b/include/linux/byteorder/big_endian.h
index 3c80fd7..d53a67d 100644
--- a/include/linux/byteorder/big_endian.h
+++ b/include/linux/byteorder/big_endian.h
@@ -7,6 +7,9 @@
 #ifndef __BIG_ENDIAN_BITFIELD
 #define __BIG_ENDIAN_BITFIELD
 #endif
+#ifndef __BYTE_ORDER
+#define __BYTE_ORDER __BIG_ENDIAN
+#endif
 
 #include <linux/types.h>
 #include <linux/swab.h>
diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
index 83195fb..f7f8ad1 100644
--- a/include/linux/byteorder/little_endian.h
+++ b/include/linux/byteorder/little_endian.h
@@ -7,6 +7,9 @@
 #ifndef __LITTLE_ENDIAN_BITFIELD
 #define __LITTLE_ENDIAN_BITFIELD
 #endif
+#ifndef __BYTE_ORDER
+#define __BYTE_ORDER __LITTLE_ENDIAN
+#endif
 
 #include <linux/types.h>
 #include <linux/swab.h>
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
new file mode 100644
index 0000000..5ac5155
--- /dev/null
+++ b/include/linux/compaction.h
@@ -0,0 +1,89 @@
+#ifndef _LINUX_COMPACTION_H
+#define _LINUX_COMPACTION_H
+
+/* Return values for compact_zone() and try_to_compact_pages() */
+/* compaction didn't start as it was not possible or direct reclaim was more suitable */
+#define COMPACT_SKIPPED		0
+/* compaction should continue to another pageblock */
+#define COMPACT_CONTINUE	1
+/* direct compaction partially compacted a zone and there are suitable pages */
+#define COMPACT_PARTIAL		2
+/* The full zone was compacted */
+#define COMPACT_COMPLETE	3
+
+#ifdef CONFIG_COMPACTION
+extern int sysctl_compact_memory;
+extern int sysctl_compaction_handler(struct ctl_table *table, int write,
+			void __user *buffer, size_t *length, loff_t *ppos);
+extern int sysctl_extfrag_threshold;
+extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
+			void __user *buffer, size_t *length, loff_t *ppos);
+
+extern int fragmentation_index(struct zone *zone, unsigned int order);
+extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
+			int order, gfp_t gfp_mask, nodemask_t *mask);
+
+/* Do not skip compaction more than 64 times */
+#define COMPACT_MAX_DEFER_SHIFT 6
+
+/*
+ * Compaction is deferred when compaction fails to result in a page
+ * allocation success. 1 << compact_defer_shift compactions are skipped up
+ * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT.
+ */
+static inline void defer_compaction(struct zone *zone)
+{
+	zone->compact_considered = 0;
+	zone->compact_defer_shift++;
+
+	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
+		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
+}
+
+/* Returns true if compaction should be skipped this time */
+static inline bool compaction_deferred(struct zone *zone)
+{
+	unsigned long defer_limit = 1UL << zone->compact_defer_shift;
+
+	/* Avoid possible overflow */
+	if (++zone->compact_considered > defer_limit)
+		zone->compact_considered = defer_limit;
+
+	return zone->compact_considered < defer_limit;
+}
+
+#else
+static inline unsigned long try_to_compact_pages(struct zonelist *zonelist,
+			int order, gfp_t gfp_mask, nodemask_t *nodemask)
+{
+	return COMPACT_CONTINUE;
+}
+
+static inline void defer_compaction(struct zone *zone)
+{
+}
+
+static inline bool compaction_deferred(struct zone *zone)
+{
+	return true;
+}
+
+#endif /* CONFIG_COMPACTION */
+
+#if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
+extern int compaction_register_node(struct node *node);
+extern void compaction_unregister_node(struct node *node);
+
+#else
+
+static inline int compaction_register_node(struct node *node)
+{
+	return 0;
+}
+
+static inline void compaction_unregister_node(struct node *node)
+{
+}
+#endif /* CONFIG_COMPACTION && CONFIG_SYSFS && CONFIG_NUMA */
+
+#endif /* _LINUX_COMPACTION_H */
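defer_compaction() and compaction_deferred() together implement exponential
backoff: each failure doubles the number of requests that skip compaction,
capped at 1 << COMPACT_MAX_DEFER_SHIFT (64). A hedged sketch of the intended
call pattern in the allocator slow path (the real call sites live in
mm/page_alloc.c, which is not part of this hunk, and the variable names here
are assumed):

	if (order > 0 && !compaction_deferred(preferred_zone)) {
		unsigned long rc;

		rc = try_to_compact_pages(zonelist, order, gfp_mask, nodemask);
		if (rc != COMPACT_PARTIAL)
			defer_compaction(preferred_zone);  /* skip 1, 2, 4, ... attempts */
	}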
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index a73454a..20b51ca 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -86,9 +86,44 @@
 
 extern void cpuset_print_task_mems_allowed(struct task_struct *p);
 
+/*
+ * Reading the current task's mems_allowed and mempolicy in the fastpath
+ * must be protected by get_mems_allowed().
+ */
+static inline void get_mems_allowed(void)
+{
+	current->mems_allowed_change_disable++;
+
+	/*
+	 * Ensure that reading mems_allowed and mempolicy happens after the
+	 * update of ->mems_allowed_change_disable.
+	 *
+	 * The write-side task then sees that ->mems_allowed_change_disable
+	 * is not 0, knows a read-side task is reading mems_allowed or
+	 * mempolicy, and clears the old bits lazily.
+	 */
+	smp_mb();
+}
+
+static inline void put_mems_allowed(void)
+{
+	/*
+	 * Ensure that mems_allowed and mempolicy are read before
+	 * ->mems_allowed_change_disable is decremented.
+	 *
+	 * The write-side task then knows that a read-side task is still
+	 * reading mems_allowed or mempolicy and does not clear the old
+	 * bits in the nodemask.
+	 */
+	smp_mb();
+	--ACCESS_ONCE(current->mems_allowed_change_disable);
+}
+
 static inline void set_mems_allowed(nodemask_t nodemask)
 {
+	task_lock(current);
 	current->mems_allowed = nodemask;
+	task_unlock(current);
 }
 
 #else /* !CONFIG_CPUSETS */
@@ -187,6 +222,14 @@
 {
 }
 
+static inline void get_mems_allowed(void)
+{
+}
+
+static inline void put_mems_allowed(void)
+{
+}
+
 #endif /* !CONFIG_CPUSETS */
 
 #endif /* _LINUX_CPUSET_H */
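get_mems_allowed()/put_mems_allowed() bracket every fastpath read of
mems_allowed or the task mempolicy; the per-task counter plus the barrier
pairing lets the cpuset writer detect in-flight readers and postpone clearing
the old nodemask bits (see the kernel/cpuset.c change later in this diff). A
sketch of a reader using the mm API of this kernel generation (the wrapper
function itself is hypothetical):

	static struct page *alloc_page_respecting_cpuset(gfp_t gfp_mask,
							 unsigned int order)
	{
		struct page *page;

		get_mems_allowed();	/* pin a consistent nodemask view */
		page = __alloc_pages_nodemask(gfp_mask, order,
				node_zonelist(numa_node_id(), gfp_mask),
				&current->mems_allowed);
		put_mems_allowed();	/* writers may now clear stale bits */
		return page;
	}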
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h
index f8c2e17..b3cd4de 100644
--- a/include/linux/dynamic_debug.h
+++ b/include/linux/dynamic_debug.h
@@ -28,7 +28,7 @@
 	/*
  	 * The flags field controls the behaviour at the callsite.
  	 * The bits here are changed dynamically when the user
- 	 * writes commands to <debugfs>/dynamic_debug/ddebug
+	 * writes commands to <debugfs>/dynamic_debug/control
 	 */
 #define _DPRINTK_FLAGS_PRINT   (1<<0)  /* printk() a message using the format */
 #define _DPRINTK_FLAGS_DEFAULT 0
diff --git a/include/linux/err.h b/include/linux/err.h
index 1b12642..448afc1 100644
--- a/include/linux/err.h
+++ b/include/linux/err.h
@@ -19,22 +19,22 @@
 
 #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
 
-static inline void *ERR_PTR(long error)
+static inline void * __must_check ERR_PTR(long error)
 {
 	return (void *) error;
 }
 
-static inline long PTR_ERR(const void *ptr)
+static inline long __must_check PTR_ERR(const void *ptr)
 {
 	return (long) ptr;
 }
 
-static inline long IS_ERR(const void *ptr)
+static inline long __must_check IS_ERR(const void *ptr)
 {
 	return IS_ERR_VALUE((unsigned long)ptr);
 }
 
-static inline long IS_ERR_OR_NULL(const void *ptr)
+static inline long __must_check IS_ERR_OR_NULL(const void *ptr)
 {
 	return !ptr || IS_ERR_VALUE((unsigned long)ptr);
 }
@@ -46,7 +46,7 @@
  * Explicitly cast an error-valued pointer to another pointer type in such a
  * way as to make it clear that's what's going on.
  */
-static inline void *ERR_CAST(const void *ptr)
+static inline void * __must_check ERR_CAST(const void *ptr)
 {
 	/* cast away the const */
 	return (void *) ptr;
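With __must_check (gcc's warn_unused_result) on the classification helpers,
silently discarding the result becomes a compile-time warning instead of a
latent bug. A sketch of what now gets flagged (foo_create() is a hypothetical
constructor that returns ERR_PTR() on failure):

	struct foo *p = foo_create();

	if (IS_ERR(p))			/* result used: no warning */
		return PTR_ERR(p);
	IS_ERR(p);			/* result discarded: -Wunused-result fires */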
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 1296af4..f3793eb 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -37,7 +37,7 @@
 #define FBIOGET_HWCINFO         0x4616
 #define FBIOPUT_MODEINFO        0x4617
 #define FBIOGET_DISPINFO        0x4618
-
+#define FBIO_WAITFORVSYNC	_IOW('F', 0x20, __u32)
 
 #define FB_TYPE_PACKED_PIXELS		0	/* Packed Pixels	*/
 #define FB_TYPE_PLANES			1	/* Non interleaved planes */
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 4c6d413..975609c 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -15,7 +15,7 @@
  * Zone modifiers (see linux/mmzone.h - low three bits)
  *
  * Do not put any conditional on these. If necessary modify the definitions
- * without the underscores and use the consistently. The definitions here may
+ * without the underscores and use them consistently. The definitions here may
  * be used in bit comparisons.
  */
 #define __GFP_DMA	((__force gfp_t)0x01u)
@@ -101,7 +101,7 @@
 			__GFP_NORETRY|__GFP_NOMEMALLOC)
 
 /* Control slab gfp mask during early boot */
-#define GFP_BOOT_MASK __GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS)
+#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS))
 
 /* Control allocation constraints */
 #define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)
@@ -152,12 +152,12 @@
  * GFP_ZONE_TABLE is a word size bitstring that is used for looking up the
  * zone to use given the lowest 4 bits of gfp_t. Entries are ZONE_SHIFT long
  * and there are 16 of them to cover all possible combinations of
- * __GFP_DMA, __GFP_DMA32, __GFP_MOVABLE and __GFP_HIGHMEM
+ * __GFP_DMA, __GFP_DMA32, __GFP_MOVABLE and __GFP_HIGHMEM.
  *
  * The zone fallback order is MOVABLE=>HIGHMEM=>NORMAL=>DMA32=>DMA.
  * But GFP_MOVABLE is not only a zone specifier but also an allocation
  * policy. Therefore __GFP_MOVABLE plus another zone selector is valid.
- * Only 1bit of the lowest 3 bit (DMA,DMA32,HIGHMEM) can be set to "1".
+ * Only 1 bit of the lowest 3 bits (DMA,DMA32,HIGHMEM) can be set to "1".
  *
  *       bit       result
  *       =================
@@ -187,7 +187,7 @@
 
 #define GFP_ZONE_TABLE ( \
 	(ZONE_NORMAL << 0 * ZONES_SHIFT)				\
-	| (OPT_ZONE_DMA << __GFP_DMA * ZONES_SHIFT) 			\
+	| (OPT_ZONE_DMA << __GFP_DMA * ZONES_SHIFT)			\
 	| (OPT_ZONE_HIGHMEM << __GFP_HIGHMEM * ZONES_SHIFT)		\
 	| (OPT_ZONE_DMA32 << __GFP_DMA32 * ZONES_SHIFT)			\
 	| (ZONE_NORMAL << __GFP_MOVABLE * ZONES_SHIFT)			\
@@ -197,7 +197,7 @@
 )
 
 /*
- * GFP_ZONE_BAD is a bitmap for all combination of __GFP_DMA, __GFP_DMA32
+ * GFP_ZONE_BAD is a bitmap for all combinations of __GFP_DMA, __GFP_DMA32
  * __GFP_HIGHMEM and __GFP_MOVABLE that are not permitted. One flag per
  * entry starting with bit 0. Bit is set if the combination is not
  * allowed.
@@ -320,17 +320,17 @@
 void free_pages_exact(void *virt, size_t size);
 
 #define __get_free_page(gfp_mask) \
-		__get_free_pages((gfp_mask),0)
+		__get_free_pages((gfp_mask), 0)
 
 #define __get_dma_pages(gfp_mask, order) \
-		__get_free_pages((gfp_mask) | GFP_DMA,(order))
+		__get_free_pages((gfp_mask) | GFP_DMA, (order))
 
 extern void __free_pages(struct page *page, unsigned int order);
 extern void free_pages(unsigned long addr, unsigned int order);
 extern void free_hot_cold_page(struct page *page, int cold);
 
 #define __free_page(page) __free_pages((page), 0)
-#define free_page(addr) free_pages((addr),0)
+#define free_page(addr) free_pages((addr), 0)
 
 void page_alloc_init(void);
 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 74152c0..caafd05 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -27,7 +27,7 @@
 
 #include <asm/kmap_types.h>
 
-#if defined(CONFIG_DEBUG_HIGHMEM) && defined(CONFIG_TRACE_IRQFLAGS_SUPPORT)
+#ifdef CONFIG_DEBUG_HIGHMEM
 
 void debug_kmap_atomic(enum km_type type);
 
diff --git a/include/linux/ivtvfb.h b/include/linux/ivtvfb.h
index 9d88b29..e8b92f6 100644
--- a/include/linux/ivtvfb.h
+++ b/include/linux/ivtvfb.h
@@ -33,6 +33,5 @@
 };
 
 #define IVTVFB_IOC_DMA_FRAME 	_IOW('V', BASE_VIDIOC_PRIVATE+0, struct ivtvfb_dma_frame)
-#define FBIO_WAITFORVSYNC	_IOW('F', 0x20, __u32)
 
 #endif
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index cc5e3ff..8317ec4 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -24,9 +24,9 @@
 extern const char linux_banner[];
 extern const char linux_proc_banner[];
 
-#define USHORT_MAX	((u16)(~0U))
-#define SHORT_MAX	((s16)(USHORT_MAX>>1))
-#define SHORT_MIN	(-SHORT_MAX - 1)
+#define USHRT_MAX	((u16)(~0U))
+#define SHRT_MAX	((s16)(USHRT_MAX>>1))
+#define SHRT_MIN	((s16)(-SHRT_MAX - 1))
 #define INT_MAX		((int)(~0U>>1))
 #define INT_MIN		(-INT_MAX - 1)
 #define UINT_MAX	(~0U)
@@ -375,6 +375,8 @@
 	return buf;
 }
 
+extern int hex_to_bin(char ch);
+
 #ifndef pr_fmt
 #define pr_fmt(fmt) fmt
 #endif
@@ -389,6 +391,7 @@
         printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
 #define pr_warning(fmt, ...) \
         printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_warn pr_warning
 #define pr_notice(fmt, ...) \
         printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
 #define pr_info(fmt, ...) \
@@ -423,14 +426,13 @@
  * no local ratelimit_state used in the !PRINTK case
  */
 #ifdef CONFIG_PRINTK
-#define printk_ratelimited(fmt, ...)  ({		\
-	static struct ratelimit_state _rs = {		\
-		.interval = DEFAULT_RATELIMIT_INTERVAL, \
-		.burst = DEFAULT_RATELIMIT_BURST,       \
-	};                                              \
-							\
-	if (__ratelimit(&_rs))                          \
-		printk(fmt, ##__VA_ARGS__);		\
+#define printk_ratelimited(fmt, ...)  ({				\
+	static DEFINE_RATELIMIT_STATE(_rs,				\
+				      DEFAULT_RATELIMIT_INTERVAL,	\
+				      DEFAULT_RATELIMIT_BURST);		\
+									\
+	if (__ratelimit(&_rs))						\
+		printk(fmt, ##__VA_ARGS__);				\
 })
 #else
 /* No effect, but we still get type checking even in the !PRINTK case: */
@@ -447,6 +449,7 @@
 	printk_ratelimited(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
 #define pr_warning_ratelimited(fmt, ...) \
 	printk_ratelimited(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_warn_ratelimited pr_warning_ratelimited
 #define pr_notice_ratelimited(fmt, ...) \
 	printk_ratelimited(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
 #define pr_info_ratelimited(fmt, ...) \
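The printk_ratelimited() rewrite above swaps the open-coded struct initializer
for DEFINE_RATELIMIT_STATE(), which also statically initializes the embedded
spinlock (see the include/linux/ratelimit.h hunk later in this diff). Call
sites are unchanged; with the defaults a caller emits at most 10 messages per
5*HZ window, e.g. (hypothetical driver snippet):

	if (unlikely(dropped))
		printk_ratelimited(KERN_WARNING "%s: dropped %d frames\n",
				   dev_name, dropped);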
diff --git a/include/linux/lis3lv02d.h b/include/linux/lis3lv02d.h
index f1ca0dc..0e8a346 100644
--- a/include/linux/lis3lv02d.h
+++ b/include/linux/lis3lv02d.h
@@ -25,12 +25,14 @@
 #define LIS3_IRQ1_FF_WU_12	(3 << 0)
 #define LIS3_IRQ1_DATA_READY	(4 << 0)
 #define LIS3_IRQ1_CLICK		(7 << 0)
+#define LIS3_IRQ1_MASK		(7 << 0)
 #define LIS3_IRQ2_DISABLE	(0 << 3)
 #define LIS3_IRQ2_FF_WU_1	(1 << 3)
 #define LIS3_IRQ2_FF_WU_2	(2 << 3)
 #define LIS3_IRQ2_FF_WU_12	(3 << 3)
 #define LIS3_IRQ2_DATA_READY	(4 << 3)
 #define LIS3_IRQ2_CLICK		(7 << 3)
+#define LIS3_IRQ2_MASK		(7 << 3)
 #define LIS3_IRQ_OPEN_DRAIN	(1 << 6)
 #define LIS3_IRQ_ACTIVE_LOW	(1 << 7)
 	unsigned char irq_cfg;
@@ -43,6 +45,15 @@
 #define LIS3_WAKEUP_Z_HI	(1 << 5)
 	unsigned char wakeup_flags;
 	unsigned char wakeup_thresh;
+	unsigned char wakeup_flags2;
+	unsigned char wakeup_thresh2;
+#define LIS3_HIPASS_CUTFF_8HZ   0
+#define LIS3_HIPASS_CUTFF_4HZ   1
+#define LIS3_HIPASS_CUTFF_2HZ   2
+#define LIS3_HIPASS_CUTFF_1HZ   3
+#define LIS3_HIPASS1_DISABLE    (1 << 2)
+#define LIS3_HIPASS2_DISABLE    (1 << 3)
+	unsigned char hipass_ctrl;
 #define LIS3_NO_MAP		0
 #define LIS3_DEV_X		1
 #define LIS3_DEV_Y		2
@@ -58,6 +69,7 @@
 	/* Limits for selftest are specified in chip data sheet */
 	s16 st_min_limits[3]; /* min pass limit x, y, z */
 	s16 st_max_limits[3]; /* max pass limit x, y, z */
+	int irq2;
 };
 
 #endif /* __LIS3LV02D_H_ */
diff --git a/include/linux/matroxfb.h b/include/linux/matroxfb.h
index 2203121..8c22a89 100644
--- a/include/linux/matroxfb.h
+++ b/include/linux/matroxfb.h
@@ -4,6 +4,7 @@
 #include <asm/ioctl.h>
 #include <linux/types.h>
 #include <linux/videodev2.h>
+#include <linux/fb.h>
 
 struct matroxioc_output_mode {
 	__u32	output;		/* which output */
@@ -37,7 +38,5 @@
   MATROXFB_CID_LAST
 };
 
-#define FBIO_WAITFORVSYNC	_IOW('F', 0x20, __u32)
-
 #endif
 
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 44301c6..0589479 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -25,6 +25,13 @@
 struct page;
 struct mm_struct;
 
+extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
+					struct list_head *dst,
+					unsigned long *scanned, int order,
+					int mode, struct zone *z,
+					struct mem_cgroup *mem_cont,
+					int active, int file);
+
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
 /*
  * All "charge" functions with gfp_mask should use GFP_KERNEL or
@@ -64,12 +71,6 @@
 extern int mem_cgroup_shmem_charge_fallback(struct page *page,
 			struct mm_struct *mm, gfp_t gfp_mask);
 
-extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
-					struct list_head *dst,
-					unsigned long *scanned, int order,
-					int mode, struct zone *z,
-					struct mem_cgroup *mem_cont,
-					int active, int file);
 extern void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask);
 int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem);
 
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 35b07b7..864035f 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -202,6 +202,7 @@
 }
 #endif /* CONFIG_MEMORY_HOTREMOVE */
 
+extern int mem_online_node(int nid);
 extern int add_memory(int nid, u64 start, u64 size);
 extern int arch_add_memory(int nid, u64 start, u64 size);
 extern int remove_memory(u64 start, u64 size);
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 1cc966c..7b9ef6b 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -23,6 +23,13 @@
 	MPOL_MAX,	/* always last member of enum */
 };
 
+enum mpol_rebind_step {
+	MPOL_REBIND_ONCE,	/* do rebind work at once (not in two steps) */
+	MPOL_REBIND_STEP1,	/* first step (set all the new nodes) */
+	MPOL_REBIND_STEP2,	/* second step (clear all the disallowed nodes) */
+	MPOL_REBIND_NSTEP,
+};
+
 /* Flags for set_mempolicy */
 #define MPOL_F_STATIC_NODES	(1 << 15)
 #define MPOL_F_RELATIVE_NODES	(1 << 14)
@@ -51,6 +58,7 @@
  */
 #define MPOL_F_SHARED  (1 << 0)	/* identify shared policies */
 #define MPOL_F_LOCAL   (1 << 1)	/* preferred local allocation */
+#define MPOL_F_REBINDING (1 << 2)	/* identify policies in rebinding */
 
 #ifdef __KERNEL__
 
@@ -193,8 +201,8 @@
 
 extern void numa_default_policy(void);
 extern void numa_policy_init(void);
-extern void mpol_rebind_task(struct task_struct *tsk,
-					const nodemask_t *new);
+extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
+				enum mpol_rebind_step step);
 extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
 extern void mpol_fix_fork_child_flag(struct task_struct *p);
 
@@ -308,7 +316,8 @@
 }
 
 static inline void mpol_rebind_task(struct task_struct *tsk,
-					const nodemask_t *new)
+				const nodemask_t *new,
+				enum mpol_rebind_step step)
 {
 }
 
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 7f085c9..7238231 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -9,7 +9,7 @@
 #ifdef CONFIG_MIGRATION
 #define PAGE_MIGRATION 1
 
-extern int putback_lru_pages(struct list_head *l);
+extern void putback_lru_pages(struct list_head *l);
 extern int migrate_page(struct address_space *,
 			struct page *, struct page *);
 extern int migrate_pages(struct list_head *l, new_page_t x,
@@ -19,17 +19,19 @@
 			struct page *, struct page *);
 
 extern int migrate_prep(void);
+extern int migrate_prep_local(void);
 extern int migrate_vmas(struct mm_struct *mm,
 		const nodemask_t *from, const nodemask_t *to,
 		unsigned long flags);
 #else
 #define PAGE_MIGRATION 0
 
-static inline int putback_lru_pages(struct list_head *l) { return 0; }
+static inline void putback_lru_pages(struct list_head *l) {}
 static inline int migrate_pages(struct list_head *l, new_page_t x,
 		unsigned long private, int offlining) { return -ENOSYS; }
 
 static inline int migrate_prep(void) { return -ENOSYS; }
+static inline int migrate_prep_local(void) { return -ENOSYS; }
 
 static inline int migrate_vmas(struct mm_struct *mm,
 		const nodemask_t *from, const nodemask_t *to,
diff --git a/include/linux/mm.h b/include/linux/mm.h
index fb19bb9..b969efb 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -13,6 +13,7 @@
 #include <linux/debug_locks.h>
 #include <linux/mm_types.h>
 #include <linux/range.h>
+#include <linux/pfn.h>
 
 struct mempolicy;
 struct anon_vma;
@@ -106,6 +107,9 @@
 #define VM_PFN_AT_MMAP	0x40000000	/* PFNMAP vma that is fully mapped at mmap time */
 #define VM_MERGEABLE	0x80000000	/* KSM may merge identical pages */
 
+/* Bits set in the VMA until the stack is in its final location */
+#define VM_STACK_INCOMPLETE_SETUP	(VM_RAND_READ | VM_SEQ_READ)
+
 #ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
 #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
 #endif
@@ -334,6 +338,7 @@
 void put_pages_list(struct list_head *pages);
 
 void split_page(struct page *page, unsigned int order);
+int split_free_page(struct page *page);
 
 /*
  * Compound pages have a destructor function.  Provide a
@@ -591,7 +596,7 @@
 
 static __always_inline void *lowmem_page_address(struct page *page)
 {
-	return __va(page_to_pfn(page) << PAGE_SHIFT);
+	return __va(PFN_PHYS(page_to_pfn(page)));
 }
 
 #if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
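VM_STACK_INCOMPLETE_SETUP works because VM_RAND_READ and VM_SEQ_READ are
mutually exclusive madvise() hints, so their combination never appears on a
fully set up VMA and can safely mark the bprm stack while exec is still
relocating it (see the fs/exec.c hunks at the top of this diff). In outline:

	/* at stack creation */
	vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
	/* ... stack moved and mprotect_fixup() applied ... */
	vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;	/* setup complete */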
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index cf9e458..0fa4913 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -321,6 +321,15 @@
 	unsigned long		*pageblock_flags;
 #endif /* CONFIG_SPARSEMEM */
 
+#ifdef CONFIG_COMPACTION
+	/*
+	 * On compaction failure, 1<<compact_defer_shift compactions
+	 * are skipped before trying again. The number attempted since
+	 * last failure is tracked with compact_considered.
+	 */
+	unsigned int		compact_considered;
+	unsigned int		compact_defer_shift;
+#endif
 
 	ZONE_PADDING(_pad1_)
 
@@ -641,9 +650,10 @@
 
 #include <linux/memory_hotplug.h>
 
+extern struct mutex zonelists_mutex;
 void get_zone_counts(unsigned long *active, unsigned long *inactive,
 			unsigned long *free);
-void build_all_zonelists(void);
+void build_all_zonelists(void *data);
 void wakeup_kswapd(struct zone *zone, int order);
 int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
 		int classzone_idx, int alloc_flags);
@@ -972,7 +982,7 @@
 #endif
 
 #define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
-#define NR_SECTION_ROOTS	(NR_MEM_SECTIONS / SECTIONS_PER_ROOT)
+#define NR_SECTION_ROOTS	DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT)
 #define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)
 
 #ifdef CONFIG_SPARSEMEM_EXTREME
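The NR_SECTION_ROOTS change matters whenever NR_MEM_SECTIONS is not a multiple
of SECTIONS_PER_ROOT: truncating division loses the final, partially filled
root. A worked example with assumed values:

	/* assume NR_MEM_SECTIONS = 10 and SECTIONS_PER_ROOT = 4 */
	10 / 4			/* old macro: 2 roots, sections 8-9 unreachable */
	DIV_ROUND_UP(10, 4)	/* (10 + 4 - 1) / 4 = 3 roots, all covered */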
diff --git a/include/linux/ratelimit.h b/include/linux/ratelimit.h
index 668cf1b..8f69d09 100644
--- a/include/linux/ratelimit.h
+++ b/include/linux/ratelimit.h
@@ -2,7 +2,7 @@
 #define _LINUX_RATELIMIT_H
 
 #include <linux/param.h>
-#include <linux/spinlock_types.h>
+#include <linux/spinlock.h>
 
 #define DEFAULT_RATELIMIT_INTERVAL	(5 * HZ)
 #define DEFAULT_RATELIMIT_BURST		10
@@ -25,6 +25,17 @@
 		.burst		= burst_init,				\
 	}
 
+static inline void ratelimit_state_init(struct ratelimit_state *rs,
+					int interval, int burst)
+{
+	spin_lock_init(&rs->lock);
+	rs->interval = interval;
+	rs->burst = burst;
+	rs->printed = 0;
+	rs->missed = 0;
+	rs->begin = 0;
+}
+
 extern int ___ratelimit(struct ratelimit_state *rs, const char *func);
 #define __ratelimit(state) ___ratelimit(state, __func__)
 
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index d25bd22..7721674 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -26,8 +26,17 @@
  */
 struct anon_vma {
 	spinlock_t lock;	/* Serialize access to vma list */
-#ifdef CONFIG_KSM
-	atomic_t ksm_refcount;
+#if defined(CONFIG_KSM) || defined(CONFIG_MIGRATION)
+
+	/*
+	 * The external_refcount is taken by either KSM or page migration
+	 * to take a reference to an anon_vma when there is no
+	 * guarantee that the vma or its page tables will exist
+	 * for the duration of the operation. A caller that takes
+	 * the reference is responsible for cleaning up the
+	 * anon_vma if it is the last user on release.
+	 */
+	atomic_t external_refcount;
 #endif
 	/*
 	 * NOTE: the LSB of the head.next is set by
@@ -61,22 +70,22 @@
 };
 
 #ifdef CONFIG_MMU
-#ifdef CONFIG_KSM
-static inline void ksm_refcount_init(struct anon_vma *anon_vma)
+#if defined(CONFIG_KSM) || defined(CONFIG_MIGRATION)
+static inline void anonvma_external_refcount_init(struct anon_vma *anon_vma)
 {
-	atomic_set(&anon_vma->ksm_refcount, 0);
+	atomic_set(&anon_vma->external_refcount, 0);
 }
 
-static inline int ksm_refcount(struct anon_vma *anon_vma)
+static inline int anonvma_external_refcount(struct anon_vma *anon_vma)
 {
-	return atomic_read(&anon_vma->ksm_refcount);
+	return atomic_read(&anon_vma->external_refcount);
 }
 #else
-static inline void ksm_refcount_init(struct anon_vma *anon_vma)
+static inline void anonvma_external_refcount_init(struct anon_vma *anon_vma)
 {
 }
 
-static inline int ksm_refcount(struct anon_vma *anon_vma)
+static inline int anonvma_external_refcount(struct anon_vma *anon_vma)
 {
 	return 0;
 }
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b55e988..c0151ff 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -384,7 +384,7 @@
  * 1-3 now and depends on arch. We use "5" as safe margin, here.
  */
 #define MAPCOUNT_ELF_CORE_MARGIN	(5)
-#define DEFAULT_MAX_MAP_COUNT	(USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
+#define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
 
 extern int sysctl_max_map_count;
 
@@ -1421,6 +1421,7 @@
 #endif
 #ifdef CONFIG_CPUSETS
 	nodemask_t mems_allowed;	/* Protected by alloc_lock */
+	int mems_allowed_change_disable;
 	int cpuset_mem_spread_rotor;
 #endif
 #ifdef CONFIG_CGROUPS
diff --git a/include/linux/swap.h b/include/linux/swap.h
index ec2b7a4..b6b6143 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -152,6 +152,7 @@
 };
 
 #define SWAP_CLUSTER_MAX 32
+#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX
 
 #define SWAP_MAP_MAX	0x3e	/* Max duplication count, in first swap_map */
 #define SWAP_MAP_BAD	0x3f	/* Note pageblock is bad, in first swap_map */
@@ -224,20 +225,15 @@
 	__lru_cache_add(page, LRU_INACTIVE_ANON);
 }
 
-static inline void lru_cache_add_active_anon(struct page *page)
-{
-	__lru_cache_add(page, LRU_ACTIVE_ANON);
-}
-
 static inline void lru_cache_add_file(struct page *page)
 {
 	__lru_cache_add(page, LRU_INACTIVE_FILE);
 }
 
-static inline void lru_cache_add_active_file(struct page *page)
-{
-	__lru_cache_add(page, LRU_ACTIVE_FILE);
-}
+/* LRU Isolation modes. */
+#define ISOLATE_INACTIVE 0	/* Isolate inactive pages. */
+#define ISOLATE_ACTIVE 1	/* Isolate active pages. */
+#define ISOLATE_BOTH 2		/* Isolate both active and inactive pages. */
 
 /* linux/mm/vmscan.c */
 extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 117f0dd..7f43ccd 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -43,6 +43,10 @@
 		KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY,
 		KSWAPD_SKIP_CONGESTION_WAIT,
 		PAGEOUTRUN, ALLOCSTALL, PGROTATED,
+#ifdef CONFIG_COMPACTION
+		COMPACTBLOCKS, COMPACTPAGES, COMPACTPAGEFAILED,
+		COMPACTSTALL, COMPACTFAIL, COMPACTSUCCESS,
+#endif
 #ifdef CONFIG_HUGETLB_PAGE
 		HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
 #endif
diff --git a/include/net/ip.h b/include/net/ip.h
index 63548f0..452f229 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -358,11 +358,11 @@
 	IP_DEFRAG_LOCAL_DELIVER,
 	IP_DEFRAG_CALL_RA_CHAIN,
 	IP_DEFRAG_CONNTRACK_IN,
-	__IP_DEFRAG_CONNTRACK_IN_END	= IP_DEFRAG_CONNTRACK_IN + USHORT_MAX,
+	__IP_DEFRAG_CONNTRACK_IN_END	= IP_DEFRAG_CONNTRACK_IN + USHRT_MAX,
 	IP_DEFRAG_CONNTRACK_OUT,
-	__IP_DEFRAG_CONNTRACK_OUT_END	= IP_DEFRAG_CONNTRACK_OUT + USHORT_MAX,
+	__IP_DEFRAG_CONNTRACK_OUT_END	= IP_DEFRAG_CONNTRACK_OUT + USHRT_MAX,
 	IP_DEFRAG_CONNTRACK_BRIDGE_IN,
-	__IP_DEFRAG_CONNTRACK_BRIDGE_IN = IP_DEFRAG_CONNTRACK_BRIDGE_IN + USHORT_MAX,
+	__IP_DEFRAG_CONNTRACK_BRIDGE_IN = IP_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
 	IP_DEFRAG_VS_IN,
 	IP_DEFRAG_VS_OUT,
 	IP_DEFRAG_VS_FWD
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index eba5cc0..2600b69 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -354,11 +354,11 @@
 enum ip6_defrag_users {
 	IP6_DEFRAG_LOCAL_DELIVER,
 	IP6_DEFRAG_CONNTRACK_IN,
-	__IP6_DEFRAG_CONNTRACK_IN	= IP6_DEFRAG_CONNTRACK_IN + USHORT_MAX,
+	__IP6_DEFRAG_CONNTRACK_IN	= IP6_DEFRAG_CONNTRACK_IN + USHRT_MAX,
 	IP6_DEFRAG_CONNTRACK_OUT,
-	__IP6_DEFRAG_CONNTRACK_OUT	= IP6_DEFRAG_CONNTRACK_OUT + USHORT_MAX,
+	__IP6_DEFRAG_CONNTRACK_OUT	= IP6_DEFRAG_CONNTRACK_OUT + USHRT_MAX,
 	IP6_DEFRAG_CONNTRACK_BRIDGE_IN,
-	__IP6_DEFRAG_CONNTRACK_BRIDGE_IN = IP6_DEFRAG_CONNTRACK_BRIDGE_IN + USHORT_MAX,
+	__IP6_DEFRAG_CONNTRACK_BRIDGE_IN = IP6_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
 };
 
 struct ip6_create_arg {
diff --git a/include/video/da8xx-fb.h b/include/video/da8xx-fb.h
index 89d43b3..6316cda 100644
--- a/include/video/da8xx-fb.h
+++ b/include/video/da8xx-fb.h
@@ -99,6 +99,7 @@
 #define FBIPUT_COLOR		_IOW('F', 6, int)
 #define FBIPUT_HSYNC		_IOW('F', 9, int)
 #define FBIPUT_VSYNC		_IOW('F', 10, int)
+#define FBIO_WAITFORVSYNC	_IOW('F', 0x20, __u32)
 
 #endif  /* ifndef DA8XX_FB_H */
 
diff --git a/include/video/sh_mobile_lcdc.h b/include/video/sh_mobile_lcdc.h
index 2cc893f..2882054 100644
--- a/include/video/sh_mobile_lcdc.h
+++ b/include/video/sh_mobile_lcdc.h
@@ -34,8 +34,6 @@
 #define LCDC_FLAGS_HSCNT (1 << 3) /* Disable HSYNC during VBLANK */
 #define LCDC_FLAGS_DWCNT (1 << 4) /* Disable dotclock during blanking */
 
-#define FBIO_WAITFORVSYNC _IOW('F', 0x20, __u32)
-
 struct sh_mobile_lcdc_sys_bus_cfg {
 	unsigned long ldmt2r;
 	unsigned long ldmt3r;
diff --git a/init/main.c b/init/main.c
index 22881b5..3bdb152 100644
--- a/init/main.c
+++ b/init/main.c
@@ -567,7 +567,7 @@
 	setup_per_cpu_areas();
 	smp_prepare_boot_cpu();	/* arch-specific boot-cpu hooks */
 
-	build_all_zonelists();
+	build_all_zonelists(NULL);
 	page_alloc_init();
 
 	printk(KERN_NOTICE "Kernel command line: %s\n", boot_command_line);
diff --git a/ipc/msg.c b/ipc/msg.c
index 9547cb7..747b655 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -345,19 +345,19 @@
 		out.msg_rtime		= in->msg_rtime;
 		out.msg_ctime		= in->msg_ctime;
 
-		if (in->msg_cbytes > USHORT_MAX)
-			out.msg_cbytes	= USHORT_MAX;
+		if (in->msg_cbytes > USHRT_MAX)
+			out.msg_cbytes	= USHRT_MAX;
 		else
 			out.msg_cbytes	= in->msg_cbytes;
 		out.msg_lcbytes		= in->msg_cbytes;
 
-		if (in->msg_qnum > USHORT_MAX)
-			out.msg_qnum	= USHORT_MAX;
+		if (in->msg_qnum > USHRT_MAX)
+			out.msg_qnum	= USHRT_MAX;
 		else
 			out.msg_qnum	= in->msg_qnum;
 
-		if (in->msg_qbytes > USHORT_MAX)
-			out.msg_qbytes	= USHORT_MAX;
+		if (in->msg_qbytes > USHRT_MAX)
+			out.msg_qbytes	= USHRT_MAX;
 		else
 			out.msg_qbytes	= in->msg_qbytes;
 		out.msg_lqbytes		= in->msg_qbytes;
diff --git a/ipc/util.c b/ipc/util.c
index 79ce84e..69a0cc1 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -124,8 +124,8 @@
 	ids->seq = 0;
 	{
 		int seq_limit = INT_MAX/SEQ_MULTIPLIER;
-		if (seq_limit > USHORT_MAX)
-			ids->seq_max = USHORT_MAX;
+		if (seq_limit > USHRT_MAX)
+			ids->seq_max = USHRT_MAX;
 		 else
 		 	ids->seq_max = seq_limit;
 	}
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 5457775..124ad9d 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -326,6 +326,12 @@
 int __cpuinit cpu_up(unsigned int cpu)
 {
 	int err = 0;
+
+#ifdef	CONFIG_MEMORY_HOTPLUG
+	int nid;
+	pg_data_t	*pgdat;
+#endif
+
 	if (!cpu_possible(cpu)) {
 		printk(KERN_ERR "can't online cpu %d because it is not "
 			"configured as may-hotadd at boot time\n", cpu);
@@ -336,6 +342,28 @@
 		return -EINVAL;
 	}
 
+#ifdef	CONFIG_MEMORY_HOTPLUG
+	nid = cpu_to_node(cpu);
+	if (!node_online(nid)) {
+		err = mem_online_node(nid);
+		if (err)
+			return err;
+	}
+
+	pgdat = NODE_DATA(nid);
+	if (!pgdat) {
+		printk(KERN_ERR
+			"Can't online cpu %d due to NULL pgdat\n", cpu);
+		return -ENOMEM;
+	}
+
+	if (pgdat->node_zonelists->_zonerefs->zone == NULL) {
+		mutex_lock(&zonelists_mutex);
+		build_all_zonelists(NULL);
+		mutex_unlock(&zonelists_mutex);
+	}
+#endif
+
 	cpu_maps_update_begin();
 
 	if (cpu_hotplug_disabled) {
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 9a50c5f..61d6af7 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -946,16 +946,61 @@
  * In order to avoid seeing no nodes if the old and new nodes are disjoint,
  * we structure updates as setting all new allowed nodes, then clearing newly
  * disallowed ones.
- *
- * Called with task's alloc_lock held
  */
 static void cpuset_change_task_nodemask(struct task_struct *tsk,
 					nodemask_t *newmems)
 {
+repeat:
+	/*
+	 * Allow tasks that have access to memory reserves because they have
+	 * been OOM killed to get memory anywhere.
+	 */
+	if (unlikely(test_thread_flag(TIF_MEMDIE)))
+		return;
+	if (current->flags & PF_EXITING) /* Let dying task have memory */
+		return;
+
+	task_lock(tsk);
 	nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
-	mpol_rebind_task(tsk, &tsk->mems_allowed);
-	mpol_rebind_task(tsk, newmems);
+	mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1);
+
+	/*
+	 * Ensure that ->mems_allowed_change_disable is checked after all the
+	 * new allowed nodes have been set.
+	 *
+	 * The read-side task can see a nodemask with both new and old allowed
+	 * nodes, so if it allocates a page while the cpuset is clearing the
+	 * newly disallowed ones, it still sees the new allowed bits.
+	 *
+	 * If the new allowed nodes were instead set after this check, setting
+	 * the new nodes and clearing the disallowed ones could complete back
+	 * to back, and the read-side task might find no node to allocate from.
+	 */
+	smp_mb();
+
+	/*
+	 * Memory allocation is very fast, so there is no need to sleep while
+	 * waiting for the read side.
+	 */
+	while (ACCESS_ONCE(tsk->mems_allowed_change_disable)) {
+		task_unlock(tsk);
+		if (!task_curr(tsk))
+			yield();
+		goto repeat;
+	}
+
+	/*
+	 * Ensure that ->mems_allowed_change_disable is checked before the
+	 * newly disallowed nodes are cleared.
+	 *
+	 * If the newly disallowed bits were cleared before that check, the
+	 * read-side task might find no node to allocate a page from.
+	 */
+	smp_mb();
+
+	mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP2);
 	tsk->mems_allowed = *newmems;
+	task_unlock(tsk);
 }
 
 /*
@@ -978,9 +1024,7 @@
 	cs = cgroup_cs(scan->cg);
 	guarantee_online_mems(cs, newmems);
 
-	task_lock(p);
 	cpuset_change_task_nodemask(p, newmems);
-	task_unlock(p);
 
 	NODEMASK_FREE(newmems);
 
@@ -1383,9 +1427,7 @@
 	err = set_cpus_allowed_ptr(tsk, cpus_attach);
 	WARN_ON_ONCE(err);
 
-	task_lock(tsk);
 	cpuset_change_task_nodemask(tsk, to);
-	task_unlock(tsk);
 	cpuset_update_task_spread_flag(cs, tsk);
 
 }
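Taken together with the cpuset.h hunk earlier in this diff,
cpuset_change_task_nodemask() and get_mems_allowed()/put_mems_allowed() form a
small handshake; the writer's ordering, condensed (a summary of the code
above, not new logic):

	nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);  /* grow: old | new */
	mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1);
	smp_mb();		/* pairs with the barrier in get_mems_allowed() */
	/* spin until no reader holds mems_allowed_change_disable */
	smp_mb();		/* pairs with the barrier in put_mems_allowed() */
	mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP2);
	tsk->mems_allowed = *newmems;				/* shrink to new */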
diff --git a/kernel/exit.c b/kernel/exit.c
index eabca5a..019a284 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1002,8 +1002,10 @@
 
 	exit_notify(tsk, group_dead);
 #ifdef CONFIG_NUMA
+	task_lock(tsk);
 	mpol_put(tsk->mempolicy);
 	tsk->mempolicy = NULL;
+	task_unlock(tsk);
 #endif
 #ifdef CONFIG_FUTEX
 	if (unlikely(current->pi_state_cache))
diff --git a/kernel/module.c b/kernel/module.c
index a8014bf..625985e 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -180,8 +180,6 @@
 extern const struct kernel_symbol __stop___ksymtab_gpl[];
 extern const struct kernel_symbol __start___ksymtab_gpl_future[];
 extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
-extern const struct kernel_symbol __start___ksymtab_gpl_future[];
-extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
 extern const unsigned long __start___kcrctab[];
 extern const unsigned long __start___kcrctab_gpl[];
 extern const unsigned long __start___kcrctab_gpl_future[];
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 4c93486..84ff5e7 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -37,6 +37,7 @@
 #include <linux/highuid.h>
 #include <linux/writeback.h>
 #include <linux/ratelimit.h>
+#include <linux/compaction.h>
 #include <linux/hugetlb.h>
 #include <linux/initrd.h>
 #include <linux/key.h>
@@ -262,6 +263,11 @@
 static int max_sched_shares_ratelimit = NSEC_PER_SEC; /* 1 second */
 #endif
 
+#ifdef CONFIG_COMPACTION
+static int min_extfrag_threshold;
+static int max_extfrag_threshold = 1000;
+#endif
+
 static struct ctl_table kern_table[] = {
 	{
 		.procname	= "sched_child_runs_first",
@@ -1121,6 +1127,24 @@
 		.mode		= 0644,
 		.proc_handler	= drop_caches_sysctl_handler,
 	},
+#ifdef CONFIG_COMPACTION
+	{
+		.procname	= "compact_memory",
+		.data		= &sysctl_compact_memory,
+		.maxlen		= sizeof(int),
+		.mode		= 0200,
+		.proc_handler	= sysctl_compaction_handler,
+	},
+	{
+		.procname	= "extfrag_threshold",
+		.data		= &sysctl_extfrag_threshold,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= sysctl_extfrag_handler,
+		.extra1		= &min_extfrag_threshold,
+		.extra2		= &max_extfrag_threshold,
+	},
+#endif /* CONFIG_COMPACTION */
 	{
 		.procname	= "min_free_kbytes",
 		.data		= &min_free_kbytes,
diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
index 937d31d..1357c57 100644
--- a/kernel/sysctl_binary.c
+++ b/kernel/sysctl_binary.c
@@ -13,6 +13,7 @@
 #include <linux/file.h>
 #include <linux/ctype.h>
 #include <linux/netdevice.h>
+#include <linux/kernel.h>
 #include <linux/slab.h>
 
 #ifdef CONFIG_SYSCTL_SYSCALL
@@ -1124,11 +1125,6 @@
 	return result;
 }
 
-static unsigned hex_value(int ch)
-{
-	return isdigit(ch) ? ch - '0' : ((ch | 0x20) - 'a') + 10;
-}
-
 static ssize_t bin_uuid(struct file *file,
 	void __user *oldval, size_t oldlen, void __user *newval, size_t newlen)
 {
@@ -1156,7 +1152,8 @@
 			if (!isxdigit(str[0]) || !isxdigit(str[1]))
 				goto out;
 
-			uuid[i] = (hex_value(str[0]) << 4) | hex_value(str[1]);
+			uuid[i] = (hex_to_bin(str[0]) << 4) |
+					hex_to_bin(str[1]);
 			str += 2;
 			if (*str == '-')
 				str++;
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index d85be90..2312089 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1039,10 +1039,10 @@
 
 	  Usage:
 
-	  Dynamic debugging is controlled via the 'dynamic_debug/ddebug' file,
+	  Dynamic debugging is controlled via the 'dynamic_debug/control' file,
 	  which is contained in the 'debugfs' filesystem. Thus, the debugfs
 	  filesystem must first be mounted before making use of this feature.
-	  We refer the control file as: <debugfs>/dynamic_debug/ddebug. This
+	  We refer to the control file as: <debugfs>/dynamic_debug/control. This
 	  file contains a list of the debug statements that can be enabled. The
 	  format for each line of the file is:
 
@@ -1057,7 +1057,7 @@
 
 	  From a live system:
 
-		nullarbor:~ # cat <debugfs>/dynamic_debug/ddebug
+		nullarbor:~ # cat <debugfs>/dynamic_debug/control
 		# filename:lineno [module]function flags format
 		fs/aio.c:222 [aio]__put_ioctx - "__put_ioctx:\040freeing\040%p\012"
 		fs/aio.c:248 [aio]ioctx_alloc - "ENOMEM:\040nr_events\040too\040high\012"
@@ -1067,23 +1067,23 @@
 
 		// enable the message at line 1603 of file svcsock.c
 		nullarbor:~ # echo -n 'file svcsock.c line 1603 +p' >
-						<debugfs>/dynamic_debug/ddebug
+						<debugfs>/dynamic_debug/control
 
 		// enable all the messages in file svcsock.c
 		nullarbor:~ # echo -n 'file svcsock.c +p' >
-						<debugfs>/dynamic_debug/ddebug
+						<debugfs>/dynamic_debug/control
 
 		// enable all the messages in the NFS server module
 		nullarbor:~ # echo -n 'module nfsd +p' >
-						<debugfs>/dynamic_debug/ddebug
+						<debugfs>/dynamic_debug/control
 
 		// enable all 12 messages in the function svc_process()
 		nullarbor:~ # echo -n 'func svc_process +p' >
-						<debugfs>/dynamic_debug/ddebug
+						<debugfs>/dynamic_debug/control
 
 		// disable all 12 messages in the function svc_process()
 		nullarbor:~ # echo -n 'func svc_process -p' >
-						<debugfs>/dynamic_debug/ddebug
+						<debugfs>/dynamic_debug/control
 
 	  See Documentation/dynamic-debug-howto.txt for additional information.
 
diff --git a/lib/crc32.c b/lib/crc32.c
index bc5b936..3087ed8 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -48,12 +48,20 @@
 #if CRC_LE_BITS == 8 || CRC_BE_BITS == 8
 
 static inline u32
-crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 *tab)
+crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 (*tab)[256])
 {
-# ifdef __LITTLE_ENDIAN
-#  define DO_CRC(x) crc = tab[(crc ^ (x)) & 255 ] ^ (crc >> 8)
+# if __BYTE_ORDER == __LITTLE_ENDIAN
+#  define DO_CRC(x) crc = tab[0][(crc ^ (x)) & 255] ^ (crc >> 8)
+#  define DO_CRC4 crc = tab[3][(crc) & 255] ^ \
+		tab[2][(crc >> 8) & 255] ^ \
+		tab[1][(crc >> 16) & 255] ^ \
+		tab[0][(crc >> 24) & 255]
 # else
-#  define DO_CRC(x) crc = tab[((crc >> 24) ^ (x)) & 255] ^ (crc << 8)
+#  define DO_CRC(x) crc = tab[0][((crc >> 24) ^ (x)) & 255] ^ (crc << 8)
+#  define DO_CRC4 crc = tab[0][(crc) & 255] ^ \
+		tab[1][(crc >> 8) & 255] ^ \
+		tab[2][(crc >> 16) & 255] ^ \
+		tab[3][(crc >> 24) & 255]
 # endif
 	const u32 *b;
 	size_t    rem_len;
@@ -70,10 +78,7 @@
 	b = (const u32 *)buf;
 	for (--b; len; --len) {
 		crc ^= *++b; /* use pre increment for speed */
-		DO_CRC(0);
-		DO_CRC(0);
-		DO_CRC(0);
-		DO_CRC(0);
+		DO_CRC4;
 	}
 	len = rem_len;
 	/* And the last few bytes */
@@ -85,6 +90,7 @@
 	}
 	return crc;
 #undef DO_CRC
+#undef DO_CRC4
 }
 #endif
 /**
@@ -117,7 +123,7 @@
 u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len)
 {
 # if CRC_LE_BITS == 8
-	const u32      *tab = crc32table_le;
+	const u32      (*tab)[] = crc32table_le;
 
 	crc = __cpu_to_le32(crc);
 	crc = crc32_body(crc, p, len, tab);
@@ -174,7 +180,7 @@
 u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
 {
 # if CRC_BE_BITS == 8
-	const u32      *tab = crc32table_be;
+	const u32      (*tab)[] = crc32table_be;
 
 	crc = __cpu_to_be32(crc);
 	crc = crc32_body(crc, p, len, tab);
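DO_CRC4 is the classic slicing-by-4 optimisation: one 32-bit word of input is
folded in and consumed with four independent table lookups instead of four
dependent byte-at-a-time steps, and the new __BYTE_ORDER definitions in the
byteorder headers let the variant be selected with a plain #if. The
little-endian step, written out as a standalone helper (a sketch;
crc32_le_word() is not a kernel function):

	static inline u32 crc32_le_word(u32 crc, u32 word,
					const u32 tab[4][256])
	{
		crc ^= word;		/* fold four message bytes in at once */
		return tab[3][ crc        & 0xff] ^
		       tab[2][(crc >>  8) & 0xff] ^
		       tab[1][(crc >> 16) & 0xff] ^
		       tab[0][(crc >> 24) & 0xff];
	}

Here tab[j][b] is the CRC of byte b advanced through j extra zero bytes,
which is exactly what the new loops in gen_crc32table.c precompute.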
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index d6b8b9b..3df8eb1 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -456,7 +456,7 @@
 			__func__, (int)len);
 
 	nwords = ddebug_tokenize(tmpbuf, words, MAXWORDS);
-	if (nwords < 0)
+	if (nwords <= 0)
 		return -EINVAL;
 	if (ddebug_parse_query(words, nwords-1, &query))
 		return -EINVAL;
diff --git a/lib/gen_crc32table.c b/lib/gen_crc32table.c
index bea5d97..85d0e41 100644
--- a/lib/gen_crc32table.c
+++ b/lib/gen_crc32table.c
@@ -7,8 +7,8 @@
 #define LE_TABLE_SIZE (1 << CRC_LE_BITS)
 #define BE_TABLE_SIZE (1 << CRC_BE_BITS)
 
-static uint32_t crc32table_le[LE_TABLE_SIZE];
-static uint32_t crc32table_be[BE_TABLE_SIZE];
+static uint32_t crc32table_le[4][LE_TABLE_SIZE];
+static uint32_t crc32table_be[4][BE_TABLE_SIZE];
 
 /**
  * crc32init_le() - allocate and initialize LE table data
@@ -22,12 +22,19 @@
 	unsigned i, j;
 	uint32_t crc = 1;
 
-	crc32table_le[0] = 0;
+	crc32table_le[0][0] = 0;
 
 	for (i = 1 << (CRC_LE_BITS - 1); i; i >>= 1) {
 		crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_LE : 0);
 		for (j = 0; j < LE_TABLE_SIZE; j += 2 * i)
-			crc32table_le[i + j] = crc ^ crc32table_le[j];
+			crc32table_le[0][i + j] = crc ^ crc32table_le[0][j];
+	}
+	for (i = 0; i < LE_TABLE_SIZE; i++) {
+		crc = crc32table_le[0][i];
+		for (j = 1; j < 4; j++) {
+			crc = crc32table_le[0][crc & 0xff] ^ (crc >> 8);
+			crc32table_le[j][i] = crc;
+		}
 	}
 }
 
@@ -39,25 +46,35 @@
 	unsigned i, j;
 	uint32_t crc = 0x80000000;
 
-	crc32table_be[0] = 0;
+	crc32table_be[0][0] = 0;
 
 	for (i = 1; i < BE_TABLE_SIZE; i <<= 1) {
 		crc = (crc << 1) ^ ((crc & 0x80000000) ? CRCPOLY_BE : 0);
 		for (j = 0; j < i; j++)
-			crc32table_be[i + j] = crc ^ crc32table_be[j];
+			crc32table_be[0][i + j] = crc ^ crc32table_be[0][j];
+	}
+	for (i = 0; i < BE_TABLE_SIZE; i++) {
+		crc = crc32table_be[0][i];
+		for (j = 1; j < 4; j++) {
+			crc = crc32table_be[0][(crc >> 24) & 0xff] ^ (crc << 8);
+			crc32table_be[j][i] = crc;
+		}
 	}
 }
 
-static void output_table(uint32_t table[], int len, char *trans)
+static void output_table(uint32_t table[4][256], int len, char *trans)
 {
-	int i;
+	int i, j;
 
-	for (i = 0; i < len - 1; i++) {
-		if (i % ENTRIES_PER_LINE == 0)
-			printf("\n");
-		printf("%s(0x%8.8xL), ", trans, table[i]);
+	for (j = 0; j < 4; j++) {
+		printf("{");
+		for (i = 0; i < len - 1; i++) {
+			if (i % ENTRIES_PER_LINE == 0)
+				printf("\n");
+			printf("%s(0x%8.8xL), ", trans, table[j][i]);
+		}
+		printf("%s(0x%8.8xL)},\n", trans, table[j][len - 1]);
 	}
-	printf("%s(0x%8.8xL)\n", trans, table[len - 1]);
 }
 
 int main(int argc, char** argv)
@@ -66,14 +83,14 @@
 
 	if (CRC_LE_BITS > 1) {
 		crc32init_le();
-		printf("static const u32 crc32table_le[] = {");
+		printf("static const u32 crc32table_le[4][256] = {");
 		output_table(crc32table_le, LE_TABLE_SIZE, "tole");
 		printf("};\n");
 	}
 
 	if (CRC_BE_BITS > 1) {
 		crc32init_be();
-		printf("static const u32 crc32table_be[] = {");
+		printf("static const u32 crc32table_be[4][256] = {");
 		output_table(crc32table_be, BE_TABLE_SIZE, "tobe");
 		printf("};\n");
 	}
diff --git a/lib/hexdump.c b/lib/hexdump.c
index 39af256..5d7a480 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -16,6 +16,24 @@
 EXPORT_SYMBOL(hex_asc);
 
 /**
+ * hex_to_bin - convert a hex digit to its real value
+ * @ch: ASCII character representing a hex digit
+ *
+ * hex_to_bin() converts one hex digit to its actual value or -1 in case of bad
+ * input.
+ */
+int hex_to_bin(char ch)
+{
+	if ((ch >= '0') && (ch <= '9'))
+		return ch - '0';
+	ch = tolower(ch);
+	if ((ch >= 'a') && (ch <= 'f'))
+		return ch - 'a' + 10;
+	return -1;
+}
+EXPORT_SYMBOL(hex_to_bin);
+
+/**
  * hex_dump_to_buffer - convert a blob of data to "hex ASCII" in memory
  * @buf: data blob to dump
  * @len: number of bytes in the @buf
@@ -34,7 +52,7 @@
  *
  * E.g.:
  *   hex_dump_to_buffer(frame->data, frame->len, 16, 1,
- *			linebuf, sizeof(linebuf), 1);
+ *			linebuf, sizeof(linebuf), true);
  *
  * example output buffer:
  * 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f  @ABCDEFGHIJKLMNO
@@ -65,8 +83,8 @@
 
 		for (j = 0; j < ngroups; j++)
 			lx += scnprintf(linebuf + lx, linebuflen - lx,
-				"%s%16.16llx", j ? " " : "",
-				(unsigned long long)*(ptr8 + j));
+					"%s%16.16llx", j ? " " : "",
+					(unsigned long long)*(ptr8 + j));
 		ascii_column = 17 * ngroups + 2;
 		break;
 	}
@@ -77,7 +95,7 @@
 
 		for (j = 0; j < ngroups; j++)
 			lx += scnprintf(linebuf + lx, linebuflen - lx,
-				"%s%8.8x", j ? " " : "", *(ptr4 + j));
+					"%s%8.8x", j ? " " : "", *(ptr4 + j));
 		ascii_column = 9 * ngroups + 2;
 		break;
 	}
@@ -88,7 +106,7 @@
 
 		for (j = 0; j < ngroups; j++)
 			lx += scnprintf(linebuf + lx, linebuflen - lx,
-				"%s%4.4x", j ? " " : "", *(ptr2 + j));
+					"%s%4.4x", j ? " " : "", *(ptr2 + j));
 		ascii_column = 5 * ngroups + 2;
 		break;
 	}
@@ -111,9 +129,10 @@
 
 	while (lx < (linebuflen - 1) && lx < (ascii_column - 1))
 		linebuf[lx++] = ' ';
-	for (j = 0; (j < len) && (lx + 2) < linebuflen; j++)
-		linebuf[lx++] = (isascii(ptr[j]) && isprint(ptr[j])) ? ptr[j]
-				: '.';
+	for (j = 0; (j < len) && (lx + 2) < linebuflen; j++) {
+		ch = ptr[j];
+		linebuf[lx++] = (isascii(ch) && isprint(ch)) ? ch : '.';
+	}
 nil:
 	linebuf[lx++] = '\0';
 }
@@ -143,7 +162,7 @@
  *
  * E.g.:
  *   print_hex_dump(KERN_DEBUG, "raw data: ", DUMP_PREFIX_ADDRESS,
- *		16, 1, frame->data, frame->len, 1);
+ *		    16, 1, frame->data, frame->len, true);
  *
  * Example output using %DUMP_PREFIX_OFFSET and 1-byte mode:
  * 0009ab42: 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f  @ABCDEFGHIJKLMNO
@@ -151,12 +170,12 @@
  * ffffffff88089af0: 73727170 77767574 7b7a7978 7f7e7d7c  pqrstuvwxyz{|}~.
  */
 void print_hex_dump(const char *level, const char *prefix_str, int prefix_type,
-			int rowsize, int groupsize,
-			const void *buf, size_t len, bool ascii)
+		    int rowsize, int groupsize,
+		    const void *buf, size_t len, bool ascii)
 {
 	const u8 *ptr = buf;
 	int i, linelen, remaining = len;
-	unsigned char linebuf[200];
+	unsigned char linebuf[32 * 3 + 2 + 32 + 1];
 
 	if (rowsize != 16 && rowsize != 32)
 		rowsize = 16;
@@ -164,13 +183,14 @@
 	for (i = 0; i < len; i += rowsize) {
 		linelen = min(remaining, rowsize);
 		remaining -= rowsize;
+
 		hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize,
-				linebuf, sizeof(linebuf), ascii);
+				   linebuf, sizeof(linebuf), ascii);
 
 		switch (prefix_type) {
 		case DUMP_PREFIX_ADDRESS:
-			printk("%s%s%*p: %s\n", level, prefix_str,
-				(int)(2 * sizeof(void *)), ptr + i, linebuf);
+			printk("%s%s%p: %s\n",
+			       level, prefix_str, ptr + i, linebuf);
 			break;
 		case DUMP_PREFIX_OFFSET:
 			printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf);
@@ -196,9 +216,9 @@
  * rowsize of 16, groupsize of 1, and ASCII output included.
  */
 void print_hex_dump_bytes(const char *prefix_str, int prefix_type,
-			const void *buf, size_t len)
+			  const void *buf, size_t len)
 {
 	print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, 16, 1,
-			buf, len, 1);
+		       buf, len, true);
 }
 EXPORT_SYMBOL(print_hex_dump_bytes);
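
The new hex_to_bin() helper returns -1 for bad input, so callers can
validate while they parse. A minimal sketch of a two-digit consumer; the
hex2byte() helper is hypothetical and not part of this patch:

    static int hex2byte(const char *hex, u8 *out)
    {
        int hi = hex_to_bin(hex[0]);
        int lo = hex_to_bin(hex[1]);

        /* hex_to_bin() returns -1 on a non-hex character */
        if (hi < 0 || lo < 0)
            return -EINVAL;

        *out = (hi << 4) | lo;
        return 0;
    }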
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 46d34b0..b8a2f54 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -267,7 +267,8 @@
 }
 EXPORT_SYMBOL(strict_strtoll);
 
-static int skip_atoi(const char **s)
+static noinline_for_stack
+int skip_atoi(const char **s)
 {
 	int i = 0;
 
@@ -287,7 +288,8 @@
 /* Formats correctly any integer in [0,99999].
  * Outputs from one to five digits depending on input.
  * On i386 gcc 4.1.2 -O2: ~250 bytes of code. */
-static char *put_dec_trunc(char *buf, unsigned q)
+static noinline_for_stack
+char *put_dec_trunc(char *buf, unsigned q)
 {
 	unsigned d3, d2, d1, d0;
 	d1 = (q>>4) & 0xf;
@@ -324,7 +326,8 @@
 	return buf;
 }
 /* Same with if's removed. Always emits five digits */
-static char *put_dec_full(char *buf, unsigned q)
+static noinline_for_stack
+char *put_dec_full(char *buf, unsigned q)
 {
 	/* BTW, if q is in [0,9999], 8-bit ints will be enough, */
 	/* but anyway, gcc produces better code with full-sized ints */
@@ -366,7 +369,8 @@
 	return buf;
 }
 /* No inlining helps gcc to use registers better */
-static noinline char *put_dec(char *buf, unsigned long long num)
+static noinline_for_stack
+char *put_dec(char *buf, unsigned long long num)
 {
 	while (1) {
 		unsigned rem;
@@ -417,8 +421,9 @@
 	s16	precision;	/* # of digits/chars */
 };
 
-static char *number(char *buf, char *end, unsigned long long num,
-			struct printf_spec spec)
+static noinline_for_stack
+char *number(char *buf, char *end, unsigned long long num,
+	     struct printf_spec spec)
 {
 	/* we are called with base 8, 10 or 16, only, thus don't need "G..."  */
 	static const char digits[16] = "0123456789ABCDEF"; /* "GHIJKLMNOPQRSTUVWXYZ"; */
@@ -537,7 +542,8 @@
 	return buf;
 }
 
-static char *string(char *buf, char *end, const char *s, struct printf_spec spec)
+static noinline_for_stack
+char *string(char *buf, char *end, const char *s, struct printf_spec spec)
 {
 	int len, i;
 
@@ -567,8 +573,9 @@
 	return buf;
 }
 
-static char *symbol_string(char *buf, char *end, void *ptr,
-				struct printf_spec spec, char ext)
+static noinline_for_stack
+char *symbol_string(char *buf, char *end, void *ptr,
+		    struct printf_spec spec, char ext)
 {
 	unsigned long value = (unsigned long) ptr;
 #ifdef CONFIG_KALLSYMS
@@ -588,8 +595,9 @@
 #endif
 }
 
-static char *resource_string(char *buf, char *end, struct resource *res,
-				struct printf_spec spec, const char *fmt)
+static noinline_for_stack
+char *resource_string(char *buf, char *end, struct resource *res,
+		      struct printf_spec spec, const char *fmt)
 {
 #ifndef IO_RSRC_PRINTK_SIZE
 #define IO_RSRC_PRINTK_SIZE	6
@@ -690,8 +698,9 @@
 	return string(buf, end, sym, spec);
 }
 
-static char *mac_address_string(char *buf, char *end, u8 *addr,
-				struct printf_spec spec, const char *fmt)
+static noinline_for_stack
+char *mac_address_string(char *buf, char *end, u8 *addr,
+			 struct printf_spec spec, const char *fmt)
 {
 	char mac_addr[sizeof("xx:xx:xx:xx:xx:xx")];
 	char *p = mac_addr;
@@ -714,7 +723,8 @@
 	return string(buf, end, mac_addr, spec);
 }
 
-static char *ip4_string(char *p, const u8 *addr, const char *fmt)
+static noinline_for_stack
+char *ip4_string(char *p, const u8 *addr, const char *fmt)
 {
 	int i;
 	bool leading_zeros = (fmt[0] == 'i');
@@ -763,7 +773,8 @@
 	return p;
 }
 
-static char *ip6_compressed_string(char *p, const char *addr)
+static noinline_for_stack
+char *ip6_compressed_string(char *p, const char *addr)
 {
 	int i, j, range;
 	unsigned char zerolength[8];
@@ -843,7 +854,8 @@
 	return p;
 }
 
-static char *ip6_string(char *p, const char *addr, const char *fmt)
+static noinline_for_stack
+char *ip6_string(char *p, const char *addr, const char *fmt)
 {
 	int i;
 
@@ -858,8 +870,9 @@
 	return p;
 }
 
-static char *ip6_addr_string(char *buf, char *end, const u8 *addr,
-			     struct printf_spec spec, const char *fmt)
+static noinline_for_stack
+char *ip6_addr_string(char *buf, char *end, const u8 *addr,
+		      struct printf_spec spec, const char *fmt)
 {
 	char ip6_addr[sizeof("xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:255.255.255.255")];
 
@@ -871,8 +884,9 @@
 	return string(buf, end, ip6_addr, spec);
 }
 
-static char *ip4_addr_string(char *buf, char *end, const u8 *addr,
-			     struct printf_spec spec, const char *fmt)
+static noinline_for_stack
+char *ip4_addr_string(char *buf, char *end, const u8 *addr,
+		      struct printf_spec spec, const char *fmt)
 {
 	char ip4_addr[sizeof("255.255.255.255")];
 
@@ -881,8 +895,9 @@
 	return string(buf, end, ip4_addr, spec);
 }
 
-static char *uuid_string(char *buf, char *end, const u8 *addr,
-			 struct printf_spec spec, const char *fmt)
+static noinline_for_stack
+char *uuid_string(char *buf, char *end, const u8 *addr,
+		  struct printf_spec spec, const char *fmt)
 {
 	char uuid[sizeof("xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx")];
 	char *p = uuid;
@@ -970,8 +985,9 @@
  * function pointers are really function descriptors, which contain a
  * pointer to the real address.
  */
-static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
-			struct printf_spec spec)
+static noinline_for_stack
+char *pointer(const char *fmt, char *buf, char *end, void *ptr,
+	      struct printf_spec spec)
 {
 	if (!ptr)
 		return string(buf, end, "(null)", spec);
@@ -1040,7 +1056,8 @@
  * @precision: precision of a number
  * @qualifier: qualifier of a number (long, size_t, ...)
  */
-static int format_decode(const char *fmt, struct printf_spec *spec)
+static noinline_for_stack
+int format_decode(const char *fmt, struct printf_spec *spec)
 {
 	const char *start = fmt;
 
@@ -1980,7 +1997,7 @@
 		{
 			char *s = (char *)va_arg(args, char *);
 			if (field_width == -1)
-				field_width = SHORT_MAX;
+				field_width = SHRT_MAX;
 			/* first, skip leading white space in buffer */
 			str = skip_spaces(str);
 
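The noinline_for_stack annotations above stop gcc from inlining these
helpers into vsnprintf(), which would otherwise accumulate all of their
locals in one deep stack frame. The body of put_dec() is only partly
visible in this hunk; a sketch consistent with the visible pieces, which
peels five decimal digits (base 100000) per iteration:

    static noinline_for_stack
    char *put_dec(char *buf, unsigned long long num)
    {
        while (1) {
            unsigned rem;

            if (num < 100000)
                return put_dec_trunc(buf, num);   /* 1..5 digits */

            /* do_div() divides num in place and returns the remainder */
            rem = do_div(num, 100000);
            buf = put_dec_full(buf, rem);         /* exactly 5 digits */
        }
    }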
diff --git a/mm/Kconfig b/mm/Kconfig
index 9c61158..527136b 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -172,6 +172,15 @@
 	default "4"
 
 #
+# support for memory compaction
+config COMPACTION
+	bool "Allow for memory compaction"
+	select MIGRATION
+	depends on EXPERIMENTAL && HUGETLB_PAGE && MMU
+	help
+	  Allows the compaction of memory for the allocation of huge pages.
+
+#
 # support for page migration
 #
 config MIGRATION
@@ -180,9 +189,11 @@
 	depends on NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE
 	help
 	  Allows the migration of the physical location of pages of processes
-	  while the virtual addresses are not changed. This is useful for
-	  example on NUMA systems to put pages nearer to the processors accessing
-	  the page.
+	  while the virtual addresses are not changed. This is useful in
+	  two situations. The first is on NUMA systems to put pages nearer
+	  to the processors accessing them. The second is when allocating
+	  huge pages, as migration can relocate pages to satisfy a huge
+	  page allocation instead of reclaiming.
 
 config PHYS_ADDR_T_64BIT
 	def_bool 64BIT || ARCH_PHYS_ADDR_T_64BIT
diff --git a/mm/Makefile b/mm/Makefile
index 6c2a73a..8982504 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -23,6 +23,7 @@
 obj-$(CONFIG_SPARSEMEM)	+= sparse.o
 obj-$(CONFIG_SPARSEMEM_VMEMMAP) += sparse-vmemmap.o
 obj-$(CONFIG_SLOB) += slob.o
+obj-$(CONFIG_COMPACTION) += compaction.o
 obj-$(CONFIG_MMU_NOTIFIER) += mmu_notifier.o
 obj-$(CONFIG_KSM) += ksm.o
 obj-$(CONFIG_PAGE_POISONING) += debug-pagealloc.o
diff --git a/mm/compaction.c b/mm/compaction.c
new file mode 100644
index 0000000..94cce51
--- /dev/null
+++ b/mm/compaction.c
@@ -0,0 +1,605 @@
+/*
+ * linux/mm/compaction.c
+ *
+ * Memory compaction for the reduction of external fragmentation. Note that
+ * this heavily depends upon page migration to do all the real heavy
+ * lifting.
+ *
+ * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
+ */
+#include <linux/swap.h>
+#include <linux/migrate.h>
+#include <linux/compaction.h>
+#include <linux/mm_inline.h>
+#include <linux/backing-dev.h>
+#include <linux/sysctl.h>
+#include <linux/sysfs.h>
+#include "internal.h"
+
+/*
+ * compact_control is used to track pages being migrated and the free pages
+ * they are being migrated to during memory compaction. The free_pfn starts
+ * at the end of a zone and migrate_pfn begins at the start. Movable pages
+ * are moved to the end of a zone during a compaction run and the run
+ * completes when free_pfn <= migrate_pfn.
+ */
+struct compact_control {
+	struct list_head freepages;	/* List of free pages to migrate to */
+	struct list_head migratepages;	/* List of pages being migrated */
+	unsigned long nr_freepages;	/* Number of isolated free pages */
+	unsigned long nr_migratepages;	/* Number of pages to migrate */
+	unsigned long free_pfn;		/* isolate_freepages search base */
+	unsigned long migrate_pfn;	/* isolate_migratepages search base */
+
+	/* Account for isolated anon and file pages */
+	unsigned long nr_anon;
+	unsigned long nr_file;
+
+	unsigned int order;		/* order a direct compactor needs */
+	int migratetype;		/* MOVABLE, RECLAIMABLE etc */
+	struct zone *zone;
+};
+
+static unsigned long release_freepages(struct list_head *freelist)
+{
+	struct page *page, *next;
+	unsigned long count = 0;
+
+	list_for_each_entry_safe(page, next, freelist, lru) {
+		list_del(&page->lru);
+		__free_page(page);
+		count++;
+	}
+
+	return count;
+}
+
+/* Isolate free pages onto a private freelist. Must hold zone->lock */
+static unsigned long isolate_freepages_block(struct zone *zone,
+				unsigned long blockpfn,
+				struct list_head *freelist)
+{
+	unsigned long zone_end_pfn, end_pfn;
+	int total_isolated = 0;
+	struct page *cursor;
+
+	/* Get the last PFN we should scan for free pages at */
+	zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
+	end_pfn = min(blockpfn + pageblock_nr_pages, zone_end_pfn);
+
+	/* Find the first usable PFN in the block to initialise page cursor */
+	for (; blockpfn < end_pfn; blockpfn++) {
+		if (pfn_valid_within(blockpfn))
+			break;
+	}
+	cursor = pfn_to_page(blockpfn);
+
+	/* Isolate free pages. This assumes the block is valid */
+	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
+		int isolated, i;
+		struct page *page = cursor;
+
+		if (!pfn_valid_within(blockpfn))
+			continue;
+
+		if (!PageBuddy(page))
+			continue;
+
+		/* Found a free page, break it into order-0 pages */
+		isolated = split_free_page(page);
+		total_isolated += isolated;
+		for (i = 0; i < isolated; i++) {
+			list_add(&page->lru, freelist);
+			page++;
+		}
+
+		/* If a page was split, advance to the end of it */
+		if (isolated) {
+			blockpfn += isolated - 1;
+			cursor += isolated - 1;
+		}
+	}
+
+	return total_isolated;
+}
+
+/* Returns true if the page is within a block suitable for migration to */
+static bool suitable_migration_target(struct page *page)
+{
+
+	int migratetype = get_pageblock_migratetype(page);
+
+	/* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
+	if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
+		return false;
+
+	/* If the page is a large free page, then allow migration */
+	if (PageBuddy(page) && page_order(page) >= pageblock_order)
+		return true;
+
+	/* If the block is MIGRATE_MOVABLE, allow migration */
+	if (migratetype == MIGRATE_MOVABLE)
+		return true;
+
+	/* Otherwise skip the block */
+	return false;
+}
+
+/*
+ * Based on information in the current compact_control, find blocks
+ * suitable for isolating free pages from and then isolate them.
+ */
+static void isolate_freepages(struct zone *zone,
+				struct compact_control *cc)
+{
+	struct page *page;
+	unsigned long high_pfn, low_pfn, pfn;
+	unsigned long flags;
+	int nr_freepages = cc->nr_freepages;
+	struct list_head *freelist = &cc->freepages;
+
+	pfn = cc->free_pfn;
+	low_pfn = cc->migrate_pfn + pageblock_nr_pages;
+	high_pfn = low_pfn;
+
+	/*
+	 * Isolate free pages until enough are available to migrate the
+	 * pages on cc->migratepages. We stop searching if the migrate
+	 * and free page scanners meet or enough free pages are isolated.
+	 */
+	spin_lock_irqsave(&zone->lock, flags);
+	for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
+					pfn -= pageblock_nr_pages) {
+		unsigned long isolated;
+
+		if (!pfn_valid(pfn))
+			continue;
+
+		/*
+		 * Check for overlapping nodes/zones. It's possible on some
+		 * configurations to have a setup like
+		 * node0 node1 node0
+		 * i.e. it's possible that all pages within a zone's range of
+		 * pages do not belong to a single zone.
+		 */
+		page = pfn_to_page(pfn);
+		if (page_zone(page) != zone)
+			continue;
+
+		/* Check the block is suitable for migration */
+		if (!suitable_migration_target(page))
+			continue;
+
+		/* Found a block suitable for isolating free pages from */
+		isolated = isolate_freepages_block(zone, pfn, freelist);
+		nr_freepages += isolated;
+
+		/*
+		 * Record the highest PFN we isolated pages from. When next
+		 * looking for free pages, the search will restart here as
+		 * page migration may have returned some pages to the allocator
+		 */
+		if (isolated)
+			high_pfn = max(high_pfn, pfn);
+	}
+	spin_unlock_irqrestore(&zone->lock, flags);
+
+	/* split_free_page does not map the pages */
+	list_for_each_entry(page, freelist, lru) {
+		arch_alloc_page(page, 0);
+		kernel_map_pages(page, 1, 1);
+	}
+
+	cc->free_pfn = high_pfn;
+	cc->nr_freepages = nr_freepages;
+}
+
+/* Update the number of anon and file isolated pages in the zone */
+static void acct_isolated(struct zone *zone, struct compact_control *cc)
+{
+	struct page *page;
+	unsigned int count[NR_LRU_LISTS] = { 0, };
+
+	list_for_each_entry(page, &cc->migratepages, lru) {
+		int lru = page_lru_base_type(page);
+		count[lru]++;
+	}
+
+	cc->nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON];
+	cc->nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE];
+	__mod_zone_page_state(zone, NR_ISOLATED_ANON, cc->nr_anon);
+	__mod_zone_page_state(zone, NR_ISOLATED_FILE, cc->nr_file);
+}
+
+/* Similar to reclaim, but different enough that they don't share logic */
+static bool too_many_isolated(struct zone *zone)
+{
+
+	unsigned long inactive, isolated;
+
+	inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
+					zone_page_state(zone, NR_INACTIVE_ANON);
+	isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
+					zone_page_state(zone, NR_ISOLATED_ANON);
+
+	return isolated > inactive;
+}
+
+/*
+ * Isolate all pages that can be migrated from the block pointed to by
+ * the migrate scanner within compact_control.
+ */
+static unsigned long isolate_migratepages(struct zone *zone,
+					struct compact_control *cc)
+{
+	unsigned long low_pfn, end_pfn;
+	struct list_head *migratelist = &cc->migratepages;
+
+	/* Do not scan outside zone boundaries */
+	low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);
+
+	/* Only scan within a pageblock boundary */
+	end_pfn = ALIGN(low_pfn + pageblock_nr_pages, pageblock_nr_pages);
+
+	/* Do not cross the free scanner or scan within a memory hole */
+	if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
+		cc->migrate_pfn = end_pfn;
+		return 0;
+	}
+
+	/*
+	 * Ensure that there are not too many pages isolated from the LRU
+	 * list by either parallel reclaimers or compaction. If there are,
+	 * delay for some time until fewer pages are isolated
+	 */
+	while (unlikely(too_many_isolated(zone))) {
+		congestion_wait(BLK_RW_ASYNC, HZ/10);
+
+		if (fatal_signal_pending(current))
+			return 0;
+	}
+
+	/* Time to isolate some pages for migration */
+	spin_lock_irq(&zone->lru_lock);
+	for (; low_pfn < end_pfn; low_pfn++) {
+		struct page *page;
+		if (!pfn_valid_within(low_pfn))
+			continue;
+
+		/* Get the page and skip if free */
+		page = pfn_to_page(low_pfn);
+		if (PageBuddy(page))
+			continue;
+
+		/* Try isolate the page */
+		if (__isolate_lru_page(page, ISOLATE_BOTH, 0) != 0)
+			continue;
+
+		/* Successfully isolated */
+		del_page_from_lru_list(zone, page, page_lru(page));
+		list_add(&page->lru, migratelist);
+		mem_cgroup_del_lru(page);
+		cc->nr_migratepages++;
+
+		/* Avoid isolating too much */
+		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
+			break;
+	}
+
+	acct_isolated(zone, cc);
+
+	spin_unlock_irq(&zone->lru_lock);
+	cc->migrate_pfn = low_pfn;
+
+	return cc->nr_migratepages;
+}
+
+/*
+ * This is a migrate-callback that "allocates" freepages by taking pages
+ * from the isolated freelists in the block we are migrating to.
+ */
+static struct page *compaction_alloc(struct page *migratepage,
+					unsigned long data,
+					int **result)
+{
+	struct compact_control *cc = (struct compact_control *)data;
+	struct page *freepage;
+
+	/* Isolate free pages if necessary */
+	if (list_empty(&cc->freepages)) {
+		isolate_freepages(cc->zone, cc);
+
+		if (list_empty(&cc->freepages))
+			return NULL;
+	}
+
+	freepage = list_entry(cc->freepages.next, struct page, lru);
+	list_del(&freepage->lru);
+	cc->nr_freepages--;
+
+	return freepage;
+}
+
+/*
+ * We cannot control nr_migratepages and nr_freepages fully when migration is
+ * running as migrate_pages() has no knowledge of compact_control. When
+ * migration is complete, we count the number of pages on the lists by hand.
+ */
+static void update_nr_listpages(struct compact_control *cc)
+{
+	int nr_migratepages = 0;
+	int nr_freepages = 0;
+	struct page *page;
+
+	list_for_each_entry(page, &cc->migratepages, lru)
+		nr_migratepages++;
+	list_for_each_entry(page, &cc->freepages, lru)
+		nr_freepages++;
+
+	cc->nr_migratepages = nr_migratepages;
+	cc->nr_freepages = nr_freepages;
+}
+
+static int compact_finished(struct zone *zone,
+						struct compact_control *cc)
+{
+	unsigned int order;
+	unsigned long watermark = low_wmark_pages(zone) + (1 << cc->order);
+
+	if (fatal_signal_pending(current))
+		return COMPACT_PARTIAL;
+
+	/* Compaction run completes if the migrate and free scanner meet */
+	if (cc->free_pfn <= cc->migrate_pfn)
+		return COMPACT_COMPLETE;
+
+	/* Compaction run is not finished if the watermark is not met */
+	if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
+		return COMPACT_CONTINUE;
+
+	if (cc->order == -1)
+		return COMPACT_CONTINUE;
+
+	/* Direct compactor: Is a suitable page free? */
+	for (order = cc->order; order < MAX_ORDER; order++) {
+		/* Job done if page is free of the right migratetype */
+		if (!list_empty(&zone->free_area[order].free_list[cc->migratetype]))
+			return COMPACT_PARTIAL;
+
+		/* Job done if allocation would set block type */
+		if (order >= pageblock_order && zone->free_area[order].nr_free)
+			return COMPACT_PARTIAL;
+	}
+
+	return COMPACT_CONTINUE;
+}
+
+static int compact_zone(struct zone *zone, struct compact_control *cc)
+{
+	int ret;
+
+	/* Setup to move all movable pages to the end of the zone */
+	cc->migrate_pfn = zone->zone_start_pfn;
+	cc->free_pfn = cc->migrate_pfn + zone->spanned_pages;
+	cc->free_pfn &= ~(pageblock_nr_pages-1);
+
+	migrate_prep_local();
+
+	while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
+		unsigned long nr_migrate, nr_remaining;
+
+		if (!isolate_migratepages(zone, cc))
+			continue;
+
+		nr_migrate = cc->nr_migratepages;
+		migrate_pages(&cc->migratepages, compaction_alloc,
+						(unsigned long)cc, 0);
+		update_nr_listpages(cc);
+		nr_remaining = cc->nr_migratepages;
+
+		count_vm_event(COMPACTBLOCKS);
+		count_vm_events(COMPACTPAGES, nr_migrate - nr_remaining);
+		if (nr_remaining)
+			count_vm_events(COMPACTPAGEFAILED, nr_remaining);
+
+		/* Release LRU pages not migrated */
+		if (!list_empty(&cc->migratepages)) {
+			putback_lru_pages(&cc->migratepages);
+			cc->nr_migratepages = 0;
+		}
+
+	}
+
+	/* Release free pages and check accounting */
+	cc->nr_freepages -= release_freepages(&cc->freepages);
+	VM_BUG_ON(cc->nr_freepages != 0);
+
+	return ret;
+}
+
+static unsigned long compact_zone_order(struct zone *zone,
+						int order, gfp_t gfp_mask)
+{
+	struct compact_control cc = {
+		.nr_freepages = 0,
+		.nr_migratepages = 0,
+		.order = order,
+		.migratetype = allocflags_to_migratetype(gfp_mask),
+		.zone = zone,
+	};
+	INIT_LIST_HEAD(&cc.freepages);
+	INIT_LIST_HEAD(&cc.migratepages);
+
+	return compact_zone(zone, &cc);
+}
+
+int sysctl_extfrag_threshold = 500;
+
+/**
+ * try_to_compact_pages - Direct compact to satisfy a high-order allocation
+ * @zonelist: The zonelist used for the current allocation
+ * @order: The order of the current allocation
+ * @gfp_mask: The GFP mask of the current allocation
+ * @nodemask: The allowed nodes to allocate from
+ *
+ * This is the main entry point for direct page compaction.
+ */
+unsigned long try_to_compact_pages(struct zonelist *zonelist,
+			int order, gfp_t gfp_mask, nodemask_t *nodemask)
+{
+	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
+	int may_enter_fs = gfp_mask & __GFP_FS;
+	int may_perform_io = gfp_mask & __GFP_IO;
+	unsigned long watermark;
+	struct zoneref *z;
+	struct zone *zone;
+	int rc = COMPACT_SKIPPED;
+
+	/*
+	 * Check whether it is worth even starting compaction. The order check
+	 * is made because we assume the page allocator can satisfy the
+	 * "cheaper" orders without taking special steps.
+	 */
+	if (order <= PAGE_ALLOC_COSTLY_ORDER || !may_enter_fs || !may_perform_io)
+		return rc;
+
+	count_vm_event(COMPACTSTALL);
+
+	/* Compact each zone in the list */
+	for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
+								nodemask) {
+		int fragindex;
+		int status;
+
+		/*
+		 * Watermarks for order-0 must be met for compaction. Note
+		 * the 2UL. This is because during migration, copies of
+		 * pages need to be allocated and for a short time, the
+		 * footprint is higher
+		 */
+		watermark = low_wmark_pages(zone) + (2UL << order);
+		if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
+			continue;
+
+		/*
+		 * fragmentation index determines if allocation failures are
+		 * due to low memory or external fragmentation
+		 *
+		 * index of -1 implies allocations might succeed depending
+		 * 	on watermarks
+		 * index towards 0 implies failure is due to lack of memory
+		 * index towards 1000 implies failure is due to fragmentation
+		 *
+		 * Only compact if a failure would be due to fragmentation.
+		 */
+		fragindex = fragmentation_index(zone, order);
+		if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
+			continue;
+
+		if (fragindex == -1 && zone_watermark_ok(zone, order, watermark, 0, 0)) {
+			rc = COMPACT_PARTIAL;
+			break;
+		}
+
+		status = compact_zone_order(zone, order, gfp_mask);
+		rc = max(status, rc);
+
+		if (zone_watermark_ok(zone, order, watermark, 0, 0))
+			break;
+	}
+
+	return rc;
+}
+
+
+/* Compact all zones within a node */
+static int compact_node(int nid)
+{
+	int zoneid;
+	pg_data_t *pgdat;
+	struct zone *zone;
+
+	if (nid < 0 || nid >= nr_node_ids || !node_online(nid))
+		return -EINVAL;
+	pgdat = NODE_DATA(nid);
+
+	/* Flush pending updates to the LRU lists */
+	lru_add_drain_all();
+
+	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
+		struct compact_control cc = {
+			.nr_freepages = 0,
+			.nr_migratepages = 0,
+			.order = -1,
+		};
+
+		zone = &pgdat->node_zones[zoneid];
+		if (!populated_zone(zone))
+			continue;
+
+		cc.zone = zone;
+		INIT_LIST_HEAD(&cc.freepages);
+		INIT_LIST_HEAD(&cc.migratepages);
+
+		compact_zone(zone, &cc);
+
+		VM_BUG_ON(!list_empty(&cc.freepages));
+		VM_BUG_ON(!list_empty(&cc.migratepages));
+	}
+
+	return 0;
+}
+
+/* Compact all nodes in the system */
+static int compact_nodes(void)
+{
+	int nid;
+
+	for_each_online_node(nid)
+		compact_node(nid);
+
+	return COMPACT_COMPLETE;
+}
+
+/* The written value is actually unused, all memory is compacted */
+int sysctl_compact_memory;
+
+/* This is the entry point for compacting all nodes via /proc/sys/vm */
+int sysctl_compaction_handler(struct ctl_table *table, int write,
+			void __user *buffer, size_t *length, loff_t *ppos)
+{
+	if (write)
+		return compact_nodes();
+
+	return 0;
+}
+
+int sysctl_extfrag_handler(struct ctl_table *table, int write,
+			void __user *buffer, size_t *length, loff_t *ppos)
+{
+	proc_dointvec_minmax(table, write, buffer, length, ppos);
+
+	return 0;
+}
+
+#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
+ssize_t sysfs_compact_node(struct sys_device *dev,
+			struct sysdev_attribute *attr,
+			const char *buf, size_t count)
+{
+	compact_node(dev->id);
+
+	return count;
+}
+static SYSDEV_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);
+
+int compaction_register_node(struct node *node)
+{
+	return sysdev_create_file(&node->sysdev, &attr_compact);
+}
+
+void compaction_unregister_node(struct node *node)
+{
+	return sysdev_remove_file(&node->sysdev, &attr_compact);
+}
+#endif /* CONFIG_SYSFS && CONFIG_NUMA */
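
From userspace, the two triggers added above are exercised by writing to
/proc/sys/vm/compact_memory (all nodes) or to the per-node sysfs file
(one node, CONFIG_NUMA only). A hedged sketch; requires root:

    #include <stdio.h>

    int main(void)
    {
        /* sysctl_compaction_handler(): the written value is ignored,
         * the write itself triggers compact_nodes(). */
        FILE *f = fopen("/proc/sys/vm/compact_memory", "w");

        if (!f) {
            perror("compact_memory");
            return 1;
        }
        fputs("1\n", f);
        fclose(f);

        /* Per-node variant: /sys/devices/system/node/node0/compact */
        return 0;
    }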
diff --git a/mm/filemap.c b/mm/filemap.c
index 140ebda..88d7196 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -441,7 +441,7 @@
 	/*
 	 * Splice_read and readahead add shmem/tmpfs pages into the page cache
 	 * before shmem_readpage has a chance to mark them as SwapBacked: they
-	 * need to go on the active_anon lru below, and mem_cgroup_cache_charge
+	 * need to go on the anon lru below, and mem_cgroup_cache_charge
 	 * (called in add_to_page_cache) needs to know where they're going too.
 	 */
 	if (mapping_cap_swap_backed(mapping))
@@ -452,7 +452,7 @@
 		if (page_is_file_cache(page))
 			lru_cache_add_file(page);
 		else
-			lru_cache_add_active_anon(page);
+			lru_cache_add_anon(page);
 	}
 	return ret;
 }
@@ -461,9 +461,15 @@
 #ifdef CONFIG_NUMA
 struct page *__page_cache_alloc(gfp_t gfp)
 {
+	int n;
+	struct page *page;
+
 	if (cpuset_do_page_mem_spread()) {
-		int n = cpuset_mem_spread_node();
-		return alloc_pages_exact_node(n, gfp, 0);
+		get_mems_allowed();
+		n = cpuset_mem_spread_node();
+		page = alloc_pages_exact_node(n, gfp, 0);
+		put_mems_allowed();
+		return page;
 	}
 	return alloc_pages(gfp, 0);
 }
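
The same get_mems_allowed()/put_mems_allowed() pairing recurs in the
hugetlb and mempolicy hunks below: the read-side section must span both
the policy/nodemask lookup and the allocation itself, so that a
concurrent cpuset rebind cannot leave the task looking at an empty
nodemask mid-allocation. A minimal sketch of the pattern, with a
hypothetical caller:

    static struct page *alloc_under_mems_allowed(gfp_t gfp)
    {
        struct page *page;

        get_mems_allowed();             /* pin mems_allowed */
        page = alloc_pages(gfp, 0);     /* any policy-driven allocation */
        put_mems_allowed();

        return page;
    }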
diff --git a/mm/highmem.c b/mm/highmem.c
index bed8a8b..66baa20 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -422,7 +422,7 @@
 
 #endif	/* defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL) */
 
-#if defined(CONFIG_DEBUG_HIGHMEM) && defined(CONFIG_TRACE_IRQFLAGS_SUPPORT)
+#ifdef CONFIG_DEBUG_HIGHMEM
 
 void debug_kmap_atomic(enum km_type type)
 {
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 4c9e6bb..54d42b0 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -465,11 +465,13 @@
 	struct page *page = NULL;
 	struct mempolicy *mpol;
 	nodemask_t *nodemask;
-	struct zonelist *zonelist = huge_zonelist(vma, address,
-					htlb_alloc_mask, &mpol, &nodemask);
+	struct zonelist *zonelist;
 	struct zone *zone;
 	struct zoneref *z;
 
+	get_mems_allowed();
+	zonelist = huge_zonelist(vma, address,
+					htlb_alloc_mask, &mpol, &nodemask);
 	/*
 	 * A child process with MAP_PRIVATE mappings created by their parent
 	 * have no page reserves. This check ensures that reservations are
@@ -477,11 +479,11 @@
 	 */
 	if (!vma_has_reserves(vma) &&
 			h->free_huge_pages - h->resv_huge_pages == 0)
-		return NULL;
+		goto err;
 
 	/* If reserves cannot be used, ensure enough pages are in the pool */
 	if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
-		return NULL;
+		goto err;
 
 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
 						MAX_NR_ZONES - 1, nodemask) {
@@ -500,7 +502,9 @@
 			break;
 		}
 	}
+err:
 	mpol_cond_put(mpol);
+	put_mems_allowed();
 	return page;
 }
 
diff --git a/mm/ksm.c b/mm/ksm.c
index 956880f..6c3e99b 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -318,14 +318,14 @@
 			  struct anon_vma *anon_vma)
 {
 	rmap_item->anon_vma = anon_vma;
-	atomic_inc(&anon_vma->ksm_refcount);
+	atomic_inc(&anon_vma->external_refcount);
 }
 
 static void drop_anon_vma(struct rmap_item *rmap_item)
 {
 	struct anon_vma *anon_vma = rmap_item->anon_vma;
 
-	if (atomic_dec_and_lock(&anon_vma->ksm_refcount, &anon_vma->lock)) {
+	if (atomic_dec_and_lock(&anon_vma->external_refcount, &anon_vma->lock)) {
 		int empty = list_empty(&anon_vma->head);
 		spin_unlock(&anon_vma->lock);
 		if (empty)
diff --git a/mm/memory.c b/mm/memory.c
index 833952d..119b7cc 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1227,8 +1227,17 @@
 }
 EXPORT_SYMBOL_GPL(zap_vma_ptes);
 
-/*
- * Do a quick page-table lookup for a single page.
+/**
+ * follow_page - look up a page descriptor from a user-virtual address
+ * @vma: vm_area_struct mapping @address
+ * @address: virtual address to look up
+ * @flags: flags modifying lookup behaviour
+ *
+ * @flags can have FOLL_ flags set, defined in <linux/mm.h>
+ *
+ * Returns the mapped (struct page *), %NULL if no mapping exists, or
+ * an error pointer if there is a mapping to something not represented
+ * by a page descriptor (see also vm_normal_page()).
  */
 struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
 			unsigned int flags)
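
With the return-value contract now documented, a follow_page() caller
must distinguish three cases. A hedged sketch, assuming mmap_sem is held
for read; the helper name is hypothetical:

    static int page_is_present(struct vm_area_struct *vma, unsigned long addr)
    {
        struct page *page = follow_page(vma, addr, FOLL_GET);

        if (!page)
            return 0;                   /* no mapping exists */
        if (IS_ERR(page))
            return PTR_ERR(page);       /* mapping with no page descriptor */

        put_page(page);                 /* drop the FOLL_GET reference */
        return 1;
    }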
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index be211a5..a4cfcdc 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -415,12 +415,14 @@
 	 * This means the page allocator ignores this zone.
 	 * So, zonelist must be updated after online.
 	 */
+	mutex_lock(&zonelists_mutex);
 	if (!populated_zone(zone))
 		need_zonelists_rebuild = 1;
 
 	ret = walk_system_ram_range(pfn, nr_pages, &onlined_pages,
 		online_pages_range);
 	if (ret) {
+		mutex_unlock(&zonelists_mutex);
 		printk(KERN_DEBUG "online_pages %lx at %lx failed\n",
 			nr_pages, pfn);
 		memory_notify(MEM_CANCEL_ONLINE, &arg);
@@ -429,8 +431,12 @@
 
 	zone->present_pages += onlined_pages;
 	zone->zone_pgdat->node_present_pages += onlined_pages;
+	if (need_zonelists_rebuild)
+		build_all_zonelists(zone);
+	else
+		zone_pcp_update(zone);
 
-	zone_pcp_update(zone);
+	mutex_unlock(&zonelists_mutex);
 	setup_per_zone_wmarks();
 	calculate_zone_inactive_ratio(zone);
 	if (onlined_pages) {
@@ -438,10 +444,7 @@
 		node_set_state(zone_to_nid(zone), N_HIGH_MEMORY);
 	}
 
-	if (need_zonelists_rebuild)
-		build_all_zonelists();
-	else
-		vm_total_pages = nr_free_pagecache_pages();
+	vm_total_pages = nr_free_pagecache_pages();
 
 	writeback_set_ratelimit();
 
@@ -482,6 +485,29 @@
 }
 
 
+/*
+ * called by cpu_up() to online a node without onlined memory.
+ */
+int mem_online_node(int nid)
+{
+	pg_data_t	*pgdat;
+	int	ret;
+
+	lock_system_sleep();
+	pgdat = hotadd_new_pgdat(nid, 0);
+	if (!pgdat) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	node_set_online(nid);
+	ret = register_one_node(nid);
+	BUG_ON(ret);
+
+out:
+	unlock_system_sleep();
+	return ret;
+}
+
 /* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
 int __ref add_memory(int nid, u64 start, u64 size)
 {
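
The rule these hunks introduce: any runtime zonelist rebuild must now
serialize on zonelists_mutex, since online_pages() can rebuild
concurrently. A condensed sketch of the locked section as used above:

    mutex_lock(&zonelists_mutex);
    if (need_zonelists_rebuild)
        build_all_zonelists(zone);  /* first pages in a new zone */
    else
        zone_pcp_update(zone);      /* zone already populated */
    mutex_unlock(&zonelists_mutex);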
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 08f40a2..7575101 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -119,7 +119,22 @@
 
 static const struct mempolicy_operations {
 	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
-	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
+	/*
+	 * If the read-side task has no lock to protect task->mempolicy, the
+	 * write-side task will rebind task->mempolicy in two steps. The
+	 * first step sets all the newly allowed nodes, and the second step
+	 * clears all the disallowed nodes. In this way, we avoid a window
+	 * in which no node is available to allocate a page from.
+	 * If we have a lock to protect task->mempolicy on the read side,
+	 * we rebind directly.
+	 *
+	 * step:
+	 * 	MPOL_REBIND_ONCE - do rebind work at once
+	 * 	MPOL_REBIND_STEP1 - set all the newly allowed nodes
+	 * 	MPOL_REBIND_STEP2 - clear all the disallowed nodes
+	 */
+	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes,
+			enum mpol_rebind_step step);
 } mpol_ops[MPOL_MAX];
 
 /* Check that the nodemask contains at least one populated zone */
@@ -127,9 +142,6 @@
 {
 	int nd, k;
 
-	/* Check that there is something useful in this mask */
-	k = policy_zone;
-
 	for_each_node_mask(nd, *nodemask) {
 		struct zone *z;
 
@@ -145,7 +157,7 @@
 
 static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
 {
-	return pol->flags & (MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES);
+	return pol->flags & MPOL_MODE_FLAGS;
 }
 
 static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
@@ -277,12 +289,19 @@
 	kmem_cache_free(policy_cache, p);
 }
 
-static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
+static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes,
+				enum mpol_rebind_step step)
 {
 }
 
-static void mpol_rebind_nodemask(struct mempolicy *pol,
-				 const nodemask_t *nodes)
+/*
+ * step:
+ * 	MPOL_REBIND_ONCE  - do rebind work at once
+ * 	MPOL_REBIND_STEP1 - set all the newly allowed nodes
+ * 	MPOL_REBIND_STEP2 - clear all the disallowed nodes
+ */
+static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes,
+				 enum mpol_rebind_step step)
 {
 	nodemask_t tmp;
 
@@ -291,12 +310,31 @@
 	else if (pol->flags & MPOL_F_RELATIVE_NODES)
 		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
 	else {
-		nodes_remap(tmp, pol->v.nodes, pol->w.cpuset_mems_allowed,
-			    *nodes);
-		pol->w.cpuset_mems_allowed = *nodes;
+		/*
+		 * if step == 1, we use ->w.cpuset_mems_allowed to cache the
+		 * result
+		 */
+		if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP1) {
+			nodes_remap(tmp, pol->v.nodes,
+					pol->w.cpuset_mems_allowed, *nodes);
+			pol->w.cpuset_mems_allowed = step ? tmp : *nodes;
+		} else if (step == MPOL_REBIND_STEP2) {
+			tmp = pol->w.cpuset_mems_allowed;
+			pol->w.cpuset_mems_allowed = *nodes;
+		} else
+			BUG();
 	}
 
-	pol->v.nodes = tmp;
+	if (nodes_empty(tmp))
+		tmp = *nodes;
+
+	if (step == MPOL_REBIND_STEP1)
+		nodes_or(pol->v.nodes, pol->v.nodes, tmp);
+	else if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP2)
+		pol->v.nodes = tmp;
+	else
+		BUG();
+
 	if (!node_isset(current->il_next, tmp)) {
 		current->il_next = next_node(current->il_next, tmp);
 		if (current->il_next >= MAX_NUMNODES)
@@ -307,7 +345,8 @@
 }
 
 static void mpol_rebind_preferred(struct mempolicy *pol,
-				  const nodemask_t *nodes)
+				  const nodemask_t *nodes,
+				  enum mpol_rebind_step step)
 {
 	nodemask_t tmp;
 
@@ -330,16 +369,45 @@
 	}
 }
 
-/* Migrate a policy to a different set of nodes */
-static void mpol_rebind_policy(struct mempolicy *pol,
-			       const nodemask_t *newmask)
+/*
+ * mpol_rebind_policy - Migrate a policy to a different set of nodes
+ *
+ * If the read-side task has no lock to protect task->mempolicy, the
+ * write-side task will rebind task->mempolicy in two steps. The first
+ * step sets all the newly allowed nodes, and the second step clears
+ * all the disallowed nodes. In this way, we avoid a window in which
+ * no node is available to allocate a page from.
+ * If we have a lock to protect task->mempolicy on the read side, we
+ * rebind directly.
+ *
+ * step:
+ * 	MPOL_REBIND_ONCE  - do rebind work at once
+ * 	MPOL_REBIND_STEP1 - set all the newly allowed nodes
+ * 	MPOL_REBIND_STEP2 - clear all the disallowed nodes
+ */
+static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask,
+				enum mpol_rebind_step step)
 {
 	if (!pol)
 		return;
-	if (!mpol_store_user_nodemask(pol) &&
+	if (!mpol_store_user_nodemask(pol) && step == 0 &&
 	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
 		return;
-	mpol_ops[pol->mode].rebind(pol, newmask);
+
+	if (step == MPOL_REBIND_STEP1 && (pol->flags & MPOL_F_REBINDING))
+		return;
+
+	if (step == MPOL_REBIND_STEP2 && !(pol->flags & MPOL_F_REBINDING))
+		BUG();
+
+	if (step == MPOL_REBIND_STEP1)
+		pol->flags |= MPOL_F_REBINDING;
+	else if (step == MPOL_REBIND_STEP2)
+		pol->flags &= ~MPOL_F_REBINDING;
+	else if (step >= MPOL_REBIND_NSTEP)
+		BUG();
+
+	mpol_ops[pol->mode].rebind(pol, newmask, step);
 }
 
 /*
@@ -349,9 +417,10 @@
  * Called with task's alloc_lock held.
  */
 
-void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
+void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
+			enum mpol_rebind_step step)
 {
-	mpol_rebind_policy(tsk->mempolicy, new);
+	mpol_rebind_policy(tsk->mempolicy, new, step);
 }
 
 /*
@@ -366,7 +435,7 @@
 
 	down_write(&mm->mmap_sem);
 	for (vma = mm->mmap; vma; vma = vma->vm_next)
-		mpol_rebind_policy(vma->vm_policy, new);
+		mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE);
 	up_write(&mm->mmap_sem);
 }
 
@@ -859,7 +928,7 @@
 	nodes_clear(nmask);
 	node_set(source, nmask);
 
-	check_range(mm, mm->mmap->vm_start, TASK_SIZE, &nmask,
+	check_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
 			flags | MPOL_MF_DISCONTIG_OK, &pagelist);
 
 	if (!list_empty(&pagelist))
@@ -1444,15 +1513,13 @@
 		/*
 		 * Normally, MPOL_BIND allocations are node-local within the
 		 * allowed nodemask.  However, if __GFP_THISNODE is set and the
-		 * current node is part of the mask, we use the zonelist for
+		 * current node isn't part of the mask, we use the zonelist for
 		 * the first node in the mask instead.
 		 */
 		if (unlikely(gfp & __GFP_THISNODE) &&
 				unlikely(!node_isset(nd, policy->v.nodes)))
 			nd = first_node(policy->v.nodes);
 		break;
-	case MPOL_INTERLEAVE: /* should not happen */
-		break;
 	default:
 		BUG();
 	}
@@ -1572,6 +1639,8 @@
  * to the struct mempolicy for conditional unref after allocation.
  * If the effective policy is 'BIND, returns a pointer to the mempolicy's
  * @nodemask for filtering the zonelist.
+ *
+ * Must be protected by get_mems_allowed()
  */
 struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
 				gfp_t gfp_flags, struct mempolicy **mpol,
@@ -1617,6 +1686,7 @@
 	if (!(mask && current->mempolicy))
 		return false;
 
+	task_lock(current);
 	mempolicy = current->mempolicy;
 	switch (mempolicy->mode) {
 	case MPOL_PREFERRED:
@@ -1636,6 +1706,7 @@
 	default:
 		BUG();
 	}
+	task_unlock(current);
 
 	return true;
 }
@@ -1683,13 +1754,17 @@
 {
 	struct mempolicy *pol = get_vma_policy(current, vma, addr);
 	struct zonelist *zl;
+	struct page *page;
 
+	get_mems_allowed();
 	if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
 		unsigned nid;
 
 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
 		mpol_cond_put(pol);
-		return alloc_page_interleave(gfp, 0, nid);
+		page = alloc_page_interleave(gfp, 0, nid);
+		put_mems_allowed();
+		return page;
 	}
 	zl = policy_zonelist(gfp, pol);
 	if (unlikely(mpol_needs_cond_ref(pol))) {
@@ -1699,12 +1774,15 @@
 		struct page *page =  __alloc_pages_nodemask(gfp, 0,
 						zl, policy_nodemask(gfp, pol));
 		__mpol_put(pol);
+		put_mems_allowed();
 		return page;
 	}
 	/*
 	 * fast path:  default or task policy
 	 */
-	return __alloc_pages_nodemask(gfp, 0, zl, policy_nodemask(gfp, pol));
+	page = __alloc_pages_nodemask(gfp, 0, zl, policy_nodemask(gfp, pol));
+	put_mems_allowed();
+	return page;
 }
 
 /**
@@ -1729,18 +1807,23 @@
 struct page *alloc_pages_current(gfp_t gfp, unsigned order)
 {
 	struct mempolicy *pol = current->mempolicy;
+	struct page *page;
 
 	if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
 		pol = &default_policy;
 
+	get_mems_allowed();
 	/*
 	 * No reference counting needed for current->mempolicy
 	 * nor system default_policy
 	 */
 	if (pol->mode == MPOL_INTERLEAVE)
-		return alloc_page_interleave(gfp, order, interleave_nodes(pol));
-	return __alloc_pages_nodemask(gfp, order,
+		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
+	else
+		page = __alloc_pages_nodemask(gfp, order,
 			policy_zonelist(gfp, pol), policy_nodemask(gfp, pol));
+	put_mems_allowed();
+	return page;
 }
 EXPORT_SYMBOL(alloc_pages_current);
 
@@ -1750,6 +1833,9 @@
  * with the mems_allowed returned by cpuset_mems_allowed().  This
  * keeps mempolicies cpuset relative after its cpuset moves.  See
  * further kernel/cpuset.c update_nodemask().
+ *
+ * current's mempolicy may be rebound by another task (the task that changes
+ * the cpuset's mems), so we needn't do the rebind work for the current task.
  */
 
 /* Slow path of a mempolicy duplicate */
@@ -1759,13 +1845,24 @@
 
 	if (!new)
 		return ERR_PTR(-ENOMEM);
+
+	/* task's mempolicy is protected by alloc_lock */
+	if (old == current->mempolicy) {
+		task_lock(current);
+		*new = *old;
+		task_unlock(current);
+	} else
+		*new = *old;
+
 	rcu_read_lock();
 	if (current_cpuset_is_being_rebound()) {
 		nodemask_t mems = cpuset_mems_allowed(current);
-		mpol_rebind_policy(old, &mems);
+		if (new->flags & MPOL_F_REBINDING)
+			mpol_rebind_policy(new, &mems, MPOL_REBIND_STEP2);
+		else
+			mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
 	}
 	rcu_read_unlock();
-	*new = *old;
 	atomic_set(&new->refcnt, 1);
 	return new;
 }
@@ -1792,16 +1889,6 @@
 	return tompol;
 }
 
-static int mpol_match_intent(const struct mempolicy *a,
-			     const struct mempolicy *b)
-{
-	if (a->flags != b->flags)
-		return 0;
-	if (!mpol_store_user_nodemask(a))
-		return 1;
-	return nodes_equal(a->w.user_nodemask, b->w.user_nodemask);
-}
-
 /* Slow path of a mempolicy comparison */
 int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
 {
@@ -1809,8 +1896,12 @@
 		return 0;
 	if (a->mode != b->mode)
 		return 0;
-	if (a->mode != MPOL_DEFAULT && !mpol_match_intent(a, b))
+	if (a->flags != b->flags)
 		return 0;
+	if (mpol_store_user_nodemask(a))
+		if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
+			return 0;
+
 	switch (a->mode) {
 	case MPOL_BIND:
 		/* Fall through */
@@ -2006,26 +2097,22 @@
 			return;
 		/* contextualize the tmpfs mount point mempolicy */
 		new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
-		if (IS_ERR(new)) {
-			mpol_put(mpol);	/* drop our ref on sb mpol */
-			NODEMASK_SCRATCH_FREE(scratch);
-			return;		/* no valid nodemask intersection */
-		}
+		if (IS_ERR(new))
+			goto put_free; /* no valid nodemask intersection */
 
 		task_lock(current);
 		ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
 		task_unlock(current);
 		mpol_put(mpol);	/* drop our ref on sb mpol */
-		if (ret) {
-			NODEMASK_SCRATCH_FREE(scratch);
-			mpol_put(new);
-			return;
-		}
+		if (ret)
+			goto put_free;
 
 		/* Create pseudo-vma that contains just the policy */
 		memset(&pvma, 0, sizeof(struct vm_area_struct));
 		pvma.vm_end = TASK_SIZE;	/* policy covers entire file */
 		mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
+
+put_free:
 		mpol_put(new);			/* drop initial ref */
 		NODEMASK_SCRATCH_FREE(scratch);
 	}
@@ -2132,9 +2219,15 @@
  * "local" is pseudo-policy:  MPOL_PREFERRED with MPOL_F_LOCAL flag
  * Used only for mpol_parse_str() and mpol_to_str()
  */
-#define MPOL_LOCAL (MPOL_INTERLEAVE + 1)
-static const char * const policy_types[] =
-	{ "default", "prefer", "bind", "interleave", "local" };
+#define MPOL_LOCAL MPOL_MAX
+static const char * const policy_modes[] =
+{
+	[MPOL_DEFAULT]    = "default",
+	[MPOL_PREFERRED]  = "prefer",
+	[MPOL_BIND]       = "bind",
+	[MPOL_INTERLEAVE] = "interleave",
+	[MPOL_LOCAL]      = "local"
+};
 
 
 #ifdef CONFIG_TMPFS
@@ -2159,12 +2252,11 @@
 int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
 {
 	struct mempolicy *new = NULL;
-	unsigned short uninitialized_var(mode);
+	unsigned short mode;
 	unsigned short uninitialized_var(mode_flags);
 	nodemask_t nodes;
 	char *nodelist = strchr(str, ':');
 	char *flags = strchr(str, '=');
-	int i;
 	int err = 1;
 
 	if (nodelist) {
@@ -2180,13 +2272,12 @@
 	if (flags)
 		*flags++ = '\0';	/* terminate mode string */
 
-	for (i = 0; i <= MPOL_LOCAL; i++) {
-		if (!strcmp(str, policy_types[i])) {
-			mode = i;
+	for (mode = 0; mode <= MPOL_LOCAL; mode++) {
+		if (!strcmp(str, policy_modes[mode])) {
 			break;
 		}
 	}
-	if (i > MPOL_LOCAL)
+	if (mode > MPOL_LOCAL)
 		goto out;
 
 	switch (mode) {
@@ -2250,7 +2341,10 @@
 	if (IS_ERR(new))
 		goto out;
 
-	{
+	if (no_context) {
+		/* save for contextualization */
+		new->w.user_nodemask = nodes;
+	} else {
 		int ret;
 		NODEMASK_SCRATCH(scratch);
 		if (scratch) {
@@ -2266,10 +2360,6 @@
 		}
 	}
 	err = 0;
-	if (no_context) {
-		/* save for contextualization */
-		new->w.user_nodemask = nodes;
-	}
 
 out:
 	/* Restore string for error message */
@@ -2338,11 +2428,11 @@
 		BUG();
 	}
 
-	l = strlen(policy_types[mode]);
+	l = strlen(policy_modes[mode]);
 	if (buffer + maxlen < p + l + 1)
 		return -ENOSPC;
 
-	strcpy(p, policy_types[mode]);
+	strcpy(p, policy_modes[mode]);
 	p += l;
 
 	if (flags & MPOL_MODE_FLAGS) {
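
Why two steps: a one-step rebind can momentarily expose an empty
nodemask to lockless readers of task->mempolicy. Growing the mask first
and shrinking it second keeps it non-empty throughout. A hedged
illustration with a hypothetical helper, moving an interleave mask from
{0,1} to {2,3}:

    static void rebind_two_step_sketch(nodemask_t *mask, const nodemask_t *new)
    {
        nodemask_t tmp = *new;

        /* MPOL_REBIND_STEP1: {0,1} grows to {0,1,2,3} */
        nodes_or(*mask, *mask, tmp);

        /* MPOL_REBIND_STEP2: shrink to the allowed set {2,3} */
        *mask = tmp;
    }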
diff --git a/mm/migrate.c b/mm/migrate.c
index d3f3f7f..09e2471 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -40,7 +40,8 @@
 
 /*
  * migrate_prep() needs to be called before we start compiling a list of pages
- * to be migrated using isolate_lru_page().
+ * to be migrated using isolate_lru_page(). If scheduling work on other CPUs is
+ * undesirable, use migrate_prep_local().
  */
 int migrate_prep(void)
 {
@@ -55,26 +56,29 @@
 	return 0;
 }
 
+/* Do the necessary work of migrate_prep but not if it involves other CPUs */
+int migrate_prep_local(void)
+{
+	lru_add_drain();
+
+	return 0;
+}
+
 /*
  * Add isolated pages on the list back to the LRU under page lock
  * to avoid leaking evictable pages back onto unevictable list.
- *
- * returns the number of pages put back.
  */
-int putback_lru_pages(struct list_head *l)
+void putback_lru_pages(struct list_head *l)
 {
 	struct page *page;
 	struct page *page2;
-	int count = 0;
 
 	list_for_each_entry_safe(page, page2, l, lru) {
 		list_del(&page->lru);
 		dec_zone_page_state(page, NR_ISOLATED_ANON +
 				page_is_file_cache(page));
 		putback_lru_page(page);
-		count++;
 	}
-	return count;
 }
 
 /*
@@ -490,7 +494,8 @@
  *   < 0 - error code
  *  == 0 - success
  */
-static int move_to_new_page(struct page *newpage, struct page *page)
+static int move_to_new_page(struct page *newpage, struct page *page,
+						int remap_swapcache)
 {
 	struct address_space *mapping;
 	int rc;
@@ -525,10 +530,12 @@
 	else
 		rc = fallback_migrate_page(mapping, newpage, page);
 
-	if (!rc)
-		remove_migration_ptes(page, newpage);
-	else
+	if (rc) {
 		newpage->mapping = NULL;
+	} else {
+		if (remap_swapcache)
+			remove_migration_ptes(page, newpage);
+	}
 
 	unlock_page(newpage);
 
@@ -545,9 +552,11 @@
 	int rc = 0;
 	int *result = NULL;
 	struct page *newpage = get_new_page(page, private, &result);
+	int remap_swapcache = 1;
 	int rcu_locked = 0;
 	int charge = 0;
 	struct mem_cgroup *mem = NULL;
+	struct anon_vma *anon_vma = NULL;
 
 	if (!newpage)
 		return -ENOMEM;
@@ -604,6 +613,34 @@
 	if (PageAnon(page)) {
 		rcu_read_lock();
 		rcu_locked = 1;
+
+		/* Determine how to safely use anon_vma */
+		if (!page_mapped(page)) {
+			if (!PageSwapCache(page))
+				goto rcu_unlock;
+
+			/*
+			 * We cannot be sure that the anon_vma of an unmapped
+			 * swapcache page is safe to use because we don't
+			 * know in advance if the VMA that this page belonged
+			 * to still exists. If the VMA and others sharing the
+			 * data have been freed, then the anon_vma could
+			 * already be invalid.
+			 *
+			 * To avoid this possibility, swapcache pages get
+			 * migrated but are not remapped when migration
+			 * completes
+			 */
+			remap_swapcache = 0;
+		} else {
+			/*
+			 * Take a reference count on the anon_vma if the
+			 * page is mapped so that it is guaranteed to
+			 * exist when the page is remapped later
+			 */
+			anon_vma = page_anon_vma(page);
+			atomic_inc(&anon_vma->external_refcount);
+		}
 	}
 
 	/*
@@ -638,11 +675,20 @@
 
 skip_unmap:
 	if (!page_mapped(page))
-		rc = move_to_new_page(newpage, page);
+		rc = move_to_new_page(newpage, page, remap_swapcache);
 
-	if (rc)
+	if (rc && remap_swapcache)
 		remove_migration_ptes(page, page);
 rcu_unlock:
+
+	/* Drop an anon_vma reference if we took one */
+	if (anon_vma && atomic_dec_and_lock(&anon_vma->external_refcount, &anon_vma->lock)) {
+		int empty = list_empty(&anon_vma->head);
+		spin_unlock(&anon_vma->lock);
+		if (empty)
+			anon_vma_free(anon_vma);
+	}
+
 	if (rcu_locked)
 		rcu_read_unlock();
 uncharge:
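
The reference drop above mirrors KSM's drop_anon_vma() earlier in this
series; both now share the external_refcount field. The common pattern,
extracted as a sketch (a hypothetical helper, not introduced by this
patch):

    static void drop_anon_vma_external(struct anon_vma *anon_vma)
    {
        if (atomic_dec_and_lock(&anon_vma->external_refcount,
                                &anon_vma->lock)) {
            int empty = list_empty(&anon_vma->head);

            spin_unlock(&anon_vma->lock);
            if (empty)
                anon_vma_free(anon_vma);
        }
    }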
diff --git a/mm/mincore.c b/mm/mincore.c
index f77433c..9ac42dc 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -19,6 +19,40 @@
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 
+static void mincore_hugetlb_page_range(struct vm_area_struct *vma,
+				unsigned long addr, unsigned long end,
+				unsigned char *vec)
+{
+#ifdef CONFIG_HUGETLB_PAGE
+	struct hstate *h;
+
+	h = hstate_vma(vma);
+	while (1) {
+		unsigned char present;
+		pte_t *ptep;
+		/*
+		 * Huge pages are always in RAM for now, but
+		 * theoretically it needs to be checked.
+		 */
+		ptep = huge_pte_offset(current->mm,
+				       addr & huge_page_mask(h));
+		present = ptep && !huge_pte_none(huge_ptep_get(ptep));
+		while (1) {
+			*vec = present;
+			vec++;
+			addr += PAGE_SIZE;
+			if (addr == end)
+				return;
+			/* check hugepage border */
+			if (!(addr & ~huge_page_mask(h)))
+				break;
+		}
+	}
+#else
+	BUG();
+#endif
+}
+
 /*
  * Later we can get more picky about what "in core" means precisely.
  * For now, simply check to see if the page is in the page cache,
@@ -49,136 +83,16 @@
 	return present;
 }
 
-/*
- * Do a chunk of "sys_mincore()". We've already checked
- * all the arguments, we hold the mmap semaphore: we should
- * just return the amount of info we're asked for.
- */
-static long do_mincore(unsigned long addr, unsigned char *vec, unsigned long pages)
+static void mincore_unmapped_range(struct vm_area_struct *vma,
+				unsigned long addr, unsigned long end,
+				unsigned char *vec)
 {
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *ptep;
-	spinlock_t *ptl;
-	unsigned long nr;
+	unsigned long nr = (end - addr) >> PAGE_SHIFT;
 	int i;
-	pgoff_t pgoff;
-	struct vm_area_struct *vma = find_vma(current->mm, addr);
 
-	/*
-	 * find_vma() didn't find anything above us, or we're
-	 * in an unmapped hole in the address space: ENOMEM.
-	 */
-	if (!vma || addr < vma->vm_start)
-		return -ENOMEM;
-
-#ifdef CONFIG_HUGETLB_PAGE
-	if (is_vm_hugetlb_page(vma)) {
-		struct hstate *h;
-		unsigned long nr_huge;
-		unsigned char present;
-
-		i = 0;
-		nr = min(pages, (vma->vm_end - addr) >> PAGE_SHIFT);
-		h = hstate_vma(vma);
-		nr_huge = ((addr + pages * PAGE_SIZE - 1) >> huge_page_shift(h))
-			  - (addr >> huge_page_shift(h)) + 1;
-		nr_huge = min(nr_huge,
-			      (vma->vm_end - addr) >> huge_page_shift(h));
-		while (1) {
-			/* hugepage always in RAM for now,
-			 * but generally it needs to be check */
-			ptep = huge_pte_offset(current->mm,
-					       addr & huge_page_mask(h));
-			present = !!(ptep &&
-				     !huge_pte_none(huge_ptep_get(ptep)));
-			while (1) {
-				vec[i++] = present;
-				addr += PAGE_SIZE;
-				/* reach buffer limit */
-				if (i == nr)
-					return nr;
-				/* check hugepage border */
-				if (!((addr & ~huge_page_mask(h))
-				      >> PAGE_SHIFT))
-					break;
-			}
-		}
-		return nr;
-	}
-#endif
-
-	/*
-	 * Calculate how many pages there are left in the last level of the
-	 * PTE array for our address.
-	 */
-	nr = PTRS_PER_PTE - ((addr >> PAGE_SHIFT) & (PTRS_PER_PTE-1));
-
-	/*
-	 * Don't overrun this vma
-	 */
-	nr = min(nr, (vma->vm_end - addr) >> PAGE_SHIFT);
-
-	/*
-	 * Don't return more than the caller asked for
-	 */
-	nr = min(nr, pages);
-
-	pgd = pgd_offset(vma->vm_mm, addr);
-	if (pgd_none_or_clear_bad(pgd))
-		goto none_mapped;
-	pud = pud_offset(pgd, addr);
-	if (pud_none_or_clear_bad(pud))
-		goto none_mapped;
-	pmd = pmd_offset(pud, addr);
-	if (pmd_none_or_clear_bad(pmd))
-		goto none_mapped;
-
-	ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
-	for (i = 0; i < nr; i++, ptep++, addr += PAGE_SIZE) {
-		unsigned char present;
-		pte_t pte = *ptep;
-
-		if (pte_present(pte)) {
-			present = 1;
-
-		} else if (pte_none(pte)) {
-			if (vma->vm_file) {
-				pgoff = linear_page_index(vma, addr);
-				present = mincore_page(vma->vm_file->f_mapping,
-							pgoff);
-			} else
-				present = 0;
-
-		} else if (pte_file(pte)) {
-			pgoff = pte_to_pgoff(pte);
-			present = mincore_page(vma->vm_file->f_mapping, pgoff);
-
-		} else { /* pte is a swap entry */
-			swp_entry_t entry = pte_to_swp_entry(pte);
-			if (is_migration_entry(entry)) {
-				/* migration entries are always uptodate */
-				present = 1;
-			} else {
-#ifdef CONFIG_SWAP
-				pgoff = entry.val;
-				present = mincore_page(&swapper_space, pgoff);
-#else
-				WARN_ON(1);
-				present = 1;
-#endif
-			}
-		}
-
-		vec[i] = present;
-	}
-	pte_unmap_unlock(ptep-1, ptl);
-
-	return nr;
-
-none_mapped:
 	if (vma->vm_file) {
+		pgoff_t pgoff;
+
 		pgoff = linear_page_index(vma, addr);
 		for (i = 0; i < nr; i++, pgoff++)
 			vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff);
@@ -186,8 +100,133 @@
 		for (i = 0; i < nr; i++)
 			vec[i] = 0;
 	}
+}
 
-	return nr;
+static void mincore_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
+			unsigned long addr, unsigned long end,
+			unsigned char *vec)
+{
+	unsigned long next;
+	spinlock_t *ptl;
+	pte_t *ptep;
+
+	ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
+	do {
+		pte_t pte = *ptep;
+		pgoff_t pgoff;
+
+		next = addr + PAGE_SIZE;
+		if (pte_none(pte))
+			mincore_unmapped_range(vma, addr, next, vec);
+		else if (pte_present(pte))
+			*vec = 1;
+		else if (pte_file(pte)) {
+			pgoff = pte_to_pgoff(pte);
+			*vec = mincore_page(vma->vm_file->f_mapping, pgoff);
+		} else { /* pte is a swap entry */
+			swp_entry_t entry = pte_to_swp_entry(pte);
+
+			if (is_migration_entry(entry)) {
+				/* migration entries are always uptodate */
+				*vec = 1;
+			} else {
+#ifdef CONFIG_SWAP
+				pgoff = entry.val;
+				*vec = mincore_page(&swapper_space, pgoff);
+#else
+				WARN_ON(1);
+				*vec = 1;
+#endif
+			}
+		}
+		vec++;
+	} while (ptep++, addr = next, addr != end);
+	pte_unmap_unlock(ptep - 1, ptl);
+}
+
+static void mincore_pmd_range(struct vm_area_struct *vma, pud_t *pud,
+			unsigned long addr, unsigned long end,
+			unsigned char *vec)
+{
+	unsigned long next;
+	pmd_t *pmd;
+
+	pmd = pmd_offset(pud, addr);
+	do {
+		next = pmd_addr_end(addr, end);
+		if (pmd_none_or_clear_bad(pmd))
+			mincore_unmapped_range(vma, addr, next, vec);
+		else
+			mincore_pte_range(vma, pmd, addr, next, vec);
+		vec += (next - addr) >> PAGE_SHIFT;
+	} while (pmd++, addr = next, addr != end);
+}
+
+static void mincore_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
+			unsigned long addr, unsigned long end,
+			unsigned char *vec)
+{
+	unsigned long next;
+	pud_t *pud;
+
+	pud = pud_offset(pgd, addr);
+	do {
+		next = pud_addr_end(addr, end);
+		if (pud_none_or_clear_bad(pud))
+			mincore_unmapped_range(vma, addr, next, vec);
+		else
+			mincore_pmd_range(vma, pud, addr, next, vec);
+		vec += (next - addr) >> PAGE_SHIFT;
+	} while (pud++, addr = next, addr != end);
+}
+
+static void mincore_page_range(struct vm_area_struct *vma,
+			unsigned long addr, unsigned long end,
+			unsigned char *vec)
+{
+	unsigned long next;
+	pgd_t *pgd;
+
+	pgd = pgd_offset(vma->vm_mm, addr);
+	do {
+		next = pgd_addr_end(addr, end);
+		if (pgd_none_or_clear_bad(pgd))
+			mincore_unmapped_range(vma, addr, next, vec);
+		else
+			mincore_pud_range(vma, pgd, addr, next, vec);
+		vec += (next - addr) >> PAGE_SHIFT;
+	} while (pgd++, addr = next, addr != end);
+}
+
+/*
+ * Do a chunk of "sys_mincore()". We've already checked
+ * all the arguments, we hold the mmap semaphore: we should
+ * just return the amount of info we're asked for.
+ */
+static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *vec)
+{
+	struct vm_area_struct *vma;
+	unsigned long end;
+
+	vma = find_vma(current->mm, addr);
+	if (!vma || addr < vma->vm_start)
+		return -ENOMEM;
+
+	end = min(vma->vm_end, addr + (pages << PAGE_SHIFT));
+
+	if (is_vm_hugetlb_page(vma)) {
+		mincore_hugetlb_page_range(vma, addr, end, vec);
+		return (end - addr) >> PAGE_SHIFT;
+	}
+
+	end = pmd_addr_end(addr, end);
+
+	if (is_vm_hugetlb_page(vma))
+		mincore_hugetlb_page_range(vma, addr, end, vec);
+	else
+		mincore_page_range(vma, addr, end, vec);
+
+	return (end - addr) >> PAGE_SHIFT;
 }
 
 /*
@@ -247,7 +286,7 @@
 		 * the temporary buffer size.
 		 */
 		down_read(&current->mm->mmap_sem);
-		retval = do_mincore(start, tmp, min(pages, PAGE_SIZE));
+		retval = do_mincore(start, min(pages, PAGE_SIZE), tmp);
 		up_read(&current->mm->mmap_sem);
 
 		if (retval <= 0)
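
The page-table walk introduced above services the mincore(2) syscall. For
orientation, a minimal userspace sketch of the interface being reworked
here (illustrative only; error handling elided):

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		long psize = sysconf(_SC_PAGESIZE);
		size_t len = 16 * psize;
		unsigned char *vec = malloc(len / psize);
		void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		memset(buf, 0, len);			/* fault the pages in */
		if (mincore(buf, len, vec) == 0) {
			int resident = 0;
			for (size_t i = 0; i < len / psize; i++)
				resident += vec[i] & 1;	/* LSB = resident */
			printf("%d of %zu pages resident\n",
			       resident, len / psize);
		}
		return 0;
	}
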
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a6326c7..08b3499 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -49,6 +49,7 @@
 #include <linux/debugobjects.h>
 #include <linux/kmemleak.h>
 #include <linux/memory.h>
+#include <linux/compaction.h>
 #include <trace/events/kmem.h>
 #include <linux/ftrace_event.h>
 
@@ -475,6 +476,8 @@
 		int migratetype)
 {
 	unsigned long page_idx;
+	unsigned long combined_idx;
+	struct page *buddy;
 
 	if (unlikely(PageCompound(page)))
 		if (unlikely(destroy_compound_page(page, order)))
@@ -488,9 +491,6 @@
 	VM_BUG_ON(bad_range(zone, page));
 
 	while (order < MAX_ORDER-1) {
-		unsigned long combined_idx;
-		struct page *buddy;
-
 		buddy = __page_find_buddy(page, page_idx, order);
 		if (!page_is_buddy(page, buddy, order))
 			break;
@@ -505,8 +505,29 @@
 		order++;
 	}
 	set_page_order(page, order);
-	list_add(&page->lru,
-		&zone->free_area[order].free_list[migratetype]);
+
+	/*
+	 * If this is not the largest possible page, check if the buddy
+	 * of the next-highest order is free. If it is, it's possible
+	 * that pages are being freed that will coalesce soon. In case
+	 * that is happening, add the free page to the tail of the list
+	 * so it's less likely to be used soon and more likely to be merged
+	 * as a higher-order page.
+	 */
+	if ((order < MAX_ORDER-1) && pfn_valid_within(page_to_pfn(buddy))) {
+		struct page *higher_page, *higher_buddy;
+		combined_idx = __find_combined_index(page_idx, order);
+		higher_page = page + combined_idx - page_idx;
+		higher_buddy = __page_find_buddy(higher_page, combined_idx, order + 1);
+		if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
+			list_add_tail(&page->lru,
+				&zone->free_area[order].free_list[migratetype]);
+			goto out;
+		}
+	}
+
+	list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
+out:
 	zone->free_area[order].nr_free++;
 }
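
For reference, the buddy arithmetic that __page_find_buddy() and
__find_combined_index() rely on reduces to flipping or clearing bit
`order' of the page index. A small host-side sketch (not kernel code):

	#include <stdio.h>

	/* buddy of the 2^order block at page_idx: flip bit `order' */
	static unsigned long buddy_index(unsigned long page_idx,
					 unsigned int order)
	{
		return page_idx ^ (1UL << order);
	}

	/* start of the merged 2^(order+1) block: clear bit `order' */
	static unsigned long combined_index(unsigned long page_idx,
					    unsigned int order)
	{
		return page_idx & ~(1UL << order);
	}

	int main(void)
	{
		/* freeing the order-2 block at index 12: its buddy is 8,
		 * and the merged order-3 block also starts at index 8 */
		printf("buddy=%lu combined=%lu\n",
		       buddy_index(12, 2), combined_index(12, 2));
		return 0;
	}
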
 
@@ -599,20 +620,23 @@
 	spin_unlock(&zone->lock);
 }
 
-static void __free_pages_ok(struct page *page, unsigned int order)
+static bool free_pages_prepare(struct page *page, unsigned int order)
 {
-	unsigned long flags;
 	int i;
 	int bad = 0;
-	int wasMlocked = __TestClearPageMlocked(page);
 
 	trace_mm_page_free_direct(page, order);
 	kmemcheck_free_shadow(page, order);
 
-	for (i = 0 ; i < (1 << order) ; ++i)
-		bad += free_pages_check(page + i);
+	for (i = 0; i < (1 << order); i++) {
+		struct page *pg = page + i;
+
+		if (PageAnon(pg))
+			pg->mapping = NULL;
+		bad += free_pages_check(pg);
+	}
 	if (bad)
-		return;
+		return false;
 
 	if (!PageHighMem(page)) {
 		debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
@@ -622,6 +646,17 @@
 	arch_free_page(page, order);
 	kernel_map_pages(page, 1 << order, 0);
 
+	return true;
+}
+
+static void __free_pages_ok(struct page *page, unsigned int order)
+{
+	unsigned long flags;
+	int wasMlocked = __TestClearPageMlocked(page);
+
+	if (!free_pages_prepare(page, order))
+		return;
+
 	local_irq_save(flags);
 	if (unlikely(wasMlocked))
 		free_page_mlock(page);
@@ -1107,21 +1142,9 @@
 	int migratetype;
 	int wasMlocked = __TestClearPageMlocked(page);
 
-	trace_mm_page_free_direct(page, 0);
-	kmemcheck_free_shadow(page, 0);
-
-	if (PageAnon(page))
-		page->mapping = NULL;
-	if (free_pages_check(page))
+	if (!free_pages_prepare(page, 0))
 		return;
 
-	if (!PageHighMem(page)) {
-		debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
-		debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
-	}
-	arch_free_page(page, 0);
-	kernel_map_pages(page, 1, 0);
-
 	migratetype = get_pageblock_migratetype(page);
 	set_page_private(page, migratetype);
 	local_irq_save(flags);
@@ -1188,6 +1211,51 @@
 }
 
 /*
+ * Similar to split_page except the page is already free. As this is only
+ * being used for migration, the migratetype of the block also changes.
+ * As this is called with interrupts disabled, the caller is responsible
+ * for calling arch_alloc_page() and kernel_map_pages() after interrupts
+ * are enabled.
+ *
+ * Note: this is probably too low level an operation for use in drivers.
+ * Please consult with lkml before using this in your driver.
+ */
+int split_free_page(struct page *page)
+{
+	unsigned int order;
+	unsigned long watermark;
+	struct zone *zone;
+
+	BUG_ON(!PageBuddy(page));
+
+	zone = page_zone(page);
+	order = page_order(page);
+
+	/* Obey watermarks as if the page was being allocated */
+	watermark = low_wmark_pages(zone) + (1 << order);
+	if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
+		return 0;
+
+	/* Remove page from free list */
+	list_del(&page->lru);
+	zone->free_area[order].nr_free--;
+	rmv_page_order(page);
+	__mod_zone_page_state(zone, NR_FREE_PAGES, -(1UL << order));
+
+	/* Split into individual pages */
+	set_page_refcounted(page);
+	split_page(page, order);
+
+	if (order >= pageblock_order - 1) {
+		struct page *endpage = page + (1 << order) - 1;
+		for (; page < endpage; page += pageblock_nr_pages)
+			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
+	}
+
+	return 1 << order;
+}
+
+/*
  * Really, prep_compound_page() should be called from __rmqueue_bulk().  But
  * we cheat by calling it from here, in the order > 0 path.  Saves a branch
  * or two.
@@ -1693,6 +1761,62 @@
 	return page;
 }
 
+#ifdef CONFIG_COMPACTION
+/* Try memory compaction for high-order allocations before reclaim */
+static struct page *
+__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
+	struct zonelist *zonelist, enum zone_type high_zoneidx,
+	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
+	int migratetype, unsigned long *did_some_progress)
+{
+	struct page *page;
+
+	if (!order || compaction_deferred(preferred_zone))
+		return NULL;
+
+	*did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
+								nodemask);
+	if (*did_some_progress != COMPACT_SKIPPED) {
+
+		/* Page migration frees to the PCP lists but we want merging */
+		drain_pages(get_cpu());
+		put_cpu();
+
+		page = get_page_from_freelist(gfp_mask, nodemask,
+				order, zonelist, high_zoneidx,
+				alloc_flags, preferred_zone,
+				migratetype);
+		if (page) {
+			preferred_zone->compact_considered = 0;
+			preferred_zone->compact_defer_shift = 0;
+			count_vm_event(COMPACTSUCCESS);
+			return page;
+		}
+
+		/*
+		 * It's bad if a compaction run occurs and fails.
+		 * The most likely reason is that pages exist,
+		 * but not enough to satisfy watermarks.
+		 */
+		count_vm_event(COMPACTFAIL);
+		defer_compaction(preferred_zone);
+
+		cond_resched();
+	}
+
+	return NULL;
+}
+#else
+static inline struct page *
+__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
+	struct zonelist *zonelist, enum zone_type high_zoneidx,
+	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
+	int migratetype, unsigned long *did_some_progress)
+{
+	return NULL;
+}
+#endif /* CONFIG_COMPACTION */
+
 /* The really slow allocator path where we enter direct reclaim */
 static inline struct page *
 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
@@ -1879,6 +2003,15 @@
 	if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
 		goto nopage;
 
+	/* Try direct compaction */
+	page = __alloc_pages_direct_compact(gfp_mask, order,
+					zonelist, high_zoneidx,
+					nodemask,
+					alloc_flags, preferred_zone,
+					migratetype, &did_some_progress);
+	if (page)
+		goto got_pg;
+
 	/* Try direct reclaim and then allocating */
 	page = __alloc_pages_direct_reclaim(gfp_mask, order,
 					zonelist, high_zoneidx,
@@ -1970,10 +2103,13 @@
 	if (unlikely(!zonelist->_zonerefs->zone))
 		return NULL;
 
+	get_mems_allowed();
 	/* The preferred zone is used for statistics later */
 	first_zones_zonelist(zonelist, high_zoneidx, nodemask, &preferred_zone);
-	if (!preferred_zone)
+	if (!preferred_zone) {
+		put_mems_allowed();
 		return NULL;
+	}
 
 	/* First allocation attempt */
 	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
@@ -1983,6 +2119,7 @@
 		page = __alloc_pages_slowpath(gfp_mask, order,
 				zonelist, high_zoneidx, nodemask,
 				preferred_zone, migratetype);
+	put_mems_allowed();
 
 	trace_mm_page_alloc(page, order, gfp_mask, migratetype);
 	return page;
@@ -2434,8 +2571,11 @@
 			strncpy((char*)table->data, saved_string,
 				NUMA_ZONELIST_ORDER_LEN);
 			user_zonelist_order = oldval;
-		} else if (oldval != user_zonelist_order)
-			build_all_zonelists();
+		} else if (oldval != user_zonelist_order) {
+			mutex_lock(&zonelists_mutex);
+			build_all_zonelists(NULL);
+			mutex_unlock(&zonelists_mutex);
+		}
 	}
 out:
 	mutex_unlock(&zl_order_mutex);
@@ -2582,7 +2722,7 @@
          * ZONE_DMA and ZONE_DMA32 can be very small area in the system.
 	 * If they are really small and used heavily, the system can fall
 	 * into OOM very easily.
-	 * This function detect ZONE_DMA/DMA32 size and confgigures zone order.
+	 * This function detects ZONE_DMA/DMA32 size and configures zone order.
 	 */
 	/* Is there ZONE_NORMAL ? (ex. ppc has only DMA zone..) */
 	low_kmem_size = 0;
@@ -2594,6 +2734,15 @@
 				if (zone_type < ZONE_NORMAL)
 					low_kmem_size += z->present_pages;
 				total_size += z->present_pages;
+			} else if (zone_type == ZONE_NORMAL) {
+				/*
+				 * If any node has only lowmem, then node order
+				 * is preferred to allow kernel allocations
+				 * locally; otherwise, they can easily infringe
+				 * on other nodes when there is an abundance of
+				 * lowmem available to allocate from.
+				 */
+				return ZONELIST_ORDER_NODE;
 			}
 		}
 	}
@@ -2776,9 +2925,16 @@
  */
 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
 static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
+static void setup_zone_pageset(struct zone *zone);
+
+/*
+ * Global mutex to protect against size modification of zonelists
+ * as well as to serialize pageset setup for a newly populated zone.
+ */
+DEFINE_MUTEX(zonelists_mutex);
 
 /* return values int ....just for stop_machine() */
-static int __build_all_zonelists(void *dummy)
+static __init_refok int __build_all_zonelists(void *data)
 {
 	int nid;
 	int cpu;
@@ -2793,6 +2949,14 @@
 		build_zonelist_cache(pgdat);
 	}
 
+#ifdef CONFIG_MEMORY_HOTPLUG
+	/* Setup real pagesets for the new zone */
+	if (data) {
+		struct zone *zone = data;
+		setup_zone_pageset(zone);
+	}
+#endif
+
 	/*
 	 * Initialize the boot_pagesets that are going to be used
 	 * for bootstrapping processors. The real pagesets for
@@ -2812,7 +2976,11 @@
 	return 0;
 }
 
-void build_all_zonelists(void)
+/*
+ * Always called with zonelists_mutex held,
+ * unless system_state == SYSTEM_BOOTING.
+ */
+void build_all_zonelists(void *data)
 {
 	set_zonelist_order();
 
@@ -2823,7 +2991,7 @@
 	} else {
 		/* we have to stop all cpus to guarantee there is no user
 		   of zonelist */
-		stop_machine(__build_all_zonelists, NULL, NULL);
+		stop_machine(__build_all_zonelists, data, NULL);
 		/* cpuset refresh routine should be here */
 	}
 	vm_total_pages = nr_free_pagecache_pages();
@@ -3146,31 +3314,34 @@
 		pcp->batch = PAGE_SHIFT * 8;
 }
 
+static __meminit void setup_zone_pageset(struct zone *zone)
+{
+	int cpu;
+
+	zone->pageset = alloc_percpu(struct per_cpu_pageset);
+
+	for_each_possible_cpu(cpu) {
+		struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
+
+		setup_pageset(pcp, zone_batchsize(zone));
+
+		if (percpu_pagelist_fraction)
+			setup_pagelist_highmark(pcp,
+				(zone->present_pages /
+					percpu_pagelist_fraction));
+	}
+}
+
 /*
  * Allocate per cpu pagesets and initialize them.
  * Before this call only boot pagesets were available.
- * Boot pagesets will no longer be used by this processorr
- * after setup_per_cpu_pageset().
  */
 void __init setup_per_cpu_pageset(void)
 {
 	struct zone *zone;
-	int cpu;
 
-	for_each_populated_zone(zone) {
-		zone->pageset = alloc_percpu(struct per_cpu_pageset);
-
-		for_each_possible_cpu(cpu) {
-			struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
-
-			setup_pageset(pcp, zone_batchsize(zone));
-
-			if (percpu_pagelist_fraction)
-				setup_pagelist_highmark(pcp,
-					(zone->present_pages /
-						percpu_pagelist_fraction));
-		}
-	}
+	for_each_populated_zone(zone)
+		setup_zone_pageset(zone);
 }
 
 static noinline __init_refok
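
The compact_considered/compact_defer_shift fields reset on success above
implement an exponential backoff: after a failed compaction run, the zone
skips roughly 2^compact_defer_shift subsequent attempts before trying
again. A toy model of that bookkeeping (field names from this series; the
cap and exact limits are assumptions for illustration):

	#include <stdbool.h>
	#include <stdio.h>

	#define COMPACT_MAX_DEFER_SHIFT 6	/* assumed cap */

	struct zone_model {
		unsigned int compact_considered;
		unsigned int compact_defer_shift;
	};

	/* called when a compaction run fails: back off further */
	static void defer_compaction(struct zone_model *z)
	{
		z->compact_considered = 0;
		if (z->compact_defer_shift < COMPACT_MAX_DEFER_SHIFT)
			z->compact_defer_shift++;
	}

	/* true while still inside the backoff window */
	static bool compaction_deferred(struct zone_model *z)
	{
		return ++z->compact_considered <
			(1U << z->compact_defer_shift);
	}

	int main(void)
	{
		struct zone_model z = { 0, 0 };
		int skipped = 0;

		defer_compaction(&z);	/* first failure */
		defer_compaction(&z);	/* second failure: window is 4 */
		while (compaction_deferred(&z))
			skipped++;
		printf("skipped %d attempts\n", skipped);
		return 0;
	}
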
diff --git a/mm/readahead.c b/mm/readahead.c
index dfa9a1a..77506a2 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -523,7 +523,7 @@
  * @req_size: hint: total size of the read which the caller is performing in
  *            pagecache pages
  *
- * page_cache_async_ondemand() should be called when a page is used which
+ * page_cache_async_readahead() should be called when a page is used which
  * has the PG_readahead flag; this is a marker to suggest that the application
  * has used up enough of the readahead window that we should start pulling in
  * more pages.
diff --git a/mm/rmap.c b/mm/rmap.c
index 0feeef8..38a336e 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -250,7 +250,7 @@
 	list_del(&anon_vma_chain->same_anon_vma);
 
 	/* We must garbage collect the anon_vma if it's empty */
-	empty = list_empty(&anon_vma->head) && !ksm_refcount(anon_vma);
+	empty = list_empty(&anon_vma->head) && !anonvma_external_refcount(anon_vma);
 	spin_unlock(&anon_vma->lock);
 
 	if (empty)
@@ -274,7 +274,7 @@
 	struct anon_vma *anon_vma = data;
 
 	spin_lock_init(&anon_vma->lock);
-	ksm_refcount_init(anon_vma);
+	anonvma_external_refcount_init(anon_vma);
 	INIT_LIST_HEAD(&anon_vma->head);
 }
 
@@ -1131,6 +1131,20 @@
 	return ret;
 }
 
+static bool is_vma_temporary_stack(struct vm_area_struct *vma)
+{
+	int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);
+
+	if (!maybe_stack)
+		return false;
+
+	if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
+						VM_STACK_INCOMPLETE_SETUP)
+		return true;
+
+	return false;
+}
+
 /**
  * try_to_unmap_anon - unmap or unlock anonymous page using the object-based
  * rmap method
@@ -1159,7 +1173,21 @@
 
 	list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
 		struct vm_area_struct *vma = avc->vma;
-		unsigned long address = vma_address(page, vma);
+		unsigned long address;
+
+		/*
+		 * During exec, a temporary VMA is set up and later moved.
+		 * The VMA is moved under the anon_vma lock but not the
+		 * page tables leading to a race where migration cannot
+		 * find the migration ptes. Rather than increasing the
+		 * locking requirements of exec(), migration skips
+		 * temporary VMAs until after exec() completes.
+		 */
+		if (PAGE_MIGRATION && (flags & TTU_MIGRATION) &&
+				is_vma_temporary_stack(vma))
+			continue;
+
+		address = vma_address(page, vma);
 		if (address == -EFAULT)
 			continue;
 		ret = try_to_unmap_one(page, vma, address, flags);
@@ -1355,10 +1383,8 @@
 	/*
 	 * Note: remove_migration_ptes() cannot use page_lock_anon_vma()
 	 * because that depends on page_mapped(); but not all its usages
-	 * are holding mmap_sem, which also gave the necessary guarantee
-	 * (that this anon_vma's slab has not already been destroyed).
-	 * This needs to be reviewed later: avoiding page_lock_anon_vma()
-	 * is risky, and currently limits the usefulness of rmap_walk().
+	 * are holding mmap_sem. Users without mmap_sem are required to
+	 * take a reference count to prevent the anon_vma from disappearing.
 	 */
 	anon_vma = page_anon_vma(page);
 	if (!anon_vma)
diff --git a/mm/shmem.c b/mm/shmem.c
index 0cd7f66..4ef9797 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -433,8 +433,6 @@
 
 		spin_unlock(&info->lock);
 		page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping));
-		if (page)
-			set_page_private(page, 0);
 		spin_lock(&info->lock);
 
 		if (!page) {
diff --git a/mm/slab.c b/mm/slab.c
index 50a73fc..02786e1 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3217,10 +3217,12 @@
 	if (in_interrupt() || (flags & __GFP_THISNODE))
 		return NULL;
 	nid_alloc = nid_here = numa_node_id();
+	get_mems_allowed();
 	if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
 		nid_alloc = cpuset_mem_spread_node();
 	else if (current->mempolicy)
 		nid_alloc = slab_node(current->mempolicy);
+	put_mems_allowed();
 	if (nid_alloc != nid_here)
 		return ____cache_alloc_node(cachep, flags, nid_alloc);
 	return NULL;
@@ -3247,6 +3249,7 @@
 	if (flags & __GFP_THISNODE)
 		return NULL;
 
+	get_mems_allowed();
 	zonelist = node_zonelist(slab_node(current->mempolicy), flags);
 	local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
 
@@ -3302,6 +3305,7 @@
 			}
 		}
 	}
+	put_mems_allowed();
 	return obj;
 }
 
diff --git a/mm/slub.c b/mm/slub.c
index e46e312..26f0cb9 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1360,6 +1360,7 @@
 			get_cycles() % 1024 > s->remote_node_defrag_ratio)
 		return NULL;
 
+	get_mems_allowed();
 	zonelist = node_zonelist(slab_node(current->mempolicy), flags);
 	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
 		struct kmem_cache_node *n;
@@ -1369,10 +1370,13 @@
 		if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
 				n->nr_partial > s->min_partial) {
 			page = get_partial_node(n);
-			if (page)
+			if (page) {
+				put_mems_allowed();
 				return page;
+			}
 		}
 	}
+	put_mems_allowed();
 #endif
 	return NULL;
 }
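
The get_mems_allowed()/put_mems_allowed() pairs added in the slab and
slub paths bracket every read of the cpuset-constrained nodemask, so a
concurrent cpuset rebind cannot be observed half-applied. A toy
userspace model of the pattern (the kernel primitives are not a mutex;
this only illustrates the bracketing discipline):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t mems_lock = PTHREAD_MUTEX_INITIALIZER;
	static unsigned long mems_allowed = 0x3;	/* nodes 0-1 */

	static void get_mems_allowed(void) { pthread_mutex_lock(&mems_lock); }
	static void put_mems_allowed(void) { pthread_mutex_unlock(&mems_lock); }

	int main(void)
	{
		get_mems_allowed();
		/* every nodemask-derived decision stays in the bracket */
		int nid = (mems_allowed & 0x1) ? 0 : 1;
		put_mems_allowed();
		printf("allocate on node %d\n", nid);
		return 0;
	}
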
diff --git a/mm/sparse.c b/mm/sparse.c
index dc0cc4d..95ac219 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -382,13 +382,15 @@
 struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
 {
 	struct page *map;
+	unsigned long size;
 
 	map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
 	if (map)
 		return map;
 
-	map = alloc_bootmem_pages_node(NODE_DATA(nid),
-		       PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION));
+	size = PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
+	map = __alloc_bootmem_node_high(NODE_DATA(nid), size,
+					 PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
 	return map;
 }
 void __init sparse_mem_maps_populate_node(struct page **map_map,
@@ -412,7 +414,8 @@
 	}
 
 	size = PAGE_ALIGN(size);
-	map = alloc_bootmem_pages_node(NODE_DATA(nodeid), size * map_count);
+	map = __alloc_bootmem_node_high(NODE_DATA(nodeid), size * map_count,
+					 PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
 	if (map) {
 		for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
 			if (!present_section_nr(pnum))
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 3ff3311..915dceb 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -73,10 +73,14 @@
 
 	int swappiness;
 
-	int all_unreclaimable;
-
 	int order;
 
+	/*
+	 * Intend to reclaim enough contiguous memory rather than just
+	 * enough memory, i.e. this is the mode for high-order allocations.
+	 */
+	bool lumpy_reclaim_mode;
+
 	/* Which cgroup do we reclaim from */
 	struct mem_cgroup *mem_cgroup;
 
@@ -85,12 +89,6 @@
 	 * are scanned.
 	 */
 	nodemask_t	*nodemask;
-
-	/* Pluggable isolate pages callback */
-	unsigned long (*isolate_pages)(unsigned long nr, struct list_head *dst,
-			unsigned long *scanned, int order, int mode,
-			struct zone *z, struct mem_cgroup *mem_cont,
-			int active, int file);
 };
 
 #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
@@ -575,7 +573,7 @@
 	referenced_page = TestClearPageReferenced(page);
 
 	/* Lumpy reclaim - ignore references */
-	if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
+	if (sc->lumpy_reclaim_mode)
 		return PAGEREF_RECLAIM;
 
 	/*
@@ -839,11 +837,6 @@
 	return nr_reclaimed;
 }
 
-/* LRU Isolation modes. */
-#define ISOLATE_INACTIVE 0	/* Isolate inactive pages. */
-#define ISOLATE_ACTIVE 1	/* Isolate active pages. */
-#define ISOLATE_BOTH 2		/* Isolate both active and inactive pages. */
-
 /*
  * Attempt to remove the specified page from its LRU.  Only take this page
  * if it is of the appropriate PageActive status.  Pages which are being
@@ -1011,7 +1004,6 @@
 					struct list_head *dst,
 					unsigned long *scanned, int order,
 					int mode, struct zone *z,
-					struct mem_cgroup *mem_cont,
 					int active, int file)
 {
 	int lru = LRU_BASE;
@@ -1130,7 +1122,6 @@
 	unsigned long nr_scanned = 0;
 	unsigned long nr_reclaimed = 0;
 	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
-	int lumpy_reclaim = 0;
 
 	while (unlikely(too_many_isolated(zone, file, sc))) {
 		congestion_wait(BLK_RW_ASYNC, HZ/10);
@@ -1140,17 +1131,6 @@
 			return SWAP_CLUSTER_MAX;
 	}
 
-	/*
-	 * If we need a large contiguous chunk of memory, or have
-	 * trouble getting a small set of contiguous pages, we
-	 * will reclaim both active and inactive pages.
-	 *
-	 * We use the same threshold as pageout congestion_wait below.
-	 */
-	if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
-		lumpy_reclaim = 1;
-	else if (sc->order && priority < DEF_PRIORITY - 2)
-		lumpy_reclaim = 1;
 
 	pagevec_init(&pvec, 1);
 
@@ -1163,15 +1143,15 @@
 		unsigned long nr_freed;
 		unsigned long nr_active;
 		unsigned int count[NR_LRU_LISTS] = { 0, };
-		int mode = lumpy_reclaim ? ISOLATE_BOTH : ISOLATE_INACTIVE;
+		int mode = sc->lumpy_reclaim_mode ? ISOLATE_BOTH : ISOLATE_INACTIVE;
 		unsigned long nr_anon;
 		unsigned long nr_file;
 
-		nr_taken = sc->isolate_pages(SWAP_CLUSTER_MAX,
-			     &page_list, &nr_scan, sc->order, mode,
-				zone, sc->mem_cgroup, 0, file);
-
 		if (scanning_global_lru(sc)) {
+			nr_taken = isolate_pages_global(SWAP_CLUSTER_MAX,
+							&page_list, &nr_scan,
+							sc->order, mode,
+							zone, 0, file);
 			zone->pages_scanned += nr_scan;
 			if (current_is_kswapd())
 				__count_zone_vm_events(PGSCAN_KSWAPD, zone,
@@ -1179,6 +1159,16 @@
 			else
 				__count_zone_vm_events(PGSCAN_DIRECT, zone,
 						       nr_scan);
+		} else {
+			nr_taken = mem_cgroup_isolate_pages(SWAP_CLUSTER_MAX,
+							&page_list, &nr_scan,
+							sc->order, mode,
+							zone, sc->mem_cgroup,
+							0, file);
+			/*
+			 * mem_cgroup_isolate_pages() keeps track of
+			 * scanned pages on its own.
+			 */
 		}
 
 		if (nr_taken == 0)
@@ -1216,7 +1206,7 @@
 		 * but that should be acceptable to the caller
 		 */
 		if (nr_freed < nr_taken && !current_is_kswapd() &&
-		    lumpy_reclaim) {
+		    sc->lumpy_reclaim_mode) {
 			congestion_wait(BLK_RW_ASYNC, HZ/10);
 
 			/*
@@ -1356,16 +1346,23 @@
 
 	lru_add_drain();
 	spin_lock_irq(&zone->lru_lock);
-	nr_taken = sc->isolate_pages(nr_pages, &l_hold, &pgscanned, sc->order,
-					ISOLATE_ACTIVE, zone,
-					sc->mem_cgroup, 1, file);
-	/*
-	 * zone->pages_scanned is used for detect zone's oom
-	 * mem_cgroup remembers nr_scan by itself.
-	 */
 	if (scanning_global_lru(sc)) {
+		nr_taken = isolate_pages_global(nr_pages, &l_hold,
+						&pgscanned, sc->order,
+						ISOLATE_ACTIVE, zone,
+						1, file);
 		zone->pages_scanned += pgscanned;
+	} else {
+		nr_taken = mem_cgroup_isolate_pages(nr_pages, &l_hold,
+						&pgscanned, sc->order,
+						ISOLATE_ACTIVE, zone,
+						sc->mem_cgroup, 1, file);
+		/*
+		 * mem_cgroup_isolate_pages() keeps track of
+		 * scanned pages on its own.
+		 */
 	}
+
 	reclaim_stat->recent_scanned[file] += nr_taken;
 
 	__count_zone_vm_events(PGREFILL, zone, pgscanned);
@@ -1519,21 +1516,52 @@
 }
 
 /*
+ * Smallish @nr_to_scan's are deposited in @nr_saved_scan,
+ * until we have collected SWAP_CLUSTER_MAX pages to scan.
+ */
+static unsigned long nr_scan_try_batch(unsigned long nr_to_scan,
+				       unsigned long *nr_saved_scan)
+{
+	unsigned long nr;
+
+	*nr_saved_scan += nr_to_scan;
+	nr = *nr_saved_scan;
+
+	if (nr >= SWAP_CLUSTER_MAX)
+		*nr_saved_scan = 0;
+	else
+		nr = 0;
+
+	return nr;
+}
+
+/*
  * Determine how aggressively the anon and file LRU lists should be
  * scanned.  The relative value of each set of LRU lists is determined
  * by looking at the fraction of the pages scanned we did rotate back
  * onto the active list instead of evict.
  *
- * percent[0] specifies how much pressure to put on ram/swap backed
- * memory, while percent[1] determines pressure on the file LRUs.
+ * nr[0] = anon pages to scan; nr[1] = file pages to scan
  */
-static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
-					unsigned long *percent)
+static void get_scan_count(struct zone *zone, struct scan_control *sc,
+					unsigned long *nr, int priority)
 {
 	unsigned long anon, file, free;
 	unsigned long anon_prio, file_prio;
 	unsigned long ap, fp;
 	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
+	u64 fraction[2], denominator;
+	enum lru_list l;
+	int noswap = 0;
+
+	/* If we have no swap space, do not bother scanning anon pages. */
+	if (!sc->may_swap || (nr_swap_pages <= 0)) {
+		noswap = 1;
+		fraction[0] = 0;
+		fraction[1] = 1;
+		denominator = 1;
+		goto out;
+	}
 
 	anon  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) +
 		zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON);
@@ -1545,9 +1573,10 @@
 		/* If we have very few page cache pages,
 		   force-scan anon pages. */
 		if (unlikely(file + free <= high_wmark_pages(zone))) {
-			percent[0] = 100;
-			percent[1] = 0;
-			return;
+			fraction[0] = 1;
+			fraction[1] = 0;
+			denominator = 1;
+			goto out;
 		}
 	}
 
@@ -1594,29 +1623,37 @@
 	fp = (file_prio + 1) * (reclaim_stat->recent_scanned[1] + 1);
 	fp /= reclaim_stat->recent_rotated[1] + 1;
 
-	/* Normalize to percentages */
-	percent[0] = 100 * ap / (ap + fp + 1);
-	percent[1] = 100 - percent[0];
+	fraction[0] = ap;
+	fraction[1] = fp;
+	denominator = ap + fp + 1;
+out:
+	for_each_evictable_lru(l) {
+		int file = is_file_lru(l);
+		unsigned long scan;
+
+		scan = zone_nr_lru_pages(zone, sc, l);
+		if (priority || noswap) {
+			scan >>= priority;
+			scan = div64_u64(scan * fraction[file], denominator);
+		}
+		nr[l] = nr_scan_try_batch(scan,
+					  &reclaim_stat->nr_saved_scan[l]);
+	}
 }
 
-/*
- * Smallish @nr_to_scan's are deposited in @nr_saved_scan,
- * until we collected @swap_cluster_max pages to scan.
- */
-static unsigned long nr_scan_try_batch(unsigned long nr_to_scan,
-				       unsigned long *nr_saved_scan)
+static void set_lumpy_reclaim_mode(int priority, struct scan_control *sc)
 {
-	unsigned long nr;
-
-	*nr_saved_scan += nr_to_scan;
-	nr = *nr_saved_scan;
-
-	if (nr >= SWAP_CLUSTER_MAX)
-		*nr_saved_scan = 0;
+	/*
+	 * If we need a large contiguous chunk of memory, or have
+	 * trouble getting a small set of contiguous pages, we
+	 * will reclaim both active and inactive pages.
+	 */
+	if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
+		sc->lumpy_reclaim_mode = 1;
+	else if (sc->order && priority < DEF_PRIORITY - 2)
+		sc->lumpy_reclaim_mode = 1;
 	else
-		nr = 0;
-
-	return nr;
+		sc->lumpy_reclaim_mode = 0;
 }
 
 /*
@@ -1627,33 +1664,13 @@
 {
 	unsigned long nr[NR_LRU_LISTS];
 	unsigned long nr_to_scan;
-	unsigned long percent[2];	/* anon @ 0; file @ 1 */
 	enum lru_list l;
 	unsigned long nr_reclaimed = sc->nr_reclaimed;
 	unsigned long nr_to_reclaim = sc->nr_to_reclaim;
-	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
-	int noswap = 0;
 
-	/* If we have no swap space, do not bother scanning anon pages. */
-	if (!sc->may_swap || (nr_swap_pages <= 0)) {
-		noswap = 1;
-		percent[0] = 0;
-		percent[1] = 100;
-	} else
-		get_scan_ratio(zone, sc, percent);
+	get_scan_count(zone, sc, nr, priority);
 
-	for_each_evictable_lru(l) {
-		int file = is_file_lru(l);
-		unsigned long scan;
-
-		scan = zone_nr_lru_pages(zone, sc, l);
-		if (priority || noswap) {
-			scan >>= priority;
-			scan = (scan * percent[file]) / 100;
-		}
-		nr[l] = nr_scan_try_batch(scan,
-					  &reclaim_stat->nr_saved_scan[l]);
-	}
+	set_lumpy_reclaim_mode(priority, sc);
 
 	while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
 					nr[LRU_INACTIVE_FILE]) {
@@ -1707,14 +1724,14 @@
  * If a zone is deemed to be full of pinned pages then just give it a light
  * scan then give up on it.
  */
-static void shrink_zones(int priority, struct zonelist *zonelist,
+static int shrink_zones(int priority, struct zonelist *zonelist,
 					struct scan_control *sc)
 {
 	enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
 	struct zoneref *z;
 	struct zone *zone;
+	int progress = 0;
 
-	sc->all_unreclaimable = 1;
 	for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
 					sc->nodemask) {
 		if (!populated_zone(zone))
@@ -1730,19 +1747,19 @@
 
 			if (zone->all_unreclaimable && priority != DEF_PRIORITY)
 				continue;	/* Let kswapd poll it */
-			sc->all_unreclaimable = 0;
 		} else {
 			/*
 			 * Ignore cpuset limitation here. We just want to reduce
 			 * # of used pages by us regardless of memory shortage.
 			 */
-			sc->all_unreclaimable = 0;
 			mem_cgroup_note_reclaim_priority(sc->mem_cgroup,
 							priority);
 		}
 
 		shrink_zone(priority, zone, sc);
+		progress = 1;
 	}
+	return progress;
 }
 
 /*
@@ -1774,6 +1791,7 @@
 	enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
 	unsigned long writeback_threshold;
 
+	get_mems_allowed();
 	delayacct_freepages_start();
 
 	if (scanning_global_lru(sc))
@@ -1795,7 +1813,7 @@
 		sc->nr_scanned = 0;
 		if (!priority)
 			disable_swap_token();
-		shrink_zones(priority, zonelist, sc);
+		ret = shrink_zones(priority, zonelist, sc);
 		/*
 		 * Don't shrink slabs when reclaiming memory from
 		 * over limit cgroups
@@ -1832,7 +1850,7 @@
 			congestion_wait(BLK_RW_ASYNC, HZ/10);
 	}
 	/* top priority shrink_zones still had more to do? don't OOM, then */
-	if (!sc->all_unreclaimable && scanning_global_lru(sc))
+	if (ret && scanning_global_lru(sc))
 		ret = sc->nr_reclaimed;
 out:
 	/*
@@ -1857,6 +1875,7 @@
 		mem_cgroup_record_reclaim_priority(sc->mem_cgroup, priority);
 
 	delayacct_freepages_end();
+	put_mems_allowed();
 
 	return ret;
 }
@@ -1873,7 +1892,6 @@
 		.swappiness = vm_swappiness,
 		.order = order,
 		.mem_cgroup = NULL,
-		.isolate_pages = isolate_pages_global,
 		.nodemask = nodemask,
 	};
 
@@ -1894,7 +1912,6 @@
 		.swappiness = swappiness,
 		.order = 0,
 		.mem_cgroup = mem,
-		.isolate_pages = mem_cgroup_isolate_pages,
 	};
 	nodemask_t nm  = nodemask_of_node(nid);
 
@@ -1928,7 +1945,6 @@
 		.swappiness = swappiness,
 		.order = 0,
 		.mem_cgroup = mem_cont,
-		.isolate_pages = mem_cgroup_isolate_pages,
 		.nodemask = NULL, /* we don't care the placement */
 	};
 
@@ -2006,7 +2022,6 @@
 		.swappiness = vm_swappiness,
 		.order = order,
 		.mem_cgroup = NULL,
-		.isolate_pages = isolate_pages_global,
 	};
 	/*
 	 * temp_priority is used to remember the scanning priority at which
@@ -2385,7 +2400,6 @@
 		.hibernation_mode = 1,
 		.swappiness = vm_swappiness,
 		.order = 0,
-		.isolate_pages = isolate_pages_global,
 	};
 	struct zonelist * zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
 	struct task_struct *p = current;
@@ -2570,7 +2584,6 @@
 		.gfp_mask = gfp_mask,
 		.swappiness = vm_swappiness,
 		.order = order,
-		.isolate_pages = isolate_pages_global,
 	};
 	unsigned long slab_reclaimable;
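
To make get_scan_count()'s proportioning concrete: with fraction[0]=ap,
fraction[1]=fp and denominator=ap+fp+1, each LRU's scan target is
(lru_size >> priority) * fraction / denominator, and small results are
banked by nr_scan_try_batch() until SWAP_CLUSTER_MAX pages accumulate.
A worked host-side example with made-up numbers:

	#include <stdio.h>

	#define SWAP_CLUSTER_MAX 32

	/* same batching rule as nr_scan_try_batch() above */
	static unsigned long try_batch(unsigned long nr_to_scan,
				       unsigned long *nr_saved_scan)
	{
		unsigned long nr = *nr_saved_scan + nr_to_scan;

		if (nr >= SWAP_CLUSTER_MAX) {
			*nr_saved_scan = 0;
			return nr;
		}
		*nr_saved_scan = nr;
		return 0;
	}

	int main(void)
	{
		unsigned long ap = 300, fp = 100, denom = ap + fp + 1;
		unsigned long lru_size = 40960, priority = 12, saved = 0;

		/* base scan 40960 >> 12 = 10; anon share 10*300/401 = 7 */
		unsigned long scan = (lru_size >> priority) * ap / denom;

		/* passes 1-4 bank 7 pages each; pass 5 releases 35 >= 32 */
		for (int pass = 1; pass <= 5; pass++) {
			unsigned long got = try_batch(scan, &saved);
			printf("pass %d: scan %lu (banked %lu)\n",
			       pass, got, saved);
		}
		return 0;
	}
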
 
diff --git a/mm/vmstat.c b/mm/vmstat.c
index fa12ea3..7759941 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -16,6 +16,7 @@
 #include <linux/cpu.h>
 #include <linux/vmstat.h>
 #include <linux/sched.h>
+#include <linux/math64.h>
 
 #ifdef CONFIG_VM_EVENT_COUNTERS
 DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
@@ -379,7 +380,86 @@
 }
 #endif
 
-#ifdef CONFIG_PROC_FS
+#ifdef CONFIG_COMPACTION
+struct contig_page_info {
+	unsigned long free_pages;
+	unsigned long free_blocks_total;
+	unsigned long free_blocks_suitable;
+};
+
+/*
+ * Calculate the number of free pages in a zone, how many contiguous
+ * pages are free and how many are large enough to satisfy an allocation of
+ * the target size. Note that this function makes no attempt to estimate
+ * how many suitable free blocks there *might* be if MOVABLE pages were
+ * migrated. Calculating that is possible, but expensive and can be
+ * figured out from userspace.
+ */
+static void fill_contig_page_info(struct zone *zone,
+				unsigned int suitable_order,
+				struct contig_page_info *info)
+{
+	unsigned int order;
+
+	info->free_pages = 0;
+	info->free_blocks_total = 0;
+	info->free_blocks_suitable = 0;
+
+	for (order = 0; order < MAX_ORDER; order++) {
+		unsigned long blocks;
+
+		/* Count number of free blocks */
+		blocks = zone->free_area[order].nr_free;
+		info->free_blocks_total += blocks;
+
+		/* Count free base pages */
+		info->free_pages += blocks << order;
+
+		/* Count the suitable free blocks */
+		if (order >= suitable_order)
+			info->free_blocks_suitable += blocks <<
+						(order - suitable_order);
+	}
+}
+
+/*
+ * A fragmentation index only makes sense if an allocation of a requested
+ * size would fail. If that is true, the fragmentation index indicates
+ * whether external fragmentation or a lack of memory was the problem.
+ * The value can be used to determine if page reclaim or compaction
+ * should be used.
+ */
+static int __fragmentation_index(unsigned int order, struct contig_page_info *info)
+{
+	unsigned long requested = 1UL << order;
+
+	if (!info->free_blocks_total)
+		return 0;
+
+	/* Fragmentation index only makes sense when a request would fail */
+	if (info->free_blocks_suitable)
+		return -1000;
+
+	/*
+	 * Index is between 0 and 1, so return it to 3 decimal places.
+	 *
+	 * 0 => allocation would fail due to lack of memory
+	 * 1 => allocation would fail due to fragmentation
+	 */
+	return 1000 - div_u64((1000 + div_u64(info->free_pages * 1000ULL, requested)), info->free_blocks_total);
+}
+
+/* Same as __fragmentation index but allocs contig_page_info on stack */
+int fragmentation_index(struct zone *zone, unsigned int order)
+{
+	struct contig_page_info info;
+
+	fill_contig_page_info(zone, order, &info);
+	return __fragmentation_index(order, &info);
+}
+#endif
+
+#if defined(CONFIG_PROC_FS) || defined(CONFIG_COMPACTION)
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 
@@ -432,7 +512,9 @@
 		spin_unlock_irqrestore(&zone->lock, flags);
 	}
 }
+#endif
 
+#ifdef CONFIG_PROC_FS
 static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
 						struct zone *zone)
 {
@@ -693,6 +775,16 @@
 	"allocstall",
 
 	"pgrotated",
+
+#ifdef CONFIG_COMPACTION
+	"compact_blocks_moved",
+	"compact_pages_moved",
+	"compact_pagemigrate_failed",
+	"compact_stall",
+	"compact_fail",
+	"compact_success",
+#endif
+
 #ifdef CONFIG_HUGETLB_PAGE
 	"htlb_buddy_alloc_success",
 	"htlb_buddy_alloc_fail",
@@ -954,3 +1046,162 @@
 	return 0;
 }
 module_init(setup_vmstat)
+
+#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)
+#include <linux/debugfs.h>
+
+static struct dentry *extfrag_debug_root;
+
+/*
+ * Return an index indicating how much of the available free memory is
+ * unusable for an allocation of the requested size.
+ */
+static int unusable_free_index(unsigned int order,
+				struct contig_page_info *info)
+{
+	/* No free memory is interpreted as all free memory is unusable */
+	if (info->free_pages == 0)
+		return 1000;
+
+	/*
+	 * Index should be a value between 0 and 1. Return a value to 3
+	 * decimal places.
+	 *
+	 * 0 => no fragmentation
+	 * 1 => high fragmentation
+	 */
+	return div_u64((info->free_pages - (info->free_blocks_suitable << order)) * 1000ULL, info->free_pages);
+}
+
+static void unusable_show_print(struct seq_file *m,
+					pg_data_t *pgdat, struct zone *zone)
+{
+	unsigned int order;
+	int index;
+	struct contig_page_info info;
+
+	seq_printf(m, "Node %d, zone %8s ",
+				pgdat->node_id,
+				zone->name);
+	for (order = 0; order < MAX_ORDER; ++order) {
+		fill_contig_page_info(zone, order, &info);
+		index = unusable_free_index(order, &info);
+		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
+	}
+
+	seq_putc(m, '\n');
+}
+
+/*
+ * Display unusable free space index
+ *
+ * The unusable free space index measures how much of the available free
+ * memory cannot be used to satisfy an allocation of a given size and is a
+ * value between 0 and 1. The higher the value, the more of the free memory is
+ * unusable and, by implication, the worse the external fragmentation is. This
+ * can be expressed as a percentage by multiplying by 100.
+ */
+static int unusable_show(struct seq_file *m, void *arg)
+{
+	pg_data_t *pgdat = (pg_data_t *)arg;
+
+	/* check memoryless node */
+	if (!node_state(pgdat->node_id, N_HIGH_MEMORY))
+		return 0;
+
+	walk_zones_in_node(m, pgdat, unusable_show_print);
+
+	return 0;
+}
+
+static const struct seq_operations unusable_op = {
+	.start	= frag_start,
+	.next	= frag_next,
+	.stop	= frag_stop,
+	.show	= unusable_show,
+};
+
+static int unusable_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &unusable_op);
+}
+
+static const struct file_operations unusable_file_ops = {
+	.open		= unusable_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
+static void extfrag_show_print(struct seq_file *m,
+					pg_data_t *pgdat, struct zone *zone)
+{
+	unsigned int order;
+	int index;
+
+	/* Alloc on stack as interrupts are disabled for zone walk */
+	struct contig_page_info info;
+
+	seq_printf(m, "Node %d, zone %8s ",
+				pgdat->node_id,
+				zone->name);
+	for (order = 0; order < MAX_ORDER; ++order) {
+		fill_contig_page_info(zone, order, &info);
+		index = __fragmentation_index(order, &info);
+		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
+	}
+
+	seq_putc(m, '\n');
+}
+
+/*
+ * Display fragmentation index for orders that allocations would fail for
+ */
+static int extfrag_show(struct seq_file *m, void *arg)
+{
+	pg_data_t *pgdat = (pg_data_t *)arg;
+
+	walk_zones_in_node(m, pgdat, extfrag_show_print);
+
+	return 0;
+}
+
+static const struct seq_operations extfrag_op = {
+	.start	= frag_start,
+	.next	= frag_next,
+	.stop	= frag_stop,
+	.show	= extfrag_show,
+};
+
+static int extfrag_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &extfrag_op);
+}
+
+static const struct file_operations extfrag_file_ops = {
+	.open		= extfrag_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
+static int __init extfrag_debug_init(void)
+{
+	extfrag_debug_root = debugfs_create_dir("extfrag", NULL);
+	if (!extfrag_debug_root)
+		return -ENOMEM;
+
+	if (!debugfs_create_file("unusable_index", 0444,
+			extfrag_debug_root, NULL, &unusable_file_ops))
+		return -ENOMEM;
+
+	if (!debugfs_create_file("extfrag_index", 0444,
+			extfrag_debug_root, NULL, &extfrag_file_ops))
+		return -ENOMEM;
+
+	return 0;
+}
+
+module_init(extfrag_debug_init);
+#endif
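
Putting numbers to the two indices above: with free_pages=1000 spread
over free_blocks_total=250 blocks, none of which satisfies an order-4
request (free_blocks_suitable=0, requested=16), the fragmentation index
is 1000 - (1000 + 1000*1000/16)/250 = 746, i.e. 0.746, pointing at
fragmentation rather than lack of memory. A host-side check of the same
arithmetic:

	#include <stdio.h>

	int main(void)
	{
		unsigned long long free_pages = 1000;
		unsigned long long blocks_total = 250;
		unsigned long long blocks_suitable = 0;
		unsigned int order = 4;
		unsigned long long requested = 1ULL << order;

		/* mirrors __fragmentation_index() for a failing request */
		long frag = 1000 -
			(1000 + free_pages * 1000 / requested) / blocks_total;

		/* mirrors unusable_free_index() */
		long unusable = (free_pages -
			(blocks_suitable << order)) * 1000 / free_pages;

		printf("extfrag=%ld.%03ld unusable=%ld.%03ld\n",
		       frag / 1000, frag % 1000,
		       unusable / 1000, unusable % 1000);
		return 0;
	}
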
diff --git a/net/9p/protocol.c b/net/9p/protocol.c
index 77d3aab..149f821 100644
--- a/net/9p/protocol.c
+++ b/net/9p/protocol.c
@@ -394,7 +394,7 @@
 				const char *sptr = va_arg(ap, const char *);
 				int16_t len = 0;
 				if (sptr)
-					len = MIN(strlen(sptr), USHORT_MAX);
+					len = MIN(strlen(sptr), USHRT_MAX);
 
 				errcode = p9pdu_writef(pdu, proto_version,
 								"w", len);
diff --git a/net/dccp/options.c b/net/dccp/options.c
index 1b08cae..07395f8 100644
--- a/net/dccp/options.c
+++ b/net/dccp/options.c
@@ -296,7 +296,7 @@
 {
 	if (likely(ndp <= 0xFF))
 		return 1;
-	return likely(ndp <= USHORT_MAX) ? 2 : (ndp <= UINT_MAX ? 4 : 6);
+	return likely(ndp <= USHRT_MAX) ? 2 : (ndp <= UINT_MAX ? 4 : 6);
 }
 
 int dccp_insert_option(struct sock *sk, struct sk_buff *skb,
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 9de6a69..baeec29 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1686,8 +1686,8 @@
 			return -ENOPROTOOPT;
 		if (val != 0 && val < 8) /* Illegal coverage: use default (8) */
 			val = 8;
-		else if (val > USHORT_MAX)
-			val = USHORT_MAX;
+		else if (val > USHRT_MAX)
+			val = USHRT_MAX;
 		up->pcslen = val;
 		up->pcflag |= UDPLITE_SEND_CC;
 		break;
@@ -1700,8 +1700,8 @@
 			return -ENOPROTOOPT;
 		if (val != 0 && val < 8) /* Avoid silly minimal values.       */
 			val = 8;
-		else if (val > USHORT_MAX)
-			val = USHORT_MAX;
+		else if (val > USHRT_MAX)
+			val = USHRT_MAX;
 		up->pcrlen = val;
 		up->pcflag |= UDPLITE_RECV_CC;
 		break;
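
The USHORT_MAX to USHRT_MAX rename aligns the kernel with the userspace
limits.h spelling; the clamped value ranges are unchanged. A quick
standalone illustration of the UDP-Lite coverage clamp patched above:

	#include <limits.h>
	#include <stdio.h>

	/* same clamp as the setsockopt paths above */
	static int clamp_coverage(int val)
	{
		if (val != 0 && val < 8)	/* avoid silly minimal values */
			val = 8;
		else if (val > USHRT_MAX)
			val = USHRT_MAX;
		return val;
	}

	int main(void)
	{
		printf("%d %d %d\n", clamp_coverage(3),
		       clamp_coverage(100), clamp_coverage(70000));
		return 0;	/* prints: 8 100 65535 */
	}
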
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 7301975..ba9360a 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -259,7 +259,7 @@
 	skb_queue_head_init(&sta->tx_filtered);
 
 	for (i = 0; i < NUM_RX_DATA_QUEUES; i++)
-		sta->last_seq_ctrl[i] = cpu_to_le16(USHORT_MAX);
+		sta->last_seq_ctrl[i] = cpu_to_le16(USHRT_MAX);
 
 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
 	printk(KERN_DEBUG "%s: Allocated STA %pM\n",
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index 1211053..dac219a 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -783,7 +783,7 @@
 	port = ntohl(*p);
 	dprintk("RPC: %5u PMAP_%s result: %lu\n", task->tk_pid,
 			task->tk_msg.rpc_proc->p_name, port);
-	if (unlikely(port > USHORT_MAX))
+	if (unlikely(port > USHRT_MAX))
 		return -EIO;
 
 	rpcb->r_port = port;
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 3fc3253..dcd0132 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -166,7 +166,6 @@
 int xprt_load_transport(const char *transport_name)
 {
 	struct xprt_class *t;
-	char module_name[sizeof t->name + 5];
 	int result;
 
 	result = 0;
@@ -178,9 +177,7 @@
 		}
 	}
 	spin_unlock(&xprt_list_lock);
-	strcpy(module_name, "xprt");
-	strncat(module_name, transport_name, sizeof t->name);
-	result = request_module(module_name);
+	result = request_module("xprt%s", transport_name);
 out:
 	return result;
 }
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index f2bbea9..bd88f11 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -1382,6 +1382,21 @@
 			ERROR("trailing whitespace\n" . $herevet);
 		}
 
+# check for Kconfig help text having a real description
+		if ($realfile =~ /Kconfig/ &&
+		    $line =~ /\+?\s*(---)?help(---)?$/) {
+			my $length = 0;
+			for (my $l = $linenr; defined($lines[$l]); $l++) {
+				my $f = $lines[$l];
+				$f =~ s/#.*//;
+				$f =~ s/^\s+//;
+				next if ($f =~ /^$/);
+				last if ($f =~ /^\s*config\s/);
+				$length++;
+			}
+			WARN("please write a paragraph that describes the config symbol fully\n" . $herecurr) if ($length < 4);
+		}
+
 # check we are in a valid source file if not then ignore this hunk
 		next if ($realfile !~ /\.(h|c|s|S|pl|sh)$/);
 
@@ -2586,6 +2601,11 @@
 			CHK("architecture specific defines should be avoided\n" .  $herecurr);
 		}
 
+# Check that the storage class is at the beginning of a declaration
+		if ($line =~ /\b$Storage\b/ && $line !~ /^.\s*$Storage\b/) {
+			WARN("storage class should be at the beginning of the declaration\n" . $herecurr);
+		}
+
 # check the location of the inline attribute, that it is between
 # storage class and type.
 		if ($line =~ /\b$Type\s+$Inline\b/ ||
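
The new storage-class test warns whenever a storage class keyword
appears anywhere but first on the line. For instance (illustrative
input, not from the patch):

	int static foo;		/* warns: storage class not first */
	static int bar;		/* accepted ordering */
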
diff --git a/scripts/get_maintainer.pl b/scripts/get_maintainer.pl
index 6f97a13..b228198 100755
--- a/scripts/get_maintainer.pl
+++ b/scripts/get_maintainer.pl
@@ -13,7 +13,7 @@
 use strict;
 
 my $P = $0;
-my $V = '0.23';
+my $V = '0.24';
 
 use Getopt::Long qw(:config no_auto_abbrev);
 
@@ -25,6 +25,7 @@
 my $email_subscriber_list = 0;
 my $email_git_penguin_chiefs = 0;
 my $email_git = 1;
+my $email_git_all_signature_types = 0;
 my $email_git_blame = 0;
 my $email_git_min_signatures = 1;
 my $email_git_max_maintainers = 5;
@@ -51,9 +52,9 @@
 my $exit = 0;
 
 my @penguin_chief = ();
-push(@penguin_chief,"Linus Torvalds:torvalds\@linux-foundation.org");
+push(@penguin_chief, "Linus Torvalds:torvalds\@linux-foundation.org");
 #Andrew wants in on most everything - 2009/01/14
-#push(@penguin_chief,"Andrew Morton:akpm\@linux-foundation.org");
+#push(@penguin_chief, "Andrew Morton:akpm\@linux-foundation.org");
 
 my @penguin_chief_names = ();
 foreach my $chief (@penguin_chief) {
@@ -63,7 +64,16 @@
 	push(@penguin_chief_names, $chief_name);
     }
 }
-my $penguin_chiefs = "\(" . join("|",@penguin_chief_names) . "\)";
+my $penguin_chiefs = "\(" . join("|", @penguin_chief_names) . "\)";
+
+# Signature types of people who are either
+# 	a) responsible for the code in question, or
+# 	b) familiar enough with it to give relevant feedback
+my @signature_tags = ();
+push(@signature_tags, "Signed-off-by:");
+push(@signature_tags, "Reviewed-by:");
+push(@signature_tags, "Acked-by:");
+my $signaturePattern = "\(" . join("|", @signature_tags) . "\)";
 
 # rfc822 email address - preloaded methods go here.
 my $rfc822_lwsp = "(?:(?:\\r\\n)?[ \\t])";
@@ -97,9 +107,34 @@
     "blame_commit_pattern" => "^([0-9a-f]+):"
 );
 
+if (-f "${lk_path}.get_maintainer.conf") {
+    my @conf_args;
+    open(my $conffile, '<', "${lk_path}.get_maintainer.conf")
+	or warn "$P: Can't open .get_maintainer.conf: $!\n";
+    while (<$conffile>) {
+	my $line = $_;
+
+	$line =~ s/\s*\n?$//g;
+	$line =~ s/^\s*//g;
+	$line =~ s/\s+/ /g;
+
+	next if ($line =~ m/^\s*#/);
+	next if ($line =~ m/^\s*$/);
+
+	my @words = split(" ", $line);
+	foreach my $word (@words) {
+	    last if ($word =~ m/^#/);
+	    push (@conf_args, $word);
+	}
+    }
+    close($conffile);
+    unshift(@ARGV, @conf_args) if @conf_args;
+}
+
 if (!GetOptions(
 		'email!' => \$email,
 		'git!' => \$email_git,
+		'git-all-signature-types!' => \$email_git_all_signature_types,
 		'git-blame!' => \$email_git_blame,
 		'git-chief-penguins!' => \$email_git_penguin_chiefs,
 		'git-min-signatures=i' => \$email_git_min_signatures,
@@ -180,6 +215,10 @@
 	. "a linux kernel source tree.\n";
 }
 
+if ($email_git_all_signature_types) {
+    $signaturePattern = "(.+?)[Bb][Yy]:";
+}
+
 ## Read MAINTAINERS for type/value pairs
 
 my @typevalue = ();
@@ -497,13 +536,15 @@
 MAINTAINER field selection options:
   --email => print email address(es) if any
     --git => include recent git \*-by: signers
+    --git-all-signature-types => include signers regardless of signature type
+        or use only ${signaturePattern} signers (default: $email_git_all_signature_types)
     --git-chief-penguins => include ${penguin_chiefs}
-    --git-min-signatures => number of signatures required (default: 1)
-    --git-max-maintainers => maximum maintainers to add (default: 5)
-    --git-min-percent => minimum percentage of commits required (default: 5)
+    --git-min-signatures => number of signatures required (default: $email_git_min_signatures)
+    --git-max-maintainers => maximum maintainers to add (default: $email_git_max_maintainers)
+    --git-min-percent => minimum percentage of commits required (default: $email_git_min_percent)
     --git-blame => use git blame to find modified commits for patch or file
-    --git-since => git history to use (default: 1-year-ago)
-    --hg-since => hg history to use (default: -365)
+    --git-since => git history to use (default: $email_git_since)
+    --hg-since => hg history to use (default: $email_hg_since)
     --m => include maintainer(s) if any
     --n => include name 'Full Name <addr\@domain.tld>'
     --l => include list(s) if any
@@ -556,6 +597,11 @@
           --git-min-signatures, --git-max-maintainers, --git-min-percent, and
           --git-blame
       Use --hg-since not --git-since to control date selection
+  File ".get_maintainer.conf", if it exists in the linux kernel source root
+      directory, can be used to change the default get_maintainer options.
+      Entries in this file can be any command line argument.
+      Arguments in this file are prepended to any command line arguments.
+      Multiple lines and # comments are allowed.
 EOT
 }
 
@@ -964,7 +1010,7 @@
 
     $commits = grep(/$pattern/, @lines);	# of commits
 
-    @lines = grep(/^[-_ 	a-z]+by:.*\@.*$/i, @lines);
+    @lines = grep(/^[ \t]*${signaturePattern}.*\@.*$/, @lines);
     if (!$email_git_penguin_chiefs) {
 	@lines = grep(!/${penguin_chiefs}/i, @lines);
     }
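
A hypothetical .get_maintainer.conf making use of the new hook, placed
in the kernel source root (every whitespace-separated word becomes a
prepended command-line argument; # comments are skipped):

	# always consider all *-by: signature types
	--git-all-signature-types
	# be pickier about git-derived addresses
	--git-min-percent=10 --git-max-maintainers=3
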
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index ef03a82a0..d37f713 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -669,7 +669,7 @@
 	struct keyring_list *klist =
 		container_of(rcu, struct keyring_list, rcu);
 
-	if (klist->delkey != USHORT_MAX)
+	if (klist->delkey != USHRT_MAX)
 		key_put(klist->keys[klist->delkey]);
 	kfree(klist);
 }
@@ -746,7 +746,7 @@
 			max += klist->maxkeys;
 
 		ret = -ENFILE;
-		if (max > USHORT_MAX - 1)
+		if (max > USHRT_MAX - 1)
 			goto error_quota;
 		size = sizeof(*klist) + sizeof(struct key *) * max;
 		if (size > PAGE_SIZE)
@@ -763,7 +763,7 @@
 			       sizeof(struct key *) * klist->nkeys);
 			nklist->delkey = klist->nkeys;
 			nklist->nkeys = klist->nkeys + 1;
-			klist->delkey = USHORT_MAX;
+			klist->delkey = USHRT_MAX;
 		} else {
 			nklist->nkeys = 1;
 			nklist->delkey = 0;