Merge branch 'devel-stable' into devel

Conflicts:
	arch/arm/Kconfig
	arch/arm/include/asm/system.h
	arch/arm/mm/Kconfig
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 00f46d9..6e8f05c 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -149,10 +149,10 @@
 
 #define USER(x...)				\
 9999:	x;					\
-	.section __ex_table,"a";		\
+	.pushsection __ex_table,"a";		\
 	.align	3;				\
 	.long	9999b,9001f;			\
-	.previous
+	.popsection
 
 /*
  * SMP data memory barrier
@@ -193,10 +193,10 @@
 	.error	"Unsupported inc macro argument"
 	.endif
 
-	.section __ex_table,"a"
+	.pushsection __ex_table,"a"
 	.align	3
 	.long	9999b, \abort
-	.previous
+	.popsection
 	.endm
 
 	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort
@@ -234,10 +234,10 @@
 	.error	"Unsupported inc macro argument"
 	.endif
 
-	.section __ex_table,"a"
+	.pushsection __ex_table,"a"
 	.align	3
 	.long	9999b, \abort
-	.previous
+	.popsection
 	.endr
 	.endm
 
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 72da7e0..0d08d41 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -15,6 +15,7 @@
 #include <asm/glue.h>
 #include <asm/shmparam.h>
 #include <asm/cachetype.h>
+#include <asm/outercache.h>
 
 #define CACHE_COLOUR(vaddr)	((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
 
@@ -219,12 +220,6 @@
 	void (*dma_flush_range)(const void *, const void *);
 };
 
-struct outer_cache_fns {
-	void (*inv_range)(unsigned long, unsigned long);
-	void (*clean_range)(unsigned long, unsigned long);
-	void (*flush_range)(unsigned long, unsigned long);
-};
-
 /*
  * Select the calling method
  */
@@ -281,37 +276,6 @@
 
 #endif
 
-#ifdef CONFIG_OUTER_CACHE
-
-extern struct outer_cache_fns outer_cache;
-
-static inline void outer_inv_range(unsigned long start, unsigned long end)
-{
-	if (outer_cache.inv_range)
-		outer_cache.inv_range(start, end);
-}
-static inline void outer_clean_range(unsigned long start, unsigned long end)
-{
-	if (outer_cache.clean_range)
-		outer_cache.clean_range(start, end);
-}
-static inline void outer_flush_range(unsigned long start, unsigned long end)
-{
-	if (outer_cache.flush_range)
-		outer_cache.flush_range(start, end);
-}
-
-#else
-
-static inline void outer_inv_range(unsigned long start, unsigned long end)
-{ }
-static inline void outer_clean_range(unsigned long start, unsigned long end)
-{ }
-static inline void outer_flush_range(unsigned long start, unsigned long end)
-{ }
-
-#endif
-
 /*
  * Copy user data from/to a page which is mapped into a different
  * processes address space.  Really, we want to allow our "user
diff --git a/arch/arm/include/asm/clkdev.h b/arch/arm/include/asm/clkdev.h
index 7a0690d..b56c138 100644
--- a/arch/arm/include/asm/clkdev.h
+++ b/arch/arm/include/asm/clkdev.h
@@ -13,6 +13,7 @@
 #define __ASM_CLKDEV_H
 
 struct clk;
+struct device;
 
 struct clk_lookup {
 	struct list_head	node;
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index bff0564..51662fe 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -9,6 +9,8 @@
 #include <asm/ptrace.h>
 #include <asm/user.h>
 
+struct task_struct;
+
 typedef unsigned long elf_greg_t;
 typedef unsigned long elf_freg_t[3];
 
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index bfcc159..540a044 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -21,14 +21,14 @@
 	"2:	strt	%0, [%2]\n"				\
 	"	mov	%0, #0\n"				\
 	"3:\n"							\
-	"	.section __ex_table,\"a\"\n"			\
+	"	.pushsection __ex_table,\"a\"\n"		\
 	"	.align	3\n"					\
 	"	.long	1b, 4f, 2b, 4f\n"			\
-	"	.previous\n"					\
-	"	.section .fixup,\"ax\"\n"			\
+	"	.popsection\n"					\
+	"	.pushsection .fixup,\"ax\"\n"			\
 	"4:	mov	%0, %4\n"				\
 	"	b	3b\n"					\
-	"	.previous"					\
+	"	.popsection"					\
 	: "=&r" (ret), "=&r" (oldval)				\
 	: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT)		\
 	: "cc", "memory")
@@ -102,14 +102,14 @@
 	"	it	eq	@ explicit IT needed for the 2b label\n"
 	"2:	streqt	%2, [%3]\n"
 	"3:\n"
-	"	.section __ex_table,\"a\"\n"
+	"	.pushsection __ex_table,\"a\"\n"
 	"	.align	3\n"
 	"	.long	1b, 4f, 2b, 4f\n"
-	"	.previous\n"
-	"	.section .fixup,\"ax\"\n"
+	"	.popsection\n"
+	"	.pushsection .fixup,\"ax\"\n"
 	"4:	mov	%0, %4\n"
 	"	b	3b\n"
-	"	.previous"
+	"	.popsection"
 	: "=&r" (val)
 	: "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
 	: "cc", "memory");
diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h
index 7f36d00..feb988a 100644
--- a/arch/arm/include/asm/highmem.h
+++ b/arch/arm/include/asm/highmem.h
@@ -11,7 +11,11 @@
 
 #define kmap_prot		PAGE_KERNEL
 
-#define flush_cache_kmaps()	flush_cache_all()
+#define flush_cache_kmaps() \
+	do { \
+		if (cache_is_vivt()) \
+			flush_cache_all(); \
+	} while (0)
 
 extern pte_t *pkmap_page_table;
 
@@ -21,11 +25,20 @@
 extern void *kmap_high_get(struct page *page);
 extern void kunmap_high(struct page *page);
 
+extern void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte);
+extern void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte);
+
+/*
+ * The following functions are already defined by <linux/highmem.h>
+ * when CONFIG_HIGHMEM is not set.
+ */
+#ifdef CONFIG_HIGHMEM
 extern void *kmap(struct page *page);
 extern void kunmap(struct page *page);
 extern void *kmap_atomic(struct page *page, enum km_type type);
 extern void kunmap_atomic(void *kvaddr, enum km_type type);
 extern void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
 extern struct page *kmap_atomic_to_page(const void *ptr);
+#endif
 
 #endif
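
The kmap_high_l1_vipt()/kunmap_high_l1_vipt() pair is new; judging from the declarations and the new KM_L1_CACHE kmap slot, they give cache-maintenance code a temporary kernel mapping of a highmem page while preserving whatever pte already occupied that slot, presumably so the helpers can nest safely. A hypothetical caller, using only the signatures declared above (flush_one_highmem_page() itself is made up for this sketch), might look like:

	#include <linux/highmem.h>
	#include <asm/highmem.h>

	static void flush_one_highmem_page(struct page *page)
	{
		pte_t saved_pte;
		void *vaddr;

		/* map the page via the KM_L1_CACHE slot, saving the old pte */
		vaddr = kmap_high_l1_vipt(page, &saved_pte);

		/* ... perform cache maintenance on [vaddr, vaddr + PAGE_SIZE) ... */

		/* restore whatever mapping occupied the slot before */
		kunmap_high_l1_vipt(page, saved_pte);
	}
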
diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h
index 328f14a..237282f 100644
--- a/arch/arm/include/asm/irq.h
+++ b/arch/arm/include/asm/irq.h
@@ -17,6 +17,7 @@
 
 #ifndef __ASSEMBLY__
 struct irqaction;
+struct pt_regs;
 extern void migrate_irqs(void);
 
 extern void asm_do_IRQ(unsigned int, struct pt_regs *);
diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
index c019949..c4b2ea3 100644
--- a/arch/arm/include/asm/kmap_types.h
+++ b/arch/arm/include/asm/kmap_types.h
@@ -18,6 +18,7 @@
 	KM_IRQ1,
 	KM_SOFTIRQ0,
 	KM_SOFTIRQ1,
+	KM_L1_CACHE,
 	KM_L2_CACHE,
 	KM_TYPE_NR
 };
diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
new file mode 100644
index 0000000..25f76ba
--- /dev/null
+++ b/arch/arm/include/asm/outercache.h
@@ -0,0 +1,75 @@
+/*
+ * arch/arm/include/asm/outercache.h
+ *
+ * Copyright (C) 2010 ARM Ltd.
+ * Written by Catalin Marinas <catalin.marinas@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __ASM_OUTERCACHE_H
+#define __ASM_OUTERCACHE_H
+
+struct outer_cache_fns {
+	void (*inv_range)(unsigned long, unsigned long);
+	void (*clean_range)(unsigned long, unsigned long);
+	void (*flush_range)(unsigned long, unsigned long);
+#ifdef CONFIG_OUTER_CACHE_SYNC
+	void (*sync)(void);
+#endif
+};
+
+#ifdef CONFIG_OUTER_CACHE
+
+extern struct outer_cache_fns outer_cache;
+
+static inline void outer_inv_range(unsigned long start, unsigned long end)
+{
+	if (outer_cache.inv_range)
+		outer_cache.inv_range(start, end);
+}
+static inline void outer_clean_range(unsigned long start, unsigned long end)
+{
+	if (outer_cache.clean_range)
+		outer_cache.clean_range(start, end);
+}
+static inline void outer_flush_range(unsigned long start, unsigned long end)
+{
+	if (outer_cache.flush_range)
+		outer_cache.flush_range(start, end);
+}
+
+#else
+
+static inline void outer_inv_range(unsigned long start, unsigned long end)
+{ }
+static inline void outer_clean_range(unsigned long start, unsigned long end)
+{ }
+static inline void outer_flush_range(unsigned long start, unsigned long end)
+{ }
+
+#endif
+
+#ifdef CONFIG_OUTER_CACHE_SYNC
+static inline void outer_sync(void)
+{
+	if (outer_cache.sync)
+		outer_cache.sync();
+}
+#else
+static inline void outer_sync(void)
+{ }
+#endif
+
+#endif	/* __ASM_OUTERCACHE_H */
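
The struct and helpers above relocate (and extend, with the optional sync hook) what previously lived in cacheflush.h; the callbacks are filled in at boot by whichever outer (L2) cache driver the platform uses, such as the L2x0/L220 code under arch/arm/mm. A minimal sketch of such a registration, with hypothetical my_l2_* callbacks standing in for a real driver:

	#include <linux/init.h>
	#include <asm/outercache.h>

	/* assumes CONFIG_OUTER_CACHE, so the global outer_cache exists */
	static void my_l2_inv_range(unsigned long start, unsigned long end)   { /* ... */ }
	static void my_l2_clean_range(unsigned long start, unsigned long end) { /* ... */ }
	static void my_l2_flush_range(unsigned long start, unsigned long end) { /* ... */ }
	#ifdef CONFIG_OUTER_CACHE_SYNC
	static void my_l2_sync(void)                                          { /* ... */ }
	#endif

	static int __init my_l2_init(void)
	{
		outer_cache.inv_range   = my_l2_inv_range;
		outer_cache.clean_range = my_l2_clean_range;
		outer_cache.flush_range = my_l2_flush_range;
	#ifdef CONFIG_OUTER_CACHE_SYNC
		outer_cache.sync        = my_l2_sync;	/* hook only present with OUTER_CACHE_SYNC */
	#endif
		return 0;
	}
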
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index 02f5d99..5f4f480 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -60,6 +60,8 @@
 #include <linux/linkage.h>
 #include <linux/irqflags.h>
 
+#include <asm/outercache.h>
+
 #define __exception	__attribute__((section(".exception.text")))
 
 struct thread_info;
@@ -137,10 +139,12 @@
 #define dmb() __asm__ __volatile__ ("" : : : "memory")
 #endif
 
-#if defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP)
-#define mb()		dmb()
+#ifdef CONFIG_ARCH_HAS_BARRIERS
+#include <mach/barriers.h>
+#elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP)
+#define mb()		do { dsb(); outer_sync(); } while (0)
 #define rmb()		dmb()
-#define wmb()		dmb()
+#define wmb()		mb()
 #else
 #define mb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
 #define rmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
@@ -152,9 +156,9 @@
 #define smp_rmb()	barrier()
 #define smp_wmb()	barrier()
 #else
-#define smp_mb()	mb()
-#define smp_rmb()	rmb()
-#define smp_wmb()	wmb()
+#define smp_mb()	dmb()
+#define smp_rmb()	dmb()
+#define smp_wmb()	dmb()
 #endif
 
 #define read_barrier_depends()		do { } while(0)
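
The barrier rework above makes the mandatory barriers stronger and the SMP ones weaker: mb() (and therefore wmb()) now drains the CPU write buffer with dsb() and then pushes any outer (L2) write buffer via outer_sync(), while smp_mb()/smp_rmb()/smp_wmb() fall back to plain dmb(), which is all that CPU-to-CPU ordering within the inner coherent domain needs; platforms with special requirements can select CONFIG_ARCH_HAS_BARRIERS and supply their own mach/barriers.h. A sketch of the case the stronger wmb() is for, publishing a DMA descriptor before ringing a doorbell (the device, flag value and register offset below are hypothetical):

	#include <linux/types.h>
	#include <linux/io.h>
	#include <asm/system.h>

	struct mydev_desc { u32 flags; };		/* hypothetical DMA descriptor */
	#define MYDEV_DESC_READY	0x1
	#define MYDEV_DOORBELL		0x10		/* hypothetical register offset */

	static void mydev_kick(void __iomem *regs, struct mydev_desc *desc)
	{
		desc->flags = MYDEV_DESC_READY;		/* write to bufferable DMA memory */

		/*
		 * With CONFIG_ARM_DMA_MEM_BUFFERABLE or SMP, wmb() is now mb(),
		 * i.e. dsb() followed by outer_sync(), so the descriptor has left
		 * both the CPU and the outer cache write buffers before the
		 * device is told to fetch it.
		 */
		wmb();

		writel(1, regs + MYDEV_DOORBELL);
	}
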
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 1d6bd40..33e4a48 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -229,16 +229,16 @@
 	__asm__ __volatile__(					\
 	"1:	ldrbt	%1,[%2]\n"				\
 	"2:\n"							\
-	"	.section .fixup,\"ax\"\n"			\
+	"	.pushsection .fixup,\"ax\"\n"			\
 	"	.align	2\n"					\
 	"3:	mov	%0, %3\n"				\
 	"	mov	%1, #0\n"				\
 	"	b	2b\n"					\
-	"	.previous\n"					\
-	"	.section __ex_table,\"a\"\n"			\
+	"	.popsection\n"					\
+	"	.pushsection __ex_table,\"a\"\n"		\
 	"	.align	3\n"					\
 	"	.long	1b, 3b\n"				\
-	"	.previous"					\
+	"	.popsection"					\
 	: "+r" (err), "=&r" (x)					\
 	: "r" (addr), "i" (-EFAULT)				\
 	: "cc")
@@ -265,16 +265,16 @@
 	__asm__ __volatile__(					\
 	"1:	ldrt	%1,[%2]\n"				\
 	"2:\n"							\
-	"	.section .fixup,\"ax\"\n"			\
+	"	.pushsection .fixup,\"ax\"\n"			\
 	"	.align	2\n"					\
 	"3:	mov	%0, %3\n"				\
 	"	mov	%1, #0\n"				\
 	"	b	2b\n"					\
-	"	.previous\n"					\
-	"	.section __ex_table,\"a\"\n"			\
+	"	.popsection\n"					\
+	"	.pushsection __ex_table,\"a\"\n"		\
 	"	.align	3\n"					\
 	"	.long	1b, 3b\n"				\
-	"	.previous"					\
+	"	.popsection"					\
 	: "+r" (err), "=&r" (x)					\
 	: "r" (addr), "i" (-EFAULT)				\
 	: "cc")
@@ -310,15 +310,15 @@
 	__asm__ __volatile__(					\
 	"1:	strbt	%1,[%2]\n"				\
 	"2:\n"							\
-	"	.section .fixup,\"ax\"\n"			\
+	"	.pushsection .fixup,\"ax\"\n"			\
 	"	.align	2\n"					\
 	"3:	mov	%0, %3\n"				\
 	"	b	2b\n"					\
-	"	.previous\n"					\
-	"	.section __ex_table,\"a\"\n"			\
+	"	.popsection\n"					\
+	"	.pushsection __ex_table,\"a\"\n"		\
 	"	.align	3\n"					\
 	"	.long	1b, 3b\n"				\
-	"	.previous"					\
+	"	.popsection"					\
 	: "+r" (err)						\
 	: "r" (x), "r" (__pu_addr), "i" (-EFAULT)		\
 	: "cc")
@@ -343,15 +343,15 @@
 	__asm__ __volatile__(					\
 	"1:	strt	%1,[%2]\n"				\
 	"2:\n"							\
-	"	.section .fixup,\"ax\"\n"			\
+	"	.pushsection .fixup,\"ax\"\n"			\
 	"	.align	2\n"					\
 	"3:	mov	%0, %3\n"				\
 	"	b	2b\n"					\
-	"	.previous\n"					\
-	"	.section __ex_table,\"a\"\n"			\
+	"	.popsection\n"					\
+	"	.pushsection __ex_table,\"a\"\n"		\
 	"	.align	3\n"					\
 	"	.long	1b, 3b\n"				\
-	"	.previous"					\
+	"	.popsection"					\
 	: "+r" (err)						\
 	: "r" (x), "r" (__pu_addr), "i" (-EFAULT)		\
 	: "cc")
@@ -371,16 +371,16 @@
  THUMB(	"1:	strt	" __reg_oper1 ", [%1]\n"	)	\
  THUMB(	"2:	strt	" __reg_oper0 ", [%1, #4]\n"	)	\
 	"3:\n"							\
-	"	.section .fixup,\"ax\"\n"			\
+	"	.pushsection .fixup,\"ax\"\n"			\
 	"	.align	2\n"					\
 	"4:	mov	%0, %3\n"				\
 	"	b	3b\n"					\
-	"	.previous\n"					\
-	"	.section __ex_table,\"a\"\n"			\
+	"	.popsection\n"					\
+	"	.pushsection __ex_table,\"a\"\n"		\
 	"	.align	3\n"					\
 	"	.long	1b, 4b\n"				\
 	"	.long	2b, 4b\n"				\
-	"	.previous"					\
+	"	.popsection"					\
 	: "+r" (err), "+r" (__pu_addr)				\
 	: "r" (x), "i" (-EFAULT)				\
 	: "cc")
diff --git a/arch/arm/include/asm/ucontext.h b/arch/arm/include/asm/ucontext.h
index bf65e9f..47f023a 100644
--- a/arch/arm/include/asm/ucontext.h
+++ b/arch/arm/include/asm/ucontext.h
@@ -59,23 +59,22 @@
 #endif /* CONFIG_IWMMXT */
 
 #ifdef CONFIG_VFP
-#if __LINUX_ARM_ARCH__ < 6
-/* For ARM pre-v6, we use fstmiax and fldmiax.  This adds one extra
- * word after the registers, and a word of padding at the end for
- * alignment.  */
 #define VFP_MAGIC		0x56465001
-#define VFP_STORAGE_SIZE	152
-#else
-#define VFP_MAGIC		0x56465002
-#define VFP_STORAGE_SIZE	144
-#endif
 
 struct vfp_sigframe
 {
 	unsigned long		magic;
 	unsigned long		size;
-	union vfp_state		storage;
-};
+	struct user_vfp		ufp;
+	struct user_vfp_exc	ufp_exc;
+} __attribute__((__aligned__(8)));
+
+/*
+ *  8 bytes for magic and size, 264 bytes for ufp, 12 bytes for ufp_exc,
+ *  4 bytes padding.
+ */
+#define VFP_STORAGE_SIZE	sizeof(struct vfp_sigframe)
+
 #endif /* CONFIG_VFP */
 
 /*
@@ -91,7 +90,7 @@
 #ifdef CONFIG_IWMMXT
 	struct iwmmxt_sigframe	iwmmxt;
 #endif
-#if 0 && defined CONFIG_VFP /* Not yet saved.  */
+#ifdef CONFIG_VFP
 	struct vfp_sigframe	vfp;
 #endif
 	/* Something that isn't a valid magic number for any coprocessor.  */
diff --git a/arch/arm/include/asm/user.h b/arch/arm/include/asm/user.h
index df95e05..05ac4b0 100644
--- a/arch/arm/include/asm/user.h
+++ b/arch/arm/include/asm/user.h
@@ -83,11 +83,21 @@
 
 /*
  * User specific VFP registers. If only VFPv2 is present, registers 16 to 31
- * are ignored by the ptrace system call.
+ * are ignored by the ptrace system call and the signal handler.
  */
 struct user_vfp {
 	unsigned long long fpregs[32];
 	unsigned long fpscr;
 };
 
+/*
+ * VFP exception registers exposed to user space during signal delivery.
+ * Fields not relevant to the current VFP architecture are ignored.
+ */
+struct user_vfp_exc {
+	unsigned long	fpexc;
+	unsigned long	fpinst;
+	unsigned long	fpinst2;
+};
+
 #endif /* _ARM_USER_H */
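
Taken together with the ucontext.h change, the user-visible VFP signal frame is now the user_vfp + user_vfp_exc pair rather than the kernel-internal union vfp_state, and VFP_STORAGE_SIZE follows from the struct layout: 8 bytes of magic/size, 264 bytes of user_vfp (32 eight-byte fpregs plus fpscr, padded to an 8-byte multiple), 12 bytes of user_vfp_exc and 4 bytes of tail padding from __aligned__(8), i.e. 288 bytes. A compile-time sanity check of that arithmetic (not part of the patch, and assuming CONFIG_VFP) could read:

	#include <linux/kernel.h>
	#include <asm/user.h>
	#include <asm/ucontext.h>

	static void vfp_sigframe_size_check(void)
	{
		BUILD_BUG_ON(sizeof(struct user_vfp)     != 264);
		BUILD_BUG_ON(sizeof(struct user_vfp_exc) != 12);
		BUILD_BUG_ON(VFP_STORAGE_SIZE            != 8 + 264 + 12 + 4);	/* 288 */
	}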